Subversion Repositories HelenOS

Compare Revisions

Rev 4347 → Rev 4348

/branches/dynload/kernel/generic/src/ipc/event.c
0,0 → 1,155
/*
 * Copyright (c) 2009 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
/** @addtogroup generic
 * @{
 */
/**
 * @file
 * @brief Kernel event notifications.
 */
 
#include <ipc/event.h>
#include <ipc/event_types.h>
#include <mm/slab.h>
#include <arch/types.h>
#include <synch/spinlock.h>
#include <console/console.h>
#include <memstr.h>
#include <errno.h>
#include <arch.h>
 
/**
 * The events array.
 * Keeping each event, along with its lock, in its own array slot should
 * decrease the likelihood of cacheline ping-pong.
 */
static event_t events[EVENT_END];
 
/** Initialize kernel events. */
void event_init(void)
{
	unsigned int i;
	for (i = 0; i < EVENT_END; i++) {
		spinlock_initialize(&events[i].lock, "event.lock");
		events[i].answerbox = NULL;
		events[i].counter = 0;
		events[i].method = 0;
	}
}
 
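/** Subscribe an answerbox to a kernel event notification.
 *
 * @param evno      Event type.
 * @param method    Method to be used for the notifications.
 * @param answerbox Answerbox that is to receive the notifications.
 *
 * @return EOK on success, ELIMIT if evno is out of range or EEXISTS if the
 *         event already has a subscriber.
 */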
static int
event_subscribe(event_type_t evno, unative_t method, answerbox_t *answerbox)
{
	if (evno >= EVENT_END)
		return ELIMIT;
	spinlock_lock(&events[evno].lock);
	int res;
	if (events[evno].answerbox == NULL) {
		events[evno].answerbox = answerbox;
		events[evno].method = method;
		events[evno].counter = 0;
		res = EOK;
	} else
		res = EEXISTS;
	spinlock_unlock(&events[evno].lock);
	return res;
}
 
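/** Event subscription syscall.
 *
 * @param evno   Event type.
 * @param method Method to be used for the notifications.
 *
 * @return EOK on success or an error code from event_subscribe().
 */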
unative_t sys_event_subscribe(unative_t evno, unative_t method)
{
	return (unative_t) event_subscribe((event_type_t) evno, (unative_t)
	    method, &TASK->answerbox);
}
 
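/** Check whether an event has a subscriber.
 *
 * @param evno Event type.
 *
 * @return True if the event's answerbox is set, false otherwise.
 */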
bool event_is_subscribed(event_type_t evno)
{
	bool res;
	ASSERT(evno < EVENT_END);
	spinlock_lock(&events[evno].lock);
	res = events[evno].answerbox != NULL;
	spinlock_unlock(&events[evno].lock);
	return res;
}
 
 
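/** Unsubscribe an answerbox from all events.
 *
 * Used during answerbox cleanup to make sure no further notifications are
 * delivered to an answerbox that is going away.
 *
 * @param answerbox Answerbox to be unsubscribed.
 */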
void event_cleanup_answerbox(answerbox_t *answerbox)
{
	unsigned int i;
	for (i = 0; i < EVENT_END; i++) {
		spinlock_lock(&events[i].lock);
		if (events[i].answerbox == answerbox) {
			events[i].answerbox = NULL;
			events[i].counter = 0;
			events[i].method = 0;
		}
		spinlock_unlock(&events[i].lock);
	}
}
 
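/** Send a kernel event notification.
 *
 * If the event has a subscriber, a call structure is allocated atomically,
 * stamped with the per-event counter, queued on the answerbox's notification
 * list and the first waiter on the answerbox is woken up. The notification
 * is silently dropped when there is no subscriber or when the call cannot
 * be allocated.
 *
 * @param evno Event type.
 * @param a1   First argument of the notification.
 * @param a2   Second argument of the notification.
 * @param a3   Third argument of the notification.
 * @param a4   Fourth argument of the notification.
 * @param a5   Fifth argument of the notification.
 */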
void
event_notify(event_type_t evno, unative_t a1, unative_t a2, unative_t a3,
    unative_t a4, unative_t a5)
{
	ASSERT(evno < EVENT_END);
	spinlock_lock(&events[evno].lock);
	if (events[evno].answerbox != NULL) {
		call_t *call = ipc_call_alloc(FRAME_ATOMIC);
		if (call) {
			call->flags |= IPC_CALL_NOTIF;
			call->priv = ++events[evno].counter;
			IPC_SET_METHOD(call->data, events[evno].method);
			IPC_SET_ARG1(call->data, a1);
			IPC_SET_ARG2(call->data, a2);
			IPC_SET_ARG3(call->data, a3);
			IPC_SET_ARG4(call->data, a4);
			IPC_SET_ARG5(call->data, a5);
			spinlock_lock(&events[evno].answerbox->irq_lock);
			list_append(&call->link, &events[evno].answerbox->irq_notifs);
			spinlock_unlock(&events[evno].answerbox->irq_lock);
			waitq_wakeup(&events[evno].answerbox->wq, WAKEUP_FIRST);
		}
	}
	spinlock_unlock(&events[evno].lock);
}
 
/** @}
 */
Property changes:
Added: svn:mergeinfo
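
For orientation, a minimal sketch of how this new API is driven; it is not
part of the revision, EVENT_EXAMPLE stands in for a real member of
event_type_t and the argument values are arbitrary:

#include <ipc/event.h>
#include <ipc/event_types.h>

/* Kernel-side producer: raise the event only if some task subscribed. */
void example_producer(void)
{
	if (event_is_subscribed(EVENT_EXAMPLE))
		event_notify(EVENT_EXAMPLE, 1, 2, 3, 4, 5);
}

On the consumer side, a task subscribes through sys_event_subscribe() with
the method number it wants the notifications delivered on; the notifications
then arrive on its answerbox as calls flagged IPC_CALL_NOTIF.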
/branches/dynload/kernel/generic/src/ipc/sysipc.c
332,7 → 332,7
 	src = IPC_GET_ARG1(call->data);
 	size = IPC_GET_ARG2(call->data);
-	if ((size <= 0) || (size > DATA_XFER_LIMIT))
+	if (size > DATA_XFER_LIMIT)
 		return ELIMIT;
 	call->buffer = (uint8_t *) malloc(size, 0);
/branches/dynload/kernel/generic/src/ipc/ipc.c
44,7 → 44,7
 #include <synch/synch.h>
 #include <ipc/ipc.h>
 #include <ipc/kbox.h>
-#include <event/event.h>
+#include <ipc/event.h>
 #include <errno.h>
 #include <mm/slab.h>
 #include <arch.h>
51,8 → 51,6
 #include <proc/task.h>
 #include <memstr.h>
 #include <debug.h>
-
-
 #include <print.h>
 #include <console/console.h>
 #include <proc/thread.h>
/branches/dynload/kernel/generic/src/ipc/irq.c
128,13 → 128,14
 
 /** Register an answerbox as a receiving end for IRQ notifications.
  *
- * @param box Receiving answerbox.
- * @param inr IRQ number.
- * @param devno Device number.
- * @param method Method to be associated with the notification.
- * @param ucode Uspace pointer to top-half pseudocode.
+ * @param box    Receiving answerbox.
+ * @param inr    IRQ number.
+ * @param devno  Device number.
+ * @param method Method to be associated with the notification.
+ * @param ucode  Uspace pointer to top-half pseudocode.
  *
  * @return EBADMEM, ENOENT or EEXISTS on failure or 0 on success.
+ *
  */
 int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
     unative_t method, irq_code_t *ucode)
142,11 → 143,12
 	ipl_t ipl;
 	irq_code_t *code;
 	irq_t *irq;
+	link_t *hlp;
 	unative_t key[] = {
 		(unative_t) inr,
 		(unative_t) devno
 	};
 
 	if (ucode) {
 		code = code_from_uspace(ucode);
 		if (!code)
154,7 → 156,7
 	} else {
 		code = NULL;
 	}
 
 	/*
 	 * Allocate and populate the IRQ structure.
 	 */
169,7 → 171,7
 	irq->notif_cfg.method = method;
 	irq->notif_cfg.code = code;
 	irq->notif_cfg.counter = 0;
 
 	/*
 	 * Enlist the IRQ structure in the uspace IRQ hash table and the
 	 * answerbox's list.
176,23 → 178,28
 	 */
 	ipl = interrupts_disable();
 	spinlock_lock(&irq_uspace_hash_table_lock);
+	spinlock_lock(&irq->lock);
+	spinlock_lock(&box->irq_lock);
-	if (hash_table_find(&irq_uspace_hash_table, key)) {
+	hlp = hash_table_find(&irq_uspace_hash_table, key);
+	if (hlp) {
+		irq_t *hirq __attribute__((unused))
+		    = hash_table_get_instance(hlp, irq_t, link);
+		/* hirq is locked */
+		spinlock_unlock(&hirq->lock);
 		code_free(code);
 		spinlock_unlock(&box->irq_lock);
 		spinlock_unlock(&irq->lock);
 		spinlock_unlock(&irq_uspace_hash_table_lock);
 		free(irq);
 		interrupts_restore(ipl);
 		return EEXISTS;
 	}
-	spinlock_lock(&irq->lock);	/* Not really necessary, but paranoid */
-	spinlock_lock(&box->irq_lock);
 	hash_table_insert(&irq_uspace_hash_table, key, &irq->link);
 	list_append(&irq->notif_cfg.link, &box->irq_head);
 	spinlock_unlock(&box->irq_lock);
 	spinlock_unlock(&irq->lock);
 	spinlock_unlock(&irq_uspace_hash_table_lock);
 
 	interrupts_restore(ipl);
 	return EOK;
 }
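
As a usage sketch, not part of the revision: a syscall handler would register
the calling task's answerbox along these lines, where the inr, devno and
method values are illustrative and no top-half pseudocode is installed:

	int rc = ipc_irq_register(&TASK->answerbox, 1, 0, 1024, NULL);
	if (rc != EOK) {
		/* EEXISTS means another answerbox already owns this IRQ. */
	}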
222,7 → 229,7
 		return ENOENT;
 	}
 	irq = hash_table_get_instance(lnk, irq_t, link);
-	spinlock_lock(&irq->lock);
+	/* irq is locked */
 	spinlock_lock(&box->irq_lock);
 	ASSERT(irq->notif_cfg.answerbox == box);
233,11 → 240,19
 	/* Remove the IRQ from the answerbox's list. */
 	list_remove(&irq->notif_cfg.link);
 
+	/*
+	 * We need to drop the IRQ lock now because hash_table_remove() will try
+	 * to reacquire it. That basically violates the natural locking order,
+	 * but a deadlock in hash_table_remove() is prevented by the fact that
+	 * we already held the IRQ lock and didn't drop the hash table lock in
+	 * the meantime.
+	 */
+	spinlock_unlock(&irq->lock);
 
 	/* Remove the IRQ from the uspace IRQ hash table. */
 	hash_table_remove(&irq_uspace_hash_table, key, 2);
 	spinlock_unlock(&irq_uspace_hash_table_lock);
-	spinlock_unlock(&irq->lock);
 	spinlock_unlock(&box->irq_lock);
 
 	/* Free up the IRQ structure. */
291,13 → 306,21
 		/* Unlist from the answerbox. */
 		list_remove(&irq->notif_cfg.link);
-		/* Remove from the hash table. */
-		hash_table_remove(&irq_uspace_hash_table, key, 2);
 		/* Free up the pseudo code and associated structures. */
 		code_free(irq->notif_cfg.code);
+		/*
+		 * We need to drop the IRQ lock now because hash_table_remove()
+		 * will try to reacquire it. That basically violates the natural
+		 * locking order, but a deadlock in hash_table_remove() is
+		 * prevented by the fact that we already held the IRQ lock and
+		 * didn't drop the hash table lock in the meantime.
+		 */
 		spinlock_unlock(&irq->lock);
+		/* Remove from the hash table. */
+		hash_table_remove(&irq_uspace_hash_table, key, 2);
 		free(irq);
 	}