Subversion Repositories HelenOS

Compare Revisions

Rev 2292 → Rev 2291

/branches/rcu/kernel/generic/include/proc/thread.h
53,7 → 53,10
 
/* Thread flags */
 
/** Thread cannot be migrated to another CPU. */
/** Thread cannot be migrated to another CPU.
* When using this flag, the caller must set cpu in the thread_t
* structure manually before calling thread_ready() (even on a uniprocessor).
*/
#define THREAD_FLAG_WIRED (1 << 0)
/** Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_STOLEN (1 << 1)
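 
As an aside, a minimal sketch of how a caller would honour the rule documented above: when THREAD_FLAG_WIRED is used, thread_t::cpu must be set before thread_ready(). The thread_create() signature is assumed from typical HelenOS kernel code of this era and is not part of this diff:

static void wire_thread_to_cpu(void (*func)(void *), cpu_t *target)
{
	/* Create the thread wired, i.e. not migratable to another CPU. */
	thread_t *t = thread_create(func, NULL, TASK, THREAD_FLAG_WIRED,
	    "wired_example", false);
	if (!t)
		panic("cannot create wired thread\n");

	/* Mandatory when THREAD_FLAG_WIRED is set, even on a uniprocessor. */
	t->cpu = target;

	/* The thread will only ever be scheduled on 'target'. */
	thread_ready(t);
}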
/branches/rcu/kernel/generic/include/ddi/irq.h
1,154 → 1,162
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/** @file
*/
 
#ifndef KERN_IRQ_H_
#define KERN_IRQ_H_
 
typedef enum {
CMD_MEM_READ_1 = 0,
CMD_MEM_READ_2,
CMD_MEM_READ_4,
CMD_MEM_READ_8,
CMD_MEM_WRITE_1,
CMD_MEM_WRITE_2,
CMD_MEM_WRITE_4,
CMD_MEM_WRITE_8,
CMD_PORT_READ_1,
CMD_PORT_WRITE_1,
CMD_IA64_GETCHAR,
CMD_PPC32_GETCHAR,
CMD_LAST
} irq_cmd_type;
 
typedef struct {
irq_cmd_type cmd;
void *addr;
unsigned long long value;
int dstarg;
} irq_cmd_t;
 
typedef struct {
unsigned int cmdcount;
irq_cmd_t *cmds;
} irq_code_t;
 
#ifdef KERNEL
 
#include <arch/types.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <proc/task.h>
 
typedef enum {
IRQ_DECLINE, /**< Decline to service. */
IRQ_ACCEPT /**< Accept to service. */
} irq_ownership_t;
 
typedef enum {
IRQ_TRIGGER_LEVEL = 1,
IRQ_TRIGGER_EDGE
} irq_trigger_t;
 
struct irq;
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...);
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
* It is protected by irq_t::lock.
*/
typedef struct {
/** When false, notifications are not sent. */
bool notify;
/** Answerbox for notifications. */
answerbox_t *answerbox;
/** Method to be used for the notification. */
unative_t method;
/** Top-half pseudocode. */
irq_code_t *code;
/** Counter. */
count_t counter;
/**
* Link between IRQs that are notifying the same answerbox. The list is
* protected by the answerbox irq_lock.
*/
link_t link;
} ipc_notif_cfg_t;
 
/** Structure representing one device IRQ.
*
* If one device has multiple interrupts, there will be multiple irq_t
* instantiations with the same devno.
*/
typedef struct irq {
/** Hash table link. */
link_t link;
 
/** Lock protecting everything in this structure
* except the link member. When both the IRQ
* hash table lock and this lock are to be acquired,
* this lock must not be taken first.
*/
SPINLOCK_DECLARE(lock);
 
/** Unique device number. -1 if not yet assigned. */
devno_t devno;
 
/** Actual IRQ number. -1 if not yet assigned. */
inr_t inr;
/** Trigger level of the IRQ.*/
irq_trigger_t trigger;
/** Claim ownership of the IRQ. */
irq_ownership_t (* claim)(void);
/** Handler for this IRQ and device. */
irq_handler_t handler;
/** Argument for the handler. */
void *arg;
 
/** Notification configuration structure. */
ipc_notif_cfg_t notif_cfg;
} irq_t;
 
extern void irq_init(count_t inrs, count_t chains);
extern void irq_initialize(irq_t *irq);
extern void irq_register(irq_t *irq);
extern irq_t *irq_dispatch_and_lock(inr_t inr);
extern irq_t *irq_find_and_lock(inr_t inr, devno_t devno);
 
#endif
 
#endif
 
/** @}
*/
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/** @file
*/
 
#ifndef KERN_IRQ_H_
#define KERN_IRQ_H_
 
typedef enum {
CMD_MEM_READ_1 = 0,
CMD_MEM_READ_2,
CMD_MEM_READ_4,
CMD_MEM_READ_8,
CMD_MEM_WRITE_1,
CMD_MEM_WRITE_2,
CMD_MEM_WRITE_4,
CMD_MEM_WRITE_8,
CMD_PORT_READ_1,
CMD_PORT_WRITE_1,
CMD_IA64_GETCHAR,
CMD_PPC32_GETCHAR,
CMD_LAST
} irq_cmd_type;
 
typedef struct {
irq_cmd_type cmd;
void *addr;
unsigned long long value;
int dstarg;
} irq_cmd_t;
 
typedef struct {
unsigned int cmdcount;
irq_cmd_t *cmds;
} irq_code_t;
 
#ifdef KERNEL
 
#include <arch/types.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <proc/task.h>
 
typedef enum {
IRQ_DECLINE, /**< Decline to service. */
IRQ_ACCEPT /**< Accept to service. */
} irq_ownership_t;
 
typedef enum {
IRQ_TRIGGER_LEVEL = 1,
IRQ_TRIGGER_EDGE
} irq_trigger_t;
 
struct irq;
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...);
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
* It is protected by irq_t::lock.
*/
typedef struct {
/** When false, notifications are not sent. */
bool notify;
/** Answerbox for notifications. */
answerbox_t *answerbox;
/** Method to be used for the notification. */
unative_t method;
/** Top-half pseudocode. */
irq_code_t *code;
/** Counter. */
count_t counter;
/**
* Link between IRQs that are notifying the same answerbox. The list is
* protected by the answerbox irq_lock.
*/
link_t link;
} ipc_notif_cfg_t;
 
/** Structure representing one device IRQ.
*
* If one device has multiple interrupts, there will be multiple irq_t
* instantiations with the same devno.
*/
typedef struct irq {
/** Hash table link. */
link_t link;
 
/** Lock protecting everything in this structure
* except the link member. When both the IRQ
* hash table lock and this lock are to be acquired,
* this lock must not be taken first.
*/
SPINLOCK_DECLARE(lock);
/** Send EOI before processing the interrupt.
* This is essential for the timer interrupt, which
* has to be acknowledged before preemption
* to make sure another timer interrupt will
* eventually be generated.
*/
bool preack;
 
/** Unique device number. -1 if not yet assigned. */
devno_t devno;
 
/** Actual IRQ number. -1 if not yet assigned. */
inr_t inr;
/** Trigger level of the IRQ. */
irq_trigger_t trigger;
/** Claim ownership of the IRQ. */
irq_ownership_t (* claim)(void);
/** Handler for this IRQ and device. */
irq_handler_t handler;
/** Argument for the handler. */
void *arg;
 
/** Notification configuration structure. */
ipc_notif_cfg_t notif_cfg;
} irq_t;
 
extern void irq_init(count_t inrs, count_t chains);
extern void irq_initialize(irq_t *irq);
extern void irq_register(irq_t *irq);
extern irq_t *irq_dispatch_and_lock(inr_t inr);
extern irq_t *irq_find_and_lock(inr_t inr, devno_t devno);
 
#endif
 
#endif
 
/** @}
*/
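 
For illustration, a sketch of the "top-half pseudocode" that the irq_cmd_t / irq_code_t structures above describe: a driver hands the kernel a small program to execute at interrupt time. The device port address and the meaning of dstarg (index of the notification argument that receives the value read) are assumptions for the example, not taken from this diff:

static irq_cmd_t example_cmds[] = {
	{
		.cmd = CMD_PORT_READ_1,
		.addr = (void *) 0x60,	/* hypothetical device port */
		.dstarg = 2		/* store the byte read in IPC argument 2 */
	}
};

static irq_code_t example_code = {
	.cmdcount = sizeof(example_cmds) / sizeof(irq_cmd_t),
	.cmds = example_cmds
};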
/branches/rcu/kernel/generic/include/mm/as.h
89,17 → 89,6
@public
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
/**
* Number of processors on which this address space is active.
* Protected by asidlock.
*/
count_t cpu_refcount;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
* Protected by asidlock.
*/
asid_t asid;
mutex_t lock;
106,9 → 95,18
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
/** B+tree of address space areas. */
btree_t as_area_btree;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
*/
asid_t asid;
/** Non-generic content. */
as_genarch_t genarch;
135,17 → 133,6
typedef struct as {
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
/**
* Number of processors on which this address space is active.
* Protected by asidlock.
*/
count_t cpu_refcount;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
* Protected by asidlock.
*/
asid_t asid;
 
mutex_t lock;
 
152,9 → 139,18
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
 
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
 
/** B+tree of address space areas. */
btree_t as_area_btree;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
*/
asid_t asid;
/** Non-generic content. */
as_genarch_t genarch;
 
209,6 → 205,7
/** Address space area structure.
*
* Each as_area_t structure describes one contiguous area of virtual memory.
* In the future, it should not be difficult to support shared areas.
*/
typedef struct {
mutex_t lock;
253,6 → 250,7
extern as_operations_t *as_operations;
#endif
 
SPINLOCK_EXTERN(inactive_as_with_asid_lock);
extern link_t inactive_as_with_asid_head;
 
extern void as_init(void);
/branches/rcu/kernel/generic/src/main/main.c
238,7 → 238,7
printf("config.memory_size=%zdM\n", config.memory_size >> 20);
printf("config.cpu_count=%zd\n", config.cpu_count);
cpu_init();
 
calibrate_delay_loop();
clock_counter_init();
timeout_init();
246,6 → 246,8
task_init();
thread_init();
futex_init();
 
 
klog_init();
if (init.cnt > 0) {
255,7 → 257,6
init.tasks[i].size);
} else
printf("No init binaries found\n");
ipc_init();
 
/*
272,9 → 273,10
if (!t)
panic("can't create kinit thread\n");
thread_ready(t);
 
//tasklets disabled for debugging purposes
tasklet_run_tasklet_thread(k);
tasklet_run_tasklet_thread(k);
 
/*
* This call to scheduler() will return to kinit,
* starting the thread of kernel threads.
/branches/rcu/kernel/generic/src/proc/scheduler.c
61,6 → 61,7
#include <cpu.h>
#include <print.h>
#include <debug.h>
#include <proc/tasklet.h>
 
static void before_task_runs(void);
static void before_thread_runs(void);
226,7 → 227,11
* Take the first thread from the queue.
*/
t = list_get_instance(r->rq_head.next, thread_t, rq_link);
if (verbose)
printf("cpu%d removing, rq_head %x, t: %x, next: %x, link: %x \n",CPU->id, r->rq_head, t, r->rq_head.next, t->rq_link);
list_remove(&t->rq_link);
if (verbose)
printf("cpu%d removed, rq_head %x, t: %x, next: %x, link: %x \n",CPU->id, r->rq_head, t, r->rq_head.next, t->rq_link);
 
spinlock_unlock(&r->lock);
 
438,7 → 443,8
THREAD->call_me = NULL;
THREAD->call_me_with = NULL;
}
 
if (verbose)
printf("cpu%d, Sleeping unlocking \n", CPU->id);
spinlock_unlock(&THREAD->lock);
 
break;
454,12 → 460,17
 
THREAD = NULL;
}
 
if (verbose)
printf("cpu%d looking for next thread\n", CPU->id);
THREAD = find_best_thread();
if (verbose)
printf("cpu%d t locking THREAD:%x \n", CPU->id, THREAD);
spinlock_lock(&THREAD->lock);
priority = THREAD->priority;
spinlock_unlock(&THREAD->lock);
if (verbose)
printf("cpu%d t unlocked after priority THREAD:%x \n", CPU->id, THREAD);
 
relink_rq(priority);
 
/branches/rcu/kernel/generic/src/proc/thread.c
238,6 → 238,7
cpu = CPU;
if (t->flags & THREAD_FLAG_WIRED) {
ASSERT(t->cpu != NULL);
cpu = t->cpu;
}
t->state = Ready;
/branches/rcu/kernel/generic/src/ddi/irq.c
1,379 → 1,380
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/**
* @file
* @brief IRQ dispatcher.
*
* This file provides means of connecting IRQs with particular
* devices and logic for dispatching interrupts to IRQ handlers
* defined by those devices.
*
* This code is designed to support:
* - multiple devices sharing single IRQ
* - multiple IRQs per single device
*
*
* Note about architectures.
*
* Some architectures have the term IRQ well defined. Examples
* of such architectures include amd64, ia32 and mips32. Some
* other architectures, such as sparc64, don't use the term
* at all. In those cases, we boldly step forward and define what
* an IRQ is.
*
* The implementation is generic enough and still allows the
* architectures to use the hardware layout effectively.
* For instance, on amd64 and ia32, where there are only 16
* IRQs, the irq_hash_table can be optimized to a one-dimensional
* array. Next, when it is known that the IRQ numbers (aka INRs)
* are unique, the claim functions can always return IRQ_ACCEPT.
*
*
* Note about the irq_hash_table.
*
* The hash table is configured to use two keys: inr and devno.
* However, the hash index is computed only from inr. Moreover,
* if devno is -1, the match is based on the return value of
* the claim() function instead of on devno.
*/
 
#include <ddi/irq.h>
#include <adt/hash_table.h>
#include <arch/types.h>
#include <synch/spinlock.h>
#include <arch.h>
 
#define KEY_INR 0
#define KEY_DEVNO 1
 
/**
* Spinlock protecting the hash table.
* This lock must be taken only when interrupts are disabled.
*/
SPINLOCK_INITIALIZE(irq_hash_table_lock);
static hash_table_t irq_hash_table;
 
/**
* Hash table operations for cases when we know that
* there will be collisions between different keys.
*/
static index_t irq_ht_hash(unative_t *key);
static bool irq_ht_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_ht_ops = {
.hash = irq_ht_hash,
.compare = irq_ht_compare,
.remove_callback = NULL /* not used */
};
 
/**
* Hash table operations for cases when we know that
* there will be no collisions between different keys.
* However, there might still be collisions among
* elements with a single key (sharing of one IRQ).
*/
static index_t irq_lin_hash(unative_t *key);
static bool irq_lin_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_lin_ops = {
.hash = irq_lin_hash,
.compare = irq_lin_compare,
.remove_callback = NULL /* not used */
};
 
/** Initialize IRQ subsystem.
*
* @param inrs Number of unique IRQ numbers (INRs).
* @param chains Number of chains in the hash table.
*/
void irq_init(count_t inrs, count_t chains)
{
/*
* Be smart about the choice of the hash table operations.
* In cases in which inrs equals the requested number of
* chains (i.e. where there is no collision between
* different keys), we can use optimized set of operations.
*/
if (inrs == chains)
hash_table_create(&irq_hash_table, chains, 2, &irq_lin_ops);
else
hash_table_create(&irq_hash_table, chains, 2, &irq_ht_ops);
}
 
/** Initialize one IRQ structure.
*
* @param irq Pointer to the IRQ structure to be initialized.
*
*/
void irq_initialize(irq_t *irq)
{
link_initialize(&irq->link);
spinlock_initialize(&irq->lock, "irq.lock");
irq->inr = -1;
irq->devno = -1;
irq->trigger = (irq_trigger_t) 0;
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->notif_cfg.notify = false;
irq->notif_cfg.answerbox = NULL;
irq->notif_cfg.code = NULL;
irq->notif_cfg.method = 0;
irq->notif_cfg.counter = 0;
link_initialize(&irq->notif_cfg.link);
}
 
/** Register IRQ for device.
*
* The irq structure must be filled with information
* about the interrupt source and with the claim()
* function pointer and irq_handler() function pointer.
*
* @param irq IRQ structure belonging to a device.
*/
void irq_register(irq_t *irq)
{
ipl_t ipl;
unative_t key[] = {
(unative_t) irq->inr,
(unative_t) irq->devno
};
ipl = interrupts_disable();
spinlock_lock(&irq_hash_table_lock);
hash_table_insert(&irq_hash_table, key, &irq->link);
spinlock_unlock(&irq_hash_table_lock);
interrupts_restore(ipl);
}
 
/** Dispatch the IRQ.
*
* We assume this function is only called from interrupt
* context (i.e. that interrupts are disabled prior to
* this call).
*
* This function attempts to look up a fitting IRQ
* structure. On success, it returns with interrupts
* disabled, holding the lock of the respective structure.
*
* @param inr Interrupt number (aka inr or irq).
*
* @return IRQ structure of the respective device or NULL.
*/
irq_t *irq_dispatch_and_lock(inr_t inr)
{
link_t *lnk;
unative_t key[] = {
(unative_t) inr,
(unative_t) -1 /* search will use claim() instead of devno */
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, key);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Find the IRQ structure corresponding to inr and devno.
*
* This function attempts to look up the IRQ structure
* corresponding to its arguments. On success, this
* function returns with interrupts disabled, holding
* the lock of the respective IRQ structure.
*
* This function assumes interrupts are already disabled.
*
* @param inr INR being looked up.
* @param devno Devno being looked up.
*
* @return Locked IRQ structure on success or NULL on failure.
*/
irq_t *irq_find_and_lock(inr_t inr, devno_t devno)
{
link_t *lnk;
unative_t keys[] = {
(unative_t) inr,
(unative_t) devno
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, keys);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* can be collisions between different
* INRs.
*
* The devno is not used to compute the hash.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_ht_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr % irq_hash_table.entries;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the more complex architecture setup
* in which there are way too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match must be done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_ht_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = hash_table_get_instance(item, irq_t, link);
inr_t inr = (inr_t) key[KEY_INR];
devno_t devno = (devno_t) key[KEY_DEVNO];
 
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock(). */
rv = ((irq->inr == inr) && (irq->claim() == IRQ_ACCEPT));
} else {
/* Invoked by irq_find_and_lock(). */
rv = ((irq->inr == inr) && (irq->devno == devno));
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
 
return rv;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* are no collisions between different
* INRs.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_lin_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the less complex architecture setup
* in which there are not too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match is not done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_lin_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = list_get_instance(item, irq_t, link);
devno_t devno = (devno_t) key[KEY_DEVNO];
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock() */
rv = (irq->claim() == IRQ_ACCEPT);
} else {
/* Invoked by irq_find_and_lock() */
rv = (irq->devno == devno);
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
return rv;
}
 
/** @}
*/
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericddi
* @{
*/
/**
* @file
* @brief IRQ dispatcher.
*
* This file provides means of connecting IRQs with particular
* devices and logic for dispatching interrupts to IRQ handlers
* defined by those devices.
*
* This code is designed to support:
* - multiple devices sharing single IRQ
* - multiple IRQs per single device
*
*
* Note about architectures.
*
* Some architectures have the term IRQ well defined. Examples
* of such architectures include amd64, ia32 and mips32. Some
* other architectures, such as sparc64, don't use the term
* at all. In those cases, we boldly step forward and define what
* an IRQ is.
*
* The implementation is generic enough and still allows the
* architectures to use the hardware layout effectively.
* For instance, on amd64 and ia32, where there are only 16
* IRQs, the irq_hash_table can be optimized to a one-dimensional
* array. Next, when it is known that the IRQ numbers (aka INRs)
* are unique, the claim functions can always return IRQ_ACCEPT.
*
*
* Note about the irq_hash_table.
*
* The hash table is configured to use two keys: inr and devno.
* However, the hash index is computed only from inr. Moreover,
* if devno is -1, the match is based on the return value of
* the claim() function instead of on devno.
*/
 
#include <ddi/irq.h>
#include <adt/hash_table.h>
#include <arch/types.h>
#include <synch/spinlock.h>
#include <arch.h>
 
#define KEY_INR 0
#define KEY_DEVNO 1
 
/**
* Spinlock protecting the hash table.
* This lock must be taken only when interrupts are disabled.
*/
SPINLOCK_INITIALIZE(irq_hash_table_lock);
static hash_table_t irq_hash_table;
 
/**
* Hash table operations for cases when we know that
* there will be collisions between different keys.
*/
static index_t irq_ht_hash(unative_t *key);
static bool irq_ht_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_ht_ops = {
.hash = irq_ht_hash,
.compare = irq_ht_compare,
.remove_callback = NULL /* not used */
};
 
/**
* Hash table operations for cases when we know that
* there will be no collisions between different keys.
* However, there might still be collisions among
* elements with a single key (sharing of one IRQ).
*/
static index_t irq_lin_hash(unative_t *key);
static bool irq_lin_compare(unative_t *key, count_t keys, link_t *item);
 
static hash_table_operations_t irq_lin_ops = {
.hash = irq_lin_hash,
.compare = irq_lin_compare,
.remove_callback = NULL /* not used */
};
 
/** Initialize IRQ subsystem.
*
* @param inrs Number of unique IRQ numbers (INRs).
* @param chains Number of chains in the hash table.
*/
void irq_init(count_t inrs, count_t chains)
{
/*
* Be smart about the choice of the hash table operations.
* In cases in which inrs equals the requested number of
* chains (i.e. where there is no collision between
* different keys), we can use optimized set of operations.
*/
if (inrs == chains)
hash_table_create(&irq_hash_table, chains, 2, &irq_lin_ops);
else
hash_table_create(&irq_hash_table, chains, 2, &irq_ht_ops);
}
 
/** Initialize one IRQ structure.
*
* @param irq Pointer to the IRQ structure to be initialized.
*
*/
void irq_initialize(irq_t *irq)
{
link_initialize(&irq->link);
spinlock_initialize(&irq->lock, "irq.lock");
irq->preack = false;
irq->inr = -1;
irq->devno = -1;
irq->trigger = (irq_trigger_t) 0;
irq->claim = NULL;
irq->handler = NULL;
irq->arg = NULL;
irq->notif_cfg.notify = false;
irq->notif_cfg.answerbox = NULL;
irq->notif_cfg.code = NULL;
irq->notif_cfg.method = 0;
irq->notif_cfg.counter = 0;
link_initialize(&irq->notif_cfg.link);
}
 
/** Register IRQ for device.
*
* The irq structure must be filled with information
* about the interrupt source and with the claim()
* function pointer and irq_handler() function pointer.
*
* @param irq IRQ structure belonging to a device.
*/
void irq_register(irq_t *irq)
{
ipl_t ipl;
unative_t key[] = {
(unative_t) irq->inr,
(unative_t) irq->devno
};
ipl = interrupts_disable();
spinlock_lock(&irq_hash_table_lock);
hash_table_insert(&irq_hash_table, key, &irq->link);
spinlock_unlock(&irq_hash_table_lock);
interrupts_restore(ipl);
}
 
/** Dispatch the IRQ.
*
* We assume this function is only called from interrupt
* context (i.e. that interrupts are disabled prior to
* this call).
*
* This function attempts to look up a fitting IRQ
* structure. On success, it returns with interrupts
* disabled, holding the lock of the respective structure.
*
* @param inr Interrupt number (aka inr or irq).
*
* @return IRQ structure of the respective device or NULL.
*/
irq_t *irq_dispatch_and_lock(inr_t inr)
{
link_t *lnk;
unative_t key[] = {
(unative_t) inr,
(unative_t) -1 /* search will use claim() instead of devno */
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, key);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Find the IRQ structure corresponding to inr and devno.
*
* This function attempts to look up the IRQ structure
* corresponding to its arguments. On success, this
* function returns with interrupts disabled, holding
* the lock of the respective IRQ structure.
*
* This function assumes interrupts are already disabled.
*
* @param inr INR being looked up.
* @param devno Devno being looked up.
*
* @return Locked IRQ structure on success or NULL on failure.
*/
irq_t *irq_find_and_lock(inr_t inr, devno_t devno)
{
link_t *lnk;
unative_t keys[] = {
(unative_t) inr,
(unative_t) devno
};
spinlock_lock(&irq_hash_table_lock);
 
lnk = hash_table_find(&irq_hash_table, keys);
if (lnk) {
irq_t *irq;
irq = hash_table_get_instance(lnk, irq_t, link);
 
spinlock_unlock(&irq_hash_table_lock);
return irq;
}
spinlock_unlock(&irq_hash_table_lock);
 
return NULL;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* can be collisions between different
* INRs.
*
* The devno is not used to compute the hash.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_ht_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr % irq_hash_table.entries;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the more complex architecture setup
* in which there are way too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match must be done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_ht_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = hash_table_get_instance(item, irq_t, link);
inr_t inr = (inr_t) key[KEY_INR];
devno_t devno = (devno_t) key[KEY_DEVNO];
 
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock(). */
rv = ((irq->inr == inr) && (irq->claim() == IRQ_ACCEPT));
} else {
/* Invoked by irq_find_and_lock(). */
rv = ((irq->inr == inr) && (irq->devno == devno));
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
 
return rv;
}
 
/** Compute hash index for the key.
*
* This function computes hash index into
* the IRQ hash table for which there
* are no collisions between different
* INRs.
*
* @param key The first of the keys is inr and the second is devno or -1.
*
* @return Index into the hash table.
*/
index_t irq_lin_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr;
}
 
/** Compare hash table element with a key.
*
* There are two things to note about this function.
* First, it is used for the less complex architecture setup
* in which there are not too many interrupt numbers (i.e. inr's)
* to arrange the hash table so that collisions occur only
* among same inrs of different devnos. So the explicit check
* for inr match is not done.
* Second, if devno is -1, the second key (i.e. devno) is not
* used for the match and the result of the claim() function
* is used instead.
*
* This function assumes interrupts are already disabled.
*
* @param key Keys (i.e. inr and devno).
* @param keys This is 2.
* @param item The item to compare the key with.
*
* @return True on match or false otherwise.
*/
bool irq_lin_compare(unative_t key[], count_t keys, link_t *item)
{
irq_t *irq = list_get_instance(item, irq_t, link);
devno_t devno = (devno_t) key[KEY_DEVNO];
bool rv;
spinlock_lock(&irq->lock);
if (devno == -1) {
/* Invoked by irq_dispatch_and_lock() */
rv = (irq->claim() == IRQ_ACCEPT);
} else {
/* Invoked by irq_find_and_lock() */
rv = (irq->devno == devno);
}
/* unlock only on non-match */
if (!rv)
spinlock_unlock(&irq->lock);
return rv;
}
 
/** @}
*/
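 
To make the registration and dispatch path above concrete, a usage sketch. The driver name and the INR/devno values are hypothetical; the explicit unlock mirrors the fact that the compare functions above return with irq->lock held on a match:

static irq_ownership_t mydev_claim(void)
{
	/* A real driver would poll a device status register here. */
	return IRQ_ACCEPT;
}

static void mydev_handler(irq_t *irq, void *arg, ...)
{
	/* Service the device; irq->lock is held by the dispatcher. */
}

static irq_t mydev_irq;

void mydev_attach(void)
{
	irq_initialize(&mydev_irq);
	mydev_irq.inr = 4;		/* hypothetical interrupt number */
	mydev_irq.devno = 1;		/* hypothetical device number */
	mydev_irq.claim = mydev_claim;
	mydev_irq.handler = mydev_handler;
	irq_register(&mydev_irq);
}

/* Called from the architecture's interrupt entry with interrupts disabled. */
void arch_irq_dispatch(inr_t inr)
{
	irq_t *irq = irq_dispatch_and_lock(inr);
	if (irq) {
		irq->handler(irq, irq->arg);
		spinlock_unlock(&irq->lock);
	}
}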
/branches/rcu/kernel/generic/src/ddi/ddi.c
99,7 → 99,8
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping and ENOMEM if there
* was a problem in creating address space area.
* was a problem in creating address space area. ENOTSUP is returned when
* an attempt to create an illegal address alias is detected.
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
138,6 → 139,18
interrupts_restore(ipl);
return ENOENT;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
/*
* Refuse to create an illegal address alias.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOTSUP;
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
spinlock_unlock(&parea_lock);
 
spinlock_lock(&TASK->lock);
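 
The PAGE_COLOR check added above guards against illegal virtual aliases on virtually indexed D-caches. As an illustration only (the real PAGE_COLOR is architecture-specific and not shown in this diff), the color of a mapping is the part of the virtual address above the page offset that selects the cache bin:

/* Hypothetical definition for a cache with 4 page colors. */
#define EXAMPLE_COLOR_BITS	2
#define EXAMPLE_PAGE_COLOR(va) \
	(((va) >> PAGE_WIDTH) & ((1 << EXAMPLE_COLOR_BITS) - 1))

/*
 * Two virtual mappings of one physical frame are safe only if their colors
 * match; otherwise a write through one alias may sit in a different cache
 * line than a read through the other, which is why physmem_map() returns
 * ENOTSUP instead of creating such an alias.
 */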
/branches/rcu/kernel/generic/src/mm/frame.c
70,19 → 70,16
typedef struct {
count_t refcount; /**< tracking of shared frames */
uint8_t buddy_order; /**< buddy system block order */
link_t buddy_link; /**< link to the next free block inside one
order */
link_t buddy_link; /**< link to the next free block inside one order */
void *parent; /**< If allocated by slab, this points there */
} frame_t;
 
typedef struct {
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
pfn_t base; /**< frame_no of the first frame in the frames
array */
pfn_t base; /**< frame_no of the first frame in the frames array */
count_t count; /**< Size of zone */
 
frame_t *frames; /**< array of frame_t structures in this
zone */
frame_t *frames; /**< array of frame_t structures in this zone */
count_t free_count; /**< number of free frame_t structures */
count_t busy_count; /**< number of busy frame_t structures */
160,8 → 157,8
for (i = 0; i < zones.count; i++) {
/* Check for overflow */
z = zones.info[i];
if (overlaps(newzone->base,newzone->count, z->base,
z->count)) {
if (overlaps(newzone->base,newzone->count,
z->base, z->count)) {
printf("Zones overlap!\n");
return -1;
}
169,7 → 166,7
break;
}
/* Move other zones up */
for (j = i; j < zones.count; j++)
for (j = i;j < zones.count; j++)
zones.info[j + 1] = zones.info[j];
zones.info[i] = newzone;
zones.count++;
205,8 → 202,7
z = zones.info[i];
spinlock_lock(&z->lock);
if (z->base <= frame && z->base + z->count > frame) {
/* Unlock the global lock */
spinlock_unlock(&zones.lock);
spinlock_unlock(&zones.lock); /* Unlock the global lock */
if (pzone)
*pzone = i;
return z;
233,8 → 229,7
* Assume interrupts are disabled.
*
* @param order Size (2^order) of free space we are trying to find
* @param pzone Pointer to preferred zone or NULL, on return contains zone
* number
* @param pzone Pointer to preferred zone or NULL, on return contains zone number
*/
static zone_t * find_free_zone_and_lock(uint8_t order, unsigned int *pzone)
{
278,10 → 273,10
* @param order - Order of parent must be different from this parameter!!
*/
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
uint8_t order)
uint8_t order)
{
frame_t *frame;
zone_t *zone;
frame_t * frame;
zone_t * zone;
index_t index;
frame = list_get_instance(child, frame_t, buddy_link);
298,8 → 293,8
 
static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
frame_t *frame;
zone_t *zone;
frame_t * frame;
zone_t * zone;
index_t index;
 
frame = list_get_instance(block, frame_t, buddy_link);
315,17 → 310,16
*
* @return Buddy for given block if found
*/
static link_t *zone_buddy_find_buddy(buddy_system_t *b, link_t *block)
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block)
{
frame_t *frame;
zone_t *zone;
frame_t * frame;
zone_t * zone;
index_t index;
bool is_left, is_right;
 
frame = list_get_instance(block, frame_t, buddy_link);
zone = (zone_t *) b->data;
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),
frame->buddy_order));
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));
is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);
354,8 → 348,8
*
* @return right block
*/
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t *block) {
frame_t *frame_l, *frame_r;
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) {
frame_t * frame_l, * frame_r;
 
frame_l = list_get_instance(block, frame_t, buddy_link);
frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));
371,8 → 365,8
*
* @return Coalesced block (actually block that represents lower address)
*/
static link_t *zone_buddy_coalesce(buddy_system_t *b, link_t *block_1,
link_t *block_2)
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1,
link_t * block_2)
{
frame_t *frame1, *frame2;
388,9 → 382,8
* @param block Buddy system block
* @param order Order to set
*/
static void zone_buddy_set_order(buddy_system_t *b, link_t *block,
uint8_t order) {
frame_t *frame;
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, uint8_t order) {
frame_t * frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->buddy_order = order;
}
402,8 → 395,8
*
* @return Order of block
*/
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block) {
frame_t *frame;
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t * block) {
frame_t * frame;
frame = list_get_instance(block, frame_t, buddy_link);
return frame->buddy_order;
}
427,8 → 420,8
* @param block Buddy system block
*
*/
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) {
frame_t *frame;
static void zone_buddy_mark_available(buddy_system_t *b, link_t * block) {
frame_t * frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->refcount = 0;
}
527,8 → 520,8
frame = zone_get_frame(zone, frame_idx);
if (frame->refcount)
return;
link = buddy_system_alloc_block(zone->buddy_system,
&frame->buddy_link);
link = buddy_system_alloc_block(zone->buddy_system,
&frame->buddy_link);
ASSERT(link);
zone->free_count--;
}
552,12 → 545,12
pfn_t frame_idx;
frame_t *frame;
 
ASSERT(!overlaps(z1->base, z1->count, z2->base, z2->count));
ASSERT(!overlaps(z1->base,z1->count,z2->base,z2->count));
ASSERT(z1->base < z2->base);
 
spinlock_initialize(&z->lock, "zone_lock");
z->base = z1->base;
z->count = z2->base + z2->count - z1->base;
z->count = z2->base+z2->count - z1->base;
z->flags = z1->flags & z2->flags;
 
z->free_count = z1->free_count + z2->free_count;
565,12 → 558,12
max_order = fnzb(z->count);
 
z->buddy_system = (buddy_system_t *) &z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations, (void *) z);
z->buddy_system = (buddy_system_t *)&z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations,
(void *) z);
 
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
for (i = 0; i < z->count; i++) {
/* This marks all frames busy */
frame_initialize(&z->frames[i]);
610,7 → 603,7
}
while (zone_can_alloc(z2, 0)) {
frame_idx = zone_frame_alloc(z2, 0);
frame = &z->frames[frame_idx + (z2->base - z1->base)];
frame = &z->frames[frame_idx + (z2->base-z1->base)];
frame->refcount = 0;
buddy_system_free(z->buddy_system, &frame->buddy_link);
}
675,7 → 668,7
for (i = 0; i < (count_t) (1 << order); i++) {
frame = &zone->frames[i + frame_idx];
frame->buddy_order = 0;
if (!frame->refcount)
if (! frame->refcount)
frame->refcount = 1;
ASSERT(frame->refcount == 1);
}
717,8 → 710,7
spinlock_lock(&zone1->lock);
spinlock_lock(&zone2->lock);
 
cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count -
zone1->base));
cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base));
if (cframes == 1)
order = 0;
else
811,8 → 803,7
/* Allocate frames _after_ the conframe */
/* Check sizes */
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
for (i = 0; i < count; i++) {
frame_initialize(&z->frames[i]);
}
874,20 → 865,16
if (confframe >= start && confframe < start+count) {
for (;confframe < start + count; confframe++) {
addr = PFN2ADDR(confframe);
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.base), config.kernel_size))
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size))
continue;
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.stack_base), config.stack_size))
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.stack_base), config.stack_size))
continue;
bool overlap = false;
count_t i;
for (i = 0; i < init.cnt; i++)
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(init.tasks[i].addr),
init.tasks[i].size)) {
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
overlap = true;
break;
}
928,7 → 915,7
spinlock_unlock(&zone->lock);
}
 
void *frame_get_parent(pfn_t pfn, unsigned int hint)
void * frame_get_parent(pfn_t pfn, unsigned int hint)
{
zone_t *zone = find_zone_and_lock(pfn, &hint);
void *res;
1086,21 → 1073,15
/* Tell the architecture to create some memory */
frame_arch_init();
if (config.cpu_active == 1) {
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
SIZE2FRAMES(config.stack_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)), SIZE2FRAMES(config.stack_size));
count_t i;
for (i = 0; i < init.cnt; i++) {
pfn_t pfn = ADDR2PFN(KA2PA(init.tasks[i].addr));
frame_mark_unavailable(pfn,
SIZE2FRAMES(init.tasks[i].size));
}
for (i = 0; i < init.cnt; i++)
frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size));
 
if (ballocs.size)
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
SIZE2FRAMES(ballocs.size));
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)), SIZE2FRAMES(ballocs.size));
 
/* Black list first frame, as allocating NULL would
* fail in some places */
1125,8 → 1106,7
for (i = 0; i < zones.count; i++) {
zone = zones.info[i];
spinlock_lock(&zone->lock);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
spinlock_unlock(&zone->lock);
}
spinlock_unlock(&zones.lock);
1158,14 → 1138,10
spinlock_lock(&zone->lock);
printf("Memory zone information\n");
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2,
PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count,
((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count,
(zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count,
(zone->free_count * FRAME_SIZE) >> 10);
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
1176,4 → 1152,3
 
/** @}
*/
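 
A generic note on the buddy pairing used throughout the zone code above (an illustration, not the HelenOS implementation, which goes through the IS_BUDDY_LEFT_BLOCK_ABS / IS_BUDDY_RIGHT_BLOCK_ABS macros): for a block of 2^order frames starting at index idx within a zone, the buddy is the block whose index differs exactly in bit 'order', and coalescing is attempted only when that block is free and has the same order.

static inline index_t example_buddy_index(index_t idx, uint8_t order)
{
	/* Flip bit 'order' of the block index to obtain the buddy's index. */
	return idx ^ ((index_t) 1 << order);
}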
 
/branches/rcu/kernel/generic/src/mm/as.c
95,13 → 95,10
#endif
 
/**
* This lock serializes access to the ASID subsystem.
* It protects:
* - inactive_as_with_asid_head list
* - as->asid for each as of the as_t type
* - asids_allocated counter
* This lock protects inactive_as_with_asid_head list. It must be acquired
* before as_t mutex.
*/
SPINLOCK_INITIALIZE(asidlock);
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
 
/**
* This list contains address spaces that are not active on any
208,15 → 205,14
* Since there is no reference to this area,
* it is safe not to lock its mutex.
*/
 
ipl = interrupts_disable();
spinlock_lock(&asidlock);
spinlock_lock(&inactive_as_with_asid_lock);
if (as->asid != ASID_INVALID && as != AS_KERNEL) {
if (as != AS && as->cpu_refcount == 0)
list_remove(&as->inactive_as_with_asid_link);
asid_put(as->asid);
}
spinlock_unlock(&asidlock);
spinlock_unlock(&inactive_as_with_asid_lock);
 
/*
* Destroy address space areas of the address space.
415,7 → 411,7
int i = 0;
if (overlaps(b, c * PAGE_SIZE, area->base,
pages * PAGE_SIZE)) {
pages*PAGE_SIZE)) {
if (b + c * PAGE_SIZE <= start_free) {
/*
557,7 → 553,7
if (area->backend &&
area->backend->frame_free) {
area->backend->frame_free(area, b +
j * PAGE_SIZE, PTE_GET_FRAME(pte));
j * PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + j * PAGE_SIZE);
page_table_unlock(as, false);
617,7 → 613,8
* such address space area, EPERM if there was a problem in accepting the area
* or ENOMEM if there was a problem in allocating destination address space
* area. ENOTSUP is returned if the address space area backend does not support
* sharing.
* sharing or if the kernel detects an attempt to create an illegal address
* alias.
*/
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
670,6 → 667,20
return EPERM;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (!(dst_flags_mask & AS_AREA_EXEC)) {
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
/*
* Refuse to create an illegal address alias.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/*
* Now we are committed to sharing the area.
* First, prepare the area for sharing.
864,8 → 875,7
/** Switch address spaces.
*
* Note that this function cannot sleep as it is essentially a part of
* scheduling. Sleeping here would lead to deadlock on wakeup. Another
* thing which is forbidden in this context is locking the address space.
* scheduling. Sleeping here would lead to deadlock on wakeup.
*
* @param old Old address space or NULL.
* @param new New address space.
872,12 → 882,17
*/
void as_switch(as_t *old_as, as_t *new_as)
{
spinlock_lock(&asidlock);
ipl_t ipl;
bool needs_asid = false;
ipl = interrupts_disable();
spinlock_lock(&inactive_as_with_asid_lock);
 
/*
* First, take care of the old address space.
*/
if (old_as) {
mutex_lock_active(&old_as->lock);
ASSERT(old_as->cpu_refcount);
if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
/*
886,10 → 901,11
* list of inactive address spaces with assigned
* ASID.
*/
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
}
mutex_unlock(&old_as->lock);
 
/*
* Perform architecture-specific tasks when the address space
901,15 → 917,36
/*
* Second, prepare the new address space.
*/
mutex_lock_active(&new_as->lock);
if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
if (new_as->asid != ASID_INVALID)
if (new_as->asid != ASID_INVALID) {
list_remove(&new_as->inactive_as_with_asid_link);
else
new_as->asid = asid_get();
} else {
/*
* Defer call to asid_get() until new_as->lock is released.
*/
needs_asid = true;
}
}
#ifdef AS_PAGE_TABLE
SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
mutex_unlock(&new_as->lock);
 
if (needs_asid) {
/*
* Allocation of new ASID was deferred
* until now in order to avoid deadlock.
*/
asid_t asid;
asid = asid_get();
mutex_lock_active(&new_as->lock);
new_as->asid = asid;
mutex_unlock(&new_as->lock);
}
spinlock_unlock(&inactive_as_with_asid_lock);
interrupts_restore(ipl);
/*
* Perform architecture-specific steps.
916,8 → 953,6
* (e.g. write ASID to hardware register etc.)
*/
as_install_arch(new_as);
 
spinlock_unlock(&asidlock);
AS = new_as;
}
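 
A note on why the rev 2291 side defers asid_get() (the reasoning is implied but not spelled out by the comment above, and the asid_get() internals are an assumption here): asid_get() may have to steal an ASID from an address space on the inactive list, which can require locking that victim's as_t mutex. Calling it while new_as->lock is already held would nest two as_t mutexes and risk deadlock, hence the pattern:

	mutex_lock_active(&new_as->lock);
	/* ... decide whether an ASID is needed, but do not allocate it ... */
	mutex_unlock(&new_as->lock);

	if (needs_asid) {
		asid_t asid = asid_get();	/* may lock other as_t mutexes */
		mutex_lock_active(&new_as->lock);
		new_as->asid = asid;
		mutex_unlock(&new_as->lock);
	}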
/branches/rcu/kernel/generic/src/mm/tlb.c
78,8 → 78,7
* @param page Virtual page address, if required by type.
* @param count Number of pages, if required by type.
*/
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
uintptr_t page, count_t count)
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, uintptr_t page, count_t count)
{
int i;
 
108,11 → 107,11
/*
* Enqueue the message.
*/
index_t idx = cpu->tlb_messages_count++;
cpu->tlb_messages[idx].type = type;
cpu->tlb_messages[idx].asid = asid;
cpu->tlb_messages[idx].page = page;
cpu->tlb_messages[idx].count = count;
cpu->tlb_messages[cpu->tlb_messages_count].type = type;
cpu->tlb_messages[cpu->tlb_messages_count].asid = asid;
cpu->tlb_messages[cpu->tlb_messages_count].page = page;
cpu->tlb_messages[cpu->tlb_messages_count].count = count;
cpu->tlb_messages_count++;
}
spinlock_unlock(&cpu->lock);
}
/branches/rcu/kernel/generic/src/mm/backend_anon.c
72,13 → 72,11
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
uintptr_t frame;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
88,14 → 86,13
/*
* The area is shared, chances are that the mapping can be found
* in the pagemap of the address space area share info
* structure.
* in the pagemap of the address space area share info structure.
* In the case that the pagemap does not contain the respective
* mapping, a new frame is allocated and the mapping is created.
*/
mutex_lock(&area->sh_info->lock);
frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
if (!frame) {
bool allocate = true;
int i;
105,8 → 102,7
* Just a small workaround.
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
allocate = false;
break;
}
114,15 → 110,11
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
* Insert the address of the newly allocated
* frame to the pagemap.
* Insert the address of the newly allocated frame to the pagemap.
*/
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
}
}
frame_reference_add(ADDR2PFN(frame));
145,13 → 137,12
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/*
* Map 'page' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is
* being inserted into page tables.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
*/
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
171,6 → 162,9
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
 
/** Share the anonymous address space area.
190,8 → 184,7
* Copy used portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = area->used_space.leaf_head.next;
cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
205,19 → 198,14
pte_t *pte;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
}
 
}
}
mutex_unlock(&area->sh_info->lock);
/branches/rcu/kernel/generic/src/mm/backend_phys.c
32,8 → 32,7
 
/**
* @file
* @brief Backend for address space areas backed by contiguous physical
* memory.
* @brief Backend for address space areas backed by contiguous physical memory.
*/
 
#include <debug.h>
63,8 → 62,7
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
74,8 → 72,7
return AS_PF_FAULT;
 
ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
page_mapping_insert(AS, addr, base + (addr - area->base),
as_area_get_flags(area));
page_mapping_insert(AS, addr, base + (addr - area->base), as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
/branches/rcu/kernel/generic/src/mm/backend_elf.c
71,8 → 71,7
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
81,13 → 80,11
btree_node_t *leaf;
uintptr_t base, frame;
index_t i;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
ASSERT((addr >= entry->p_vaddr) &&
(addr < entry->p_vaddr + entry->p_memsz));
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
110,8 → 107,7
*/
 
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
found = true;
break;
}
119,10 → 115,8
}
if (frame || found) {
frame_reference_add(ADDR2PFN(frame));
page_mapping_insert(AS, addr, frame,
as_area_get_flags(area));
if (!used_space_insert(area,
ALIGN_DOWN(addr, PAGE_SIZE), 1))
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
mutex_unlock(&area->sh_info->lock);
return AS_PF_OK;
130,12 → 124,10
}
/*
* The area is either not shared or the pagemap does not contain the
* mapping.
* The area is either not shared or the pagemap does not contain the mapping.
*/
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE <
entry->p_vaddr + entry->p_filesz) {
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
/*
* Initialized portion of the segment. The memory is backed
* directly by the content of the ELF image. Pages are
146,22 → 138,18
*/
if (entry->p_flags & PF_W) {
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
dirty = true;
 
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
} else {
frame = KA2PA(base + i*FRAME_SIZE);
}
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >=
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
/*
* This is the uninitialized portion of the segment.
* It is not physically present in the ELF image.
170,13 → 158,11
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
} else {
189,15 → 175,12
size = entry->p_filesz - (i<<PAGE_WIDTH);
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE),
size);
dirty = true;
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
}
228,28 → 211,31
uintptr_t base;
index_t i;
ASSERT((page >= entry->p_vaddr) &&
(page < entry->p_vaddr + entry->p_memsz));
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
i = (page - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (page + PAGE_SIZE <
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (entry->p_flags & PF_W) {
/*
* Free the frame with the copy of writable segment
* data.
* Free the frame with the copy of writable segment data.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
} else {
/*
* The frame is either anonymous memory or the mixed case (i.e.
* lower part is backed by the ELF image and the upper is
* anonymous). In any case, a frame needs to be freed.
*/
frame_free(frame);
* The frame is either anonymous memory or the mixed case (i.e. lower
* part is backed by the ELF image and the upper is anonymous).
* In any case, a frame needs to be freed.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
}
 
274,12 → 260,10
* Find the node in which to start linear search.
*/
if (area->flags & AS_AREA_WRITE) {
node = list_get_instance(area->used_space.leaf_head.next,
btree_node_t, leaf_link);
node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
} else {
(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
if (!node)
node = leaf;
}
288,8 → 272,7
* Copy used anonymous portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
cur = cur->next) {
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
int i;
node = list_get_instance(cur, btree_node_t, leaf_link);
311,26 → 294,19
pte_t *pte;
/*
* Skip read-only pages that are backed by the
* ELF image.
* Skip read-only pages that are backed by the ELF image.
*/
if (!(area->flags & AS_AREA_WRITE))
if (base + (j + 1) * PAGE_SIZE <=
start_anon)
if (base + (j + 1)*PAGE_SIZE <= start_anon)
continue;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
}
}
/branches/rcu/kernel/generic/src/lib/rd.c
90,6 → 90,8
sysinfo_set_item_val("rd.size", NULL, dsize);
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
KA2PA((void *) header + hsize));
sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
PAGE_COLOR((uintptr_t) header + hsize));
 
return RE_OK;
}
/branches/rcu/kernel/generic/src/console/klog.c
90,6 → 90,8
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
PAGE_COLOR((uintptr_t) klog));
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
/branches/rcu/kernel/generic/src/time/clock.c
104,6 → 104,8
* physmem_map() the clock_parea.
*/
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
PAGE_COLOR(clock_parea.vbase));
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}