/tags/0.2.0.2/kernel/generic/src/sysinfo/sysinfo.c |
---|
0,0 → 1,311 |
/* |
* Copyright (C) 2006 Jakub Vana |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#include <sysinfo/sysinfo.h> |
#include <mm/slab.h> |
#include <print.h> |
#include <syscall/copy.h> |
/* Root of the global sysinfo tree; used whenever callers pass root == NULL. */
sysinfo_item_t *_root = NULL;
/** Look up an item in a sysinfo (sub)tree by dot-separated path.
 *
 * Walks the sibling list, comparing the first path component against each
 * item name. A full match of the last component returns the item; a match
 * of a middle component descends into the item's subtable.
 *
 * @param name    Dot-separated path (e.g. "a.b.c").
 * @param subtree Head of the sibling list to search; may be NULL.
 *
 * @return Matching item or NULL when the path cannot be resolved.
 */
static sysinfo_item_t *sysinfo_find_item(const char *name, sysinfo_item_t *subtree)
{
    while (subtree != NULL) {
        char *entry = subtree->name;
        int pos = 0;

        /* Advance over the common prefix of the path and this item's name. */
        while (entry[pos] && (name[pos] == entry[pos]))
            pos++;

        if ((!name[pos]) && (!entry[pos])) {
            /* Whole path consumed: this is the item. */
            return subtree;
        }

        if ((name[pos] == '.') && (!entry[pos])) {
            /* A middle component matched: descend into the subtable. */
            if (subtree->subinfo_type == SYSINFO_SUBINFO_TABLE)
                return sysinfo_find_item(name + pos + 1, subtree->subinfo.table);
            /* SYSINFO_SUBINFO_FUNCTION (subsystem-managed) or no subinfo. */
            return NULL;
        }

        /* No match; try the next sibling. */
        subtree = subtree->next;
    }

    return NULL;
}
static sysinfo_item_t *sysinfo_create_path(const char *name, sysinfo_item_t **psubtree);

/** Allocate a new item for the first component of @name and link it via @link.
 *
 * If more components follow, recurses via sysinfo_create_path() to build the
 * rest of the path under a fresh subtable.
 *
 * @param name Dot-separated path whose first component names the new item.
 * @param link Location to store the pointer to the new item.
 *
 * @return The leaf item of the created path.
 */
static sysinfo_item_t *sysinfo_new_item(const char *name, sysinfo_item_t **link)
{
    sysinfo_item_t *item = malloc(sizeof(sysinfo_item_t), 0);
    int i = 0, j;

    ASSERT(item);
    *link = item;
    item->next = NULL;
    item->val_type = SYSINFO_VAL_UNDEFINED;
    item->subinfo.table = NULL;

    /* Measure the first path component. */
    while (name[i] && (name[i] != '.'))
        i++;

    /* i component characters plus the terminating zero.
     * (The original allocated only i bytes and wrote one byte past the end.) */
    item->name = malloc(i + 1, 0);
    ASSERT(item->name);
    for (j = 0; j < i; j++)
        item->name[j] = name[j];
    item->name[j] = 0;

    if (name[i]) { /* name[i] == '.': more components follow */
        item->subinfo_type = SYSINFO_SUBINFO_TABLE;
        return sysinfo_create_path(name + i + 1, &(item->subinfo.table));
    }

    item->subinfo_type = SYSINFO_SUBINFO_NONE;
    return item;
}

/** Find or create the item named by a dot-separated path.
 *
 * Existing components are reused; missing ones are created. Returns NULL if
 * the path runs into subinfo managed by a subsystem, where nothing can be
 * attached.
 *
 * @param name     Dot-separated path.
 * @param psubtree Root of the (sub)tree to search/extend.
 *
 * @return The (possibly newly created) item, or NULL on conflict.
 */
static sysinfo_item_t *sysinfo_create_path(const char *name, sysinfo_item_t **psubtree)
{
    sysinfo_item_t *subtree = *psubtree;

    if (subtree == NULL)
        return sysinfo_new_item(name, psubtree);

    while (subtree != NULL) {
        int i = 0;
        char *a = (char *) name;
        char *b = subtree->name;

        while ((a[i] == b[i]) && (b[i]))
            i++;

        if ((!a[i]) && (!b[i])) /* Last name in path matches */
            return subtree;

        if ((a[i] == '.') && (!b[i])) { /* Middle name in path matches */
            if (subtree->subinfo_type == SYSINFO_SUBINFO_TABLE)
                return sysinfo_create_path(a + i + 1, &(subtree->subinfo.table));
            if (subtree->subinfo_type == SYSINFO_SUBINFO_NONE) {
                subtree->subinfo_type = SYSINFO_SUBINFO_TABLE;
                return sysinfo_create_path(a + i + 1, &(subtree->subinfo.table));
            }
            /* SYSINFO_SUBINFO_FUNCTION: subinfo managed by a subsystem. */
            return NULL;
        }

        /* No match: append a new sibling, or try the next one. */
        if (subtree->next == NULL)
            return sysinfo_new_item(name, &(subtree->next));
        subtree = subtree->next;
    }

    panic("Not reached\n");
    return NULL;
}
/** Set a plain value for the named item, creating the path if needed.
 *
 * @param name Dot-separated item path.
 * @param root Tree root; NULL selects the global tree.
 * @param val  Value to store.
 */
void sysinfo_set_item_val(const char *name, sysinfo_item_t **root, __native val)
{
    sysinfo_item_t *item;

    if (root == NULL)
        root = &_root;

    /* Returns the existing item or creates it; NULL means the path is
     * owned by a subsystem and cannot be set. */
    item = sysinfo_create_path(name, root);
    if (item == NULL)
        return;

    item->val.val = val;
    item->val_type = SYSINFO_VAL_VAL;
}
/** Attach a generator function to the named item, creating the path if needed.
 *
 * @param name Dot-separated item path.
 * @param root Tree root; NULL selects the global tree.
 * @param fn   Function producing the item's value on demand.
 */
void sysinfo_set_item_function(const char *name, sysinfo_item_t **root, sysinfo_val_fn_t fn)
{
    sysinfo_item_t *item;

    if (root == NULL)
        root = &_root;

    /* Returns the existing item or creates it; NULL means the path is
     * owned by a subsystem and cannot be set. */
    item = sysinfo_create_path(name, root);
    if (item == NULL)
        return;

    item->val.fn = fn;
    item->val_type = SYSINFO_VAL_FUNCTION;
}
/** Mark the named item as undefined, creating the path if needed.
 *
 * @param name Dot-separated item path.
 * @param root Tree root; NULL selects the global tree.
 */
void sysinfo_set_item_undefined(const char *name, sysinfo_item_t **root)
{
    sysinfo_item_t *item;

    if (root == NULL)
        root = &_root;

    item = sysinfo_create_path(name, root);
    if (item != NULL)
        item->val_type = SYSINFO_VAL_UNDEFINED;
}
/** Recursively print a sysinfo (sub)tree.
 *
 * @param proot Subtree to dump; NULL means the global tree.
 * @param depth Current nesting level, used for indentation.
 */
void sysinfo_dump(sysinfo_item_t **proot, int depth)
{
    sysinfo_item_t *root;
    if (proot == NULL)
        proot = &_root;
    root = *proot;
    while (root != NULL) {
        int i;
        __native val = 0;
        char *vtype = NULL;
        /* Indent according to nesting level. */
        for (i = 0; i < depth; i++)
            printf(" ");
        switch (root->val_type) {
        case SYSINFO_VAL_UNDEFINED:
            val = 0;
            vtype = "UND";
            break;
        case SYSINFO_VAL_VAL:
            val = root->val.val;
            vtype = "VAL";
            break;
        case SYSINFO_VAL_FUNCTION:
            /* Generator items are evaluated for display. */
            val = ((sysinfo_val_fn_t) (root->val.fn)) (root);
            vtype = "FUN";
            break;
        }
        /* NOTE(review): val is __native but printed with %d/%x — verify this
         * against the kernel printf's argument handling on 64-bit targets. */
        printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val, val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? "TAB" : "FUN"));
        if (root->subinfo_type == SYSINFO_SUBINFO_TABLE)
            sysinfo_dump(&(root -> subinfo.table), depth + 1);
        root = root->next;
    }
}
/** Look up the named item and return its value.
 *
 * @param name Dot-separated item path.
 * @param root Tree root; NULL selects the global tree.
 *
 * @return Value with .valid set when the item exists and is defined;
 *         {0, false} otherwise.
 */
sysinfo_rettype_t sysinfo_get_val(const char *name, sysinfo_item_t **root)
{
    // TODO: Implement Subsystem subinfo (by function implemented subinfo)
    sysinfo_rettype_t ret = {0, false};
    sysinfo_item_t *item;

    if (root == NULL)
        root = &_root;

    item = sysinfo_find_item(name, *root);
    if ((item == NULL) || (item->val_type == SYSINFO_VAL_UNDEFINED))
        return ret;

    ret.valid = true;
    if (item->val_type == SYSINFO_VAL_VAL)
        ret.val = item->val.val;
    else
        ret.val = ((sysinfo_val_fn_t) (item->val.fn)) (item);

    return ret;
}
/** Syscall: does the named sysinfo item exist and hold a defined value?
 *
 * @param ptr Userspace address of the zero-terminated item path.
 * @param len Length of the path, not counting the terminator.
 *
 * @return Nonzero if the item is valid, zero otherwise.
 */
__native sys_sysinfo_valid(__native ptr, __native len)
{
    sysinfo_rettype_t ret = {0, 0};
    char *str = malloc(len + 1, 0);

    ASSERT(str);
    /* Copy the path including its terminator and verify it really ends
     * with a zero byte at the declared length. */
    if ((copy_from_uspace(str, (void *) ptr, len + 1) == 0) && (str[len] == 0))
        ret = sysinfo_get_val(str, NULL);

    free(str);
    return ret.valid;
}
/** Syscall: fetch the value of the named sysinfo item.
 *
 * @param ptr Userspace address of the zero-terminated item path.
 * @param len Length of the path, not counting the terminator.
 *
 * @return The item's value, or 0 when it does not exist or is undefined.
 */
__native sys_sysinfo_value(__native ptr, __native len)
{
    sysinfo_rettype_t ret = {0, 0};
    char *str = malloc(len + 1, 0);

    ASSERT(str);
    /* Copy the path including its terminator and verify it really ends
     * with a zero byte at the declared length. */
    if ((copy_from_uspace(str, (void *) ptr, len + 1) == 0) && (str[len] == 0))
        ret = sysinfo_get_val(str, NULL);

    free(str);
    return ret.val;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/console/cmd.c |
---|
0,0 → 1,811 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericconsole |
* @{ |
*/ |
/** |
* @file cmd.c |
* @brief Kernel console command wrappers. |
* |
* This file is meant to contain all wrapper functions for |
* all kconsole commands. The point is in separating |
* kconsole specific wrappers from kconsole-unaware functions |
* from other subsystems. |
*/ |
#include <console/cmd.h> |
#include <console/console.h> |
#include <console/kconsole.h> |
#include <print.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <func.h> |
#include <macros.h> |
#include <debug.h> |
#include <symtab.h> |
#include <cpu.h> |
#include <mm/tlb.h> |
#include <arch/mm/tlb.h> |
#include <mm/frame.h> |
#include <main/version.h> |
#include <mm/slab.h> |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <ipc/ipc.h> |
#include <ipc/irq.h> |
/* Data and methods for 'help' command. */
static int cmd_help(cmd_arg_t *argv);
static cmd_info_t help_info = {
    .name = "help",
    .description = "List of supported commands.",
    .func = cmd_help,
    .argc = 0
};

/* 'exit' has no .func attached — presumably recognized directly by the
 * kconsole loop; TODO confirm against kconsole.c. */
static cmd_info_t exit_info = {
    .name = "exit",
    .description = "Exit kconsole",
    .argc = 0
};

/* Data and methods for 'continue' command. */
static int cmd_continue(cmd_arg_t *argv);
static cmd_info_t continue_info = {
    .name = "continue",
    .description = "Return console back to userspace.",
    .func = cmd_continue,
    .argc = 0
};

/* Data and methods for 'description' command. */
static int cmd_desc(cmd_arg_t *argv);
static void desc_help(void);
static char desc_buf[MAX_CMDLINE+1];    /* Argument storage for 'describe'. */
static cmd_arg_t desc_argv = {
    .type = ARG_TYPE_STRING,
    .buffer = desc_buf,
    .len = sizeof(desc_buf)
};
static cmd_info_t desc_info = {
    .name = "describe",
    .description = "Describe specified command.",
    .help = desc_help,
    .func = cmd_desc,
    .argc = 1,
    .argv = &desc_argv
};
/* Data and methods for 'symaddr' command. */
static int cmd_symaddr(cmd_arg_t *argv);
static char symaddr_buf[MAX_CMDLINE+1];
static cmd_arg_t symaddr_argv = {
    .type = ARG_TYPE_STRING,
    .buffer = symaddr_buf,
    .len = sizeof(symaddr_buf)
};
static cmd_info_t symaddr_info = {
    .name = "symaddr",
    .description = "Return symbol address.",
    .func = cmd_symaddr,
    .argc = 1,
    .argv = &symaddr_argv
};

/* Data and methods for 'set4' command: destination string + integer value. */
static char set_buf[MAX_CMDLINE+1];
static int cmd_set4(cmd_arg_t *argv);
static cmd_arg_t set4_argv[] = {
    {
        .type = ARG_TYPE_STRING,
        .buffer = set_buf,
        .len = sizeof(set_buf)
    },
    {
        .type = ARG_TYPE_INT
    }
};
static cmd_info_t set4_info = {
    .name = "set4",
    .description = "set <dest_addr> <value> - 4byte version",
    .func = cmd_set4,
    .argc = 2,
    .argv = set4_argv
};

/* Data and methods for 'call0' command. The buffers below are shared with
 * the call1/call2/call3 argument vectors defined further down. */
static char call0_buf[MAX_CMDLINE+1];
static char carg1_buf[MAX_CMDLINE+1];
static char carg2_buf[MAX_CMDLINE+1];
static char carg3_buf[MAX_CMDLINE+1];
static int cmd_call0(cmd_arg_t *argv);
static cmd_arg_t call0_argv = {
    .type = ARG_TYPE_STRING,
    .buffer = call0_buf,
    .len = sizeof(call0_buf)
};
static cmd_info_t call0_info = {
    .name = "call0",
    .description = "call0 <function> -> call function().",
    .func = cmd_call0,
    .argc = 1,
    .argv = &call0_argv
};
/* Data and methods for 'call1' command. */
static int cmd_call1(cmd_arg_t *argv);
static cmd_arg_t call1_argv[] = {
    {
        .type = ARG_TYPE_STRING,
        .buffer = call0_buf,
        .len = sizeof(call0_buf)
    },
    {
        .type = ARG_TYPE_VAR,
        .buffer = carg1_buf,
        .len = sizeof(carg1_buf)
    }
};
static cmd_info_t call1_info = {
    .name = "call1",
    .description = "call1 <function> <arg1> -> call function(arg1).",
    .func = cmd_call1,
    .argc = 2,
    .argv = call1_argv
};

/* Data and methods for 'call2' command. */
static int cmd_call2(cmd_arg_t *argv);
static cmd_arg_t call2_argv[] = {
    {
        .type = ARG_TYPE_STRING,
        .buffer = call0_buf,
        .len = sizeof(call0_buf)
    },
    {
        .type = ARG_TYPE_VAR,
        .buffer = carg1_buf,
        .len = sizeof(carg1_buf)
    },
    {
        .type = ARG_TYPE_VAR,
        .buffer = carg2_buf,
        .len = sizeof(carg2_buf)
    }
};
static cmd_info_t call2_info = {
    .name = "call2",
    .description = "call2 <function> <arg1> <arg2> -> call function(arg1,arg2).",
    .func = cmd_call2,
    .argc = 3,
    .argv = call2_argv
};

/* Data and methods for 'call3' command. */
static int cmd_call3(cmd_arg_t *argv);
static cmd_arg_t call3_argv[] = {
    {
        .type = ARG_TYPE_STRING,
        .buffer = call0_buf,
        .len = sizeof(call0_buf)
    },
    {
        .type = ARG_TYPE_VAR,
        .buffer = carg1_buf,
        .len = sizeof(carg1_buf)
    },
    {
        .type = ARG_TYPE_VAR,
        .buffer = carg2_buf,
        .len = sizeof(carg2_buf)
    },
    {
        .type = ARG_TYPE_VAR,
        .buffer = carg3_buf,
        .len = sizeof(carg3_buf)
    }
};
static cmd_info_t call3_info = {
    .name = "call3",
    .description = "call3 <function> <arg1> <arg2> <arg3> -> call function(arg1,arg2,arg3).",
    .func = cmd_call3,
    .argc = 4,
    .argv = call3_argv
};
/* Data and methods for 'halt' command. */
static int cmd_halt(cmd_arg_t *argv);
static cmd_info_t halt_info = {
    .name = "halt",
    .description = "Halt the kernel.",
    .func = cmd_halt,
    .argc = 0
};

/* Data and methods for 'tlb' command. Non-static: referenced elsewhere. */
static int cmd_tlb(cmd_arg_t *argv);
cmd_info_t tlb_info = {
    .name = "tlb",
    .description = "Print TLB of current processor.",
    .help = NULL,
    .func = cmd_tlb,
    .argc = 0,
    .argv = NULL
};

/* Data and methods for 'threads' command. */
static int cmd_threads(cmd_arg_t *argv);
static cmd_info_t threads_info = {
    .name = "threads",
    .description = "List all threads.",
    .func = cmd_threads,
    .argc = 0
};

/* Data and methods for 'tasks' command. */
static int cmd_tasks(cmd_arg_t *argv);
static cmd_info_t tasks_info = {
    .name = "tasks",
    .description = "List all tasks.",
    .func = cmd_tasks,
    .argc = 0
};

/* Data and methods for 'scheduler' command. */
static int cmd_sched(cmd_arg_t *argv);
static cmd_info_t sched_info = {
    .name = "scheduler",
    .description = "List all scheduler information.",
    .func = cmd_sched,
    .argc = 0
};

/* Data and methods for 'slabs' command. */
static int cmd_slabs(cmd_arg_t *argv);
static cmd_info_t slabs_info = {
    .name = "slabs",
    .description = "List slab caches.",
    .func = cmd_slabs,
    .argc = 0
};

/* Data and methods for 'zones' command */
static int cmd_zones(cmd_arg_t *argv);
static cmd_info_t zones_info = {
    .name = "zones",
    .description = "List of memory zones.",
    .func = cmd_zones,
    .argc = 0
};

/* Data and methods for 'ipc_task' command */
static int cmd_ipc_task(cmd_arg_t *argv);
static cmd_arg_t ipc_task_argv = {
    .type = ARG_TYPE_INT,
};
static cmd_info_t ipc_task_info = {
    .name = "ipc_task",
    .description = "ipc_task <taskid> Show IPC information of given task.",
    .func = cmd_ipc_task,
    .argc = 1,
    .argv = &ipc_task_argv
};

/* Data and methods for 'zone' command */
static int cmd_zone(cmd_arg_t *argv);
static cmd_arg_t zone_argv = {
    .type = ARG_TYPE_INT,
};
static cmd_info_t zone_info = {
    .name = "zone",
    .description = "Show memory zone structure.",
    .func = cmd_zone,
    .argc = 1,
    .argv = &zone_argv
};
/* Data and methods for 'cpus' command. Non-static: referenced elsewhere. */
static int cmd_cpus(cmd_arg_t *argv);
cmd_info_t cpus_info = {
    .name = "cpus",
    .description = "List all processors.",
    .help = NULL,
    .func = cmd_cpus,
    .argc = 0,
    .argv = NULL
};

/* Data and methods for 'version' command. Non-static: referenced elsewhere. */
static int cmd_version(cmd_arg_t *argv);
cmd_info_t version_info = {
    .name = "version",
    .description = "Print version information.",
    .help = NULL,
    .func = cmd_version,
    .argc = 0,
    .argv = NULL
};

/* NULL-terminated list of built-in commands, registered by cmd_init(). */
static cmd_info_t *basic_commands[] = {
    &call0_info,
    &call1_info,
    &call2_info,
    &call3_info,
    &continue_info,
    &cpus_info,
    &desc_info,
    &exit_info,
    &halt_info,
    &help_info,
    &ipc_task_info,
    &set4_info,
    &slabs_info,
    &symaddr_info,
    &sched_info,
    &threads_info,
    &tasks_info,
    &tlb_info,
    &version_info,
    &zones_info,
    &zone_info,
    NULL
};
/** Initialize command info structure. |
* |
* @param cmd Command info structure. |
* |
*/ |
void cmd_initialize(cmd_info_t *cmd) |
{ |
spinlock_initialize(&cmd->lock, "cmd"); |
link_initialize(&cmd->link); |
} |
/** Initialize and register commands. */ |
void cmd_init(void) |
{ |
int i; |
for (i=0;basic_commands[i]; i++) { |
cmd_initialize(basic_commands[i]); |
if (!cmd_register(basic_commands[i])) |
panic("could not register command %s\n", |
basic_commands[i]->name); |
} |
} |
/** List supported commands. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_help(cmd_arg_t *argv) |
{ |
link_t *cur; |
spinlock_lock(&cmd_lock); |
for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { |
cmd_info_t *hlp; |
hlp = list_get_instance(cur, cmd_info_t, link); |
spinlock_lock(&hlp->lock); |
printf("%s - %s\n", hlp->name, hlp->description); |
spinlock_unlock(&hlp->lock); |
} |
spinlock_unlock(&cmd_lock); |
return 1; |
} |
/** Describe specified command. |
* |
* @param argv Argument vector. |
* |
* @return 0 on failure, 1 on success. |
*/ |
int cmd_desc(cmd_arg_t *argv) |
{ |
link_t *cur; |
spinlock_lock(&cmd_lock); |
for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) { |
cmd_info_t *hlp; |
hlp = list_get_instance(cur, cmd_info_t, link); |
spinlock_lock(&hlp->lock); |
if (strncmp(hlp->name, (const char *) argv->buffer, strlen(hlp->name)) == 0) { |
printf("%s - %s\n", hlp->name, hlp->description); |
if (hlp->help) |
hlp->help(); |
spinlock_unlock(&hlp->lock); |
break; |
} |
spinlock_unlock(&hlp->lock); |
} |
spinlock_unlock(&cmd_lock); |
return 1; |
} |
/** Search the kernel symbol table and print matches for the argument.
 *
 * @param argv argv->buffer holds the (possibly partial) symbol name.
 *
 * @return Always returns 1.
 */
int cmd_symaddr(cmd_arg_t *argv)
{
    symtab_print_search(argv->buffer);
    return 1;
}
/** Call function with zero parameters.
 *
 * Resolves the symbol named in argv->buffer and invokes it, printing the
 * returned value.
 *
 * @param argv argv->buffer holds the function's symbol name.
 *
 * @return Always returns 1.
 */
int cmd_call0(cmd_arg_t *argv)
{
    __address symaddr;
    char *symbol;
    __native (*f)(void);
#ifdef ia64
    /* On ia64 an indirect call goes through a function descriptor:
     * a (code address, global pointer) pair, not a plain address. */
    struct {
        __native f;
        __native gp;
    } fptr;
#endif

    symaddr = get_symbol_addr(argv->buffer);
    if (!symaddr)
        printf("Symbol %s not found.\n", argv->buffer);
    else if (symaddr == (__address) -1) {
        /* -1 from get_symbol_addr() flags an ambiguous (duplicate) name. */
        symtab_print_search(argv->buffer);
        printf("Duplicate symbol, be more specific.\n");
    } else {
        symbol = get_symtab_entry(symaddr);
        printf("Calling f(): %.*p: %s\n", sizeof(__address) * 2, symaddr, symbol);
#ifdef ia64
        fptr.f = symaddr;
        /* Borrow the kernel's gp from an existing descriptor (cmd_call2). */
        fptr.gp = ((__native *) cmd_call2)[1];
        f = (__native (*)(void)) &fptr;
#else
        f = (__native (*)(void)) symaddr;
#endif
        printf("Result: %#zx\n", f());
    }
    return 1;
}
/** Call function with one parameter.
 *
 * Resolves the symbol named in argv->buffer and invokes it with
 * argv[1].intval, printing the returned value.
 *
 * @param argv argv->buffer = symbol name, argv[1].intval = argument.
 *
 * @return Always returns 1.
 */
int cmd_call1(cmd_arg_t *argv)
{
    __address symaddr;
    char *symbol;
    __native (*f)(__native,...);
    __native arg1 = argv[1].intval;
#ifdef ia64
    /* ia64 function descriptor: (code address, global pointer) pair. */
    struct {
        __native f;
        __native gp;
    } fptr;
#endif

    symaddr = get_symbol_addr(argv->buffer);
    if (!symaddr)
        printf("Symbol %s not found.\n", argv->buffer);
    else if (symaddr == (__address) -1) {
        /* -1 from get_symbol_addr() flags an ambiguous (duplicate) name. */
        symtab_print_search(argv->buffer);
        printf("Duplicate symbol, be more specific.\n");
    } else {
        symbol = get_symtab_entry(symaddr);
        printf("Calling f(%#zx): %.*p: %s\n", arg1, sizeof(__address) * 2, symaddr, symbol);
#ifdef ia64
        fptr.f = symaddr;
        /* Borrow the kernel's gp from an existing descriptor (cmd_call2). */
        fptr.gp = ((__native *) cmd_call2)[1];
        f = (__native (*)(__native,...)) &fptr;
#else
        f = (__native (*)(__native,...)) symaddr;
#endif
        printf("Result: %#zx\n", f(arg1));
    }
    return 1;
}
/** Call function with two parameters.
 *
 * Resolves the symbol named in argv->buffer and invokes it with
 * argv[1].intval and argv[2].intval, printing the returned value.
 *
 * @param argv argv->buffer = symbol name, argv[1..2].intval = arguments.
 *
 * @return Always returns 1.
 */
int cmd_call2(cmd_arg_t *argv)
{
    __address symaddr;
    char *symbol;
    __native (*f)(__native,__native,...);
    __native arg1 = argv[1].intval;
    __native arg2 = argv[2].intval;
#ifdef ia64
    /* ia64 function descriptor: (code address, global pointer) pair. */
    struct {
        __native f;
        __native gp;
    } fptr;
#endif

    symaddr = get_symbol_addr(argv->buffer);
    if (!symaddr)
        printf("Symbol %s not found.\n", argv->buffer);
    else if (symaddr == (__address) -1) {
        /* -1 from get_symbol_addr() flags an ambiguous (duplicate) name. */
        symtab_print_search(argv->buffer);
        printf("Duplicate symbol, be more specific.\n");
    } else {
        symbol = get_symtab_entry(symaddr);
        printf("Calling f(0x%zx,0x%zx): %.*p: %s\n",
               arg1, arg2, sizeof(__address) * 2, symaddr, symbol);
#ifdef ia64
        fptr.f = symaddr;
        /* Borrow the kernel's gp from this function's own descriptor. */
        fptr.gp = ((__native *) cmd_call2)[1];
        f = (__native (*)(__native,__native,...)) &fptr;
#else
        f = (__native (*)(__native,__native,...)) symaddr;
#endif
        printf("Result: %#zx\n", f(arg1, arg2));
    }
    return 1;
}
/** Call function with three parameters.
 *
 * Resolves the symbol named in argv->buffer and invokes it with
 * argv[1..3].intval, printing the returned value.
 *
 * @param argv argv->buffer = symbol name, argv[1..3].intval = arguments.
 *
 * @return Always returns 1.
 */
int cmd_call3(cmd_arg_t *argv)
{
    __address symaddr;
    char *symbol;
    __native (*f)(__native,__native,__native,...);
    __native arg1 = argv[1].intval;
    __native arg2 = argv[2].intval;
    __native arg3 = argv[3].intval;
#ifdef ia64
    /* ia64 function descriptor: (code address, global pointer) pair. */
    struct {
        __native f;
        __native gp;
    } fptr;
#endif

    symaddr = get_symbol_addr(argv->buffer);
    if (!symaddr)
        printf("Symbol %s not found.\n", argv->buffer);
    else if (symaddr == (__address) -1) {
        /* -1 from get_symbol_addr() flags an ambiguous (duplicate) name. */
        symtab_print_search(argv->buffer);
        printf("Duplicate symbol, be more specific.\n");
    } else {
        symbol = get_symtab_entry(symaddr);
        printf("Calling f(0x%zx,0x%zx, 0x%zx): %.*p: %s\n",
               arg1, arg2, arg3, sizeof(__address) * 2, symaddr, symbol);
#ifdef ia64
        fptr.f = symaddr;
        /* Borrow the kernel's gp from an existing descriptor (cmd_call2). */
        fptr.gp = ((__native *) cmd_call2)[1];
        f = (__native (*)(__native,__native,__native,...)) &fptr;
#else
        f = (__native (*)(__native,__native,__native,...)) symaddr;
#endif
        printf("Result: %#zx\n", f(arg1, arg2, arg3));
    }
    return 1;
}
/** Print detailed usage of the 'describe' command. */
void desc_help(void)
{
    printf("Syntax: describe command_name\n");
}
/** Halt the kernel.
 *
 * @param argv Argument vector (ignored).
 *
 * @return 0 on failure, 1 on success (never returns).
 */
int cmd_halt(cmd_arg_t *argv)
{
    halt();
    return 1;   /* Unreachable; halt() does not return. */
}
/** Command for printing TLB contents of the current processor.
 *
 * @param argv Not used.
 *
 * @return Always returns 1.
 */
int cmd_tlb(cmd_arg_t *argv)
{
    tlb_print();
    return 1;
}
/** Write 4 byte value to address */ |
int cmd_set4(cmd_arg_t *argv) |
{ |
__u32 *addr ; |
__u32 arg1 = argv[1].intval; |
bool pointer = false; |
if (((char *)argv->buffer)[0] == '*') { |
addr = (__u32 *) get_symbol_addr(argv->buffer+1); |
pointer = true; |
} else if (((char *)argv->buffer)[0] >= '0' && |
((char *)argv->buffer)[0] <= '9') |
addr = (__u32 *)atoi((char *)argv->buffer); |
else |
addr = (__u32 *)get_symbol_addr(argv->buffer); |
if (!addr) |
printf("Symbol %s not found.\n", argv->buffer); |
else if (addr == (__u32 *) -1) { |
symtab_print_search(argv->buffer); |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
if (pointer) |
addr = (__u32 *)(*(__native *)addr); |
printf("Writing 0x%x -> %.*p\n", arg1, sizeof(__address) * 2, addr); |
*addr = arg1; |
} |
return 1; |
} |
/** Command for listings SLAB caches |
* |
* @param argv Ignores |
* |
* @return Always 1 |
*/ |
int cmd_slabs(cmd_arg_t * argv) { |
slab_print_list(); |
return 1; |
} |
/** Command for listings Thread information |
* |
* @param argv Ignores |
* |
* @return Always 1 |
*/ |
int cmd_threads(cmd_arg_t * argv) { |
thread_print_list(); |
return 1; |
} |
/** Command for listings Task information |
* |
* @param argv Ignores |
* |
* @return Always 1 |
*/ |
int cmd_tasks(cmd_arg_t * argv) { |
task_print_list(); |
return 1; |
} |
/** Command for listings Thread information |
* |
* @param argv Ignores |
* |
* @return Always 1 |
*/ |
int cmd_sched(cmd_arg_t * argv) { |
sched_print_list(); |
return 1; |
} |
/** Command for listing memory zones |
* |
* @param argv Ignored |
* |
* return Always 1 |
*/ |
int cmd_zones(cmd_arg_t * argv) { |
zone_print_list(); |
return 1; |
} |
/** Command for memory zone details |
* |
* @param argv Integer argument from cmdline expected |
* |
* return Always 1 |
*/ |
int cmd_zone(cmd_arg_t * argv) { |
zone_print_one(argv[0].intval); |
return 1; |
} |
/** Command for printing task ipc details |
* |
* @param argv Integer argument from cmdline expected |
* |
* return Always 1 |
*/ |
int cmd_ipc_task(cmd_arg_t * argv) { |
ipc_print_task(argv[0].intval); |
return 1; |
} |
/** Command for listing processors. |
* |
* @param argv Ignored. |
* |
* return Always 1. |
*/ |
int cmd_cpus(cmd_arg_t *argv) |
{ |
cpu_list(); |
return 1; |
} |
/** Command for printing kernel version. |
* |
* @param argv Ignored. |
* |
* return Always 1. |
*/ |
int cmd_version(cmd_arg_t *argv) |
{ |
version_print(); |
return 1; |
} |
/** Command for returning console back to userspace. |
* |
* @param argv Ignored. |
* |
* return Always 1. |
*/ |
int cmd_continue(cmd_arg_t *argv) |
{ |
printf("The kernel will now relinquish the console.\n"); |
printf("Use userspace controls to redraw the screen.\n"); |
arch_release_console(); |
ipc_irq_send_msg(IPC_IRQ_KBDRESTART, 0, 0, 0); |
return 1; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/console/console.c |
---|
0,0 → 1,170 |
/* |
* Copyright (C) 2003 Josef Cejka |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericconsole |
* @{ |
*/ |
/** @file |
*/ |
#include <console/console.h> |
#include <console/chardev.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <arch.h> |
#include <func.h> |
#include <print.h> |
#include <atomic.h> |
#define BUFLEN 2048
/* Ring buffer backing the "null" stdout: keeps the last BUFLEN characters
 * so early output can still be inspected from a debugger. */
static char debug_buffer[BUFLEN];
static size_t offset = 0;   /* Next write position in debug_buffer. */
/** Fallback stdout writer that never prints but never fails.
 *
 * Saves data in a ring buffer so that it can be retrieved in the debugger.
 *
 * @param d  Character device (unused).
 * @param ch Character to store.
 */
static void null_putchar(chardev_t *d, const char ch)
{
    /* Wrap around, overwriting the oldest data. */
    if (offset >= BUFLEN)
        offset = 0;
    debug_buffer[offset++] = ch;
}
/* Operations of the always-succeeding, never-printing stdout device. */
static chardev_operations_t null_stdout_ops = {
    .write = null_putchar
};

chardev_t null_stdout = {
    .name = "null",
    .op = &null_stdout_ops
};

/** Standard input character device. */
chardev_t *stdin = NULL;
/** Standard output character device; starts as the null device. */
chardev_t *stdout = &null_stdout;
/** Get character from character device. Do not echo character.
 *
 * Sleeps on the device's wait queue until a character is available, then
 * removes it from the device buffer under the device lock.
 *
 * @param chardev Character device.
 *
 * @return Character read.
 */
__u8 _getc(chardev_t *chardev)
{
    __u8 ch;
    ipl_t ipl;
    if (atomic_get(&haltstate)) {
        /* If we are here, we are hopefully on the processor that
         * issued the 'halt' command, so proceed to read the character
         * directly from input.
         */
        if (chardev->op->read)
            return chardev->op->read(chardev);
        /* No other way of interacting with the user; halt. */
        if (CPU)
            printf("cpu%d: ", CPU->id);
        else
            printf("cpu: ");
        printf("halted - no kconsole\n");
        cpu_halt();
    }
    /* Block until the device driver wakes us with pending input. */
    waitq_sleep(&chardev->wq);
    ipl = interrupts_disable();
    spinlock_lock(&chardev->lock);
    /* (index - counter) % CHARDEV_BUFLEN — presumably the position of the
     * oldest unread character in the device ring buffer; verify against
     * the chardev driver's writer side. */
    ch = chardev->buffer[(chardev->index - chardev->counter) % CHARDEV_BUFLEN];
    chardev->counter--;
    spinlock_unlock(&chardev->lock);
    interrupts_restore(ipl);
    /* Let the device resume producing input (e.g. re-enable its IRQ). */
    chardev->op->resume(chardev);
    return ch;
}
/** Get string from character device. |
* |
* Read characters from character device until first occurrence |
* of newline character. |
* |
* @param chardev Character device. |
* @param buf Buffer where to store string terminated by '\0'. |
* @param buflen Size of the buffer. |
* |
* @return Number of characters read. |
*/ |
count_t gets(chardev_t *chardev, char *buf, size_t buflen) |
{ |
index_t index = 0; |
char ch; |
while (index < buflen) { |
ch = _getc(chardev); |
if (ch == '\b') { |
if (index > 0) { |
index--; |
/* Space backspace, space */ |
putchar('\b'); |
putchar(' '); |
putchar('\b'); |
} |
continue; |
} |
putchar(ch); |
if (ch == '\n') { /* end of string => write 0, return */ |
buf[index] = '\0'; |
return (count_t) index; |
} |
buf[index++] = ch; |
} |
return (count_t) index; |
} |
/** Get character from device & echo it to screen */ |
__u8 getc(chardev_t *chardev) |
{ |
__u8 ch; |
ch = _getc(chardev); |
putchar(ch); |
return ch; |
} |
void putchar(char c) |
{ |
if (stdout->op->write) |
stdout->op->write(stdout, c); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/console/kconsole.c |
---|
0,0 → 1,631 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericconsole |
* @{ |
*/ |
/** |
* @file kconsole.c |
* @brief Kernel console. |
* |
* This file contains kernel thread managing the kernel console. |
*/ |
#include <console/kconsole.h> |
#include <console/console.h> |
#include <console/chardev.h> |
#include <console/cmd.h> |
#include <print.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <macros.h> |
#include <debug.h> |
#include <func.h> |
#include <symtab.h> |
#include <macros.h> |
/** Simple kernel console. |
* |
* The console is realized by kernel thread kconsole. |
* It doesn't understand any useful command on its own, |
* but makes it possible for other kernel subsystems to |
* register their own commands. |
*/ |
/** Locking. |
* |
* There is a list of cmd_info_t structures. This list |
* is protected by cmd_lock spinlock. Note that specially |
* the link elements of cmd_info_t are protected by |
* this lock. |
* |
* Each cmd_info_t also has its own lock, which protects |
* all elements thereof except the link element. |
* |
* cmd_lock must be acquired before any cmd_info lock. |
* When locking two cmd info structures, structure with |
* lower address must be locked first. |
*/ |
SPINLOCK_INITIALIZE(cmd_lock);	/**< Lock protecting command list. */
LIST_INITIALIZE(cmd_head);	/**< Command list. */

/* Forward declarations of the command-line parser (defined below). */
static cmd_info_t *parse_cmdline(char *cmdline, size_t len);
static bool parse_argument(char *cmdline, size_t len, index_t *start, index_t *end);
/* Ring of previously entered command lines; clever_readline() edits
 * the current line directly inside this buffer. */
static char history[KCONSOLE_HISTORY][MAX_CMDLINE] = {};
/** Initialize kconsole data structures. */ |
void kconsole_init(void) |
{ |
int i; |
cmd_init(); |
for (i=0; i<KCONSOLE_HISTORY; i++) |
history[i][0] = '\0'; |
} |
/** Register kconsole command.
 *
 * Refuses registration if the same structure is already on the list
 * or if a command with the same name is already registered.
 *
 * @param cmd Structure describing the command.
 *
 * @return 0 on failure, 1 on success.
 */
int cmd_register(cmd_info_t *cmd)
{
    link_t *cur;

    spinlock_lock(&cmd_lock);

    /*
     * Make sure the command is not already listed.
     */
    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
        cmd_info_t *hlp;

        hlp = list_get_instance(cur, cmd_info_t, link);

        if (hlp == cmd) {
            /* The command is already there. */
            spinlock_unlock(&cmd_lock);
            return 0;
        }

        /* Avoid deadlock. */
        /* Lock the two cmd_info structures in address order, per the
         * locking rules stated at the top of this file. */
        if (hlp < cmd) {
            spinlock_lock(&hlp->lock);
            spinlock_lock(&cmd->lock);
        } else {
            spinlock_lock(&cmd->lock);
            spinlock_lock(&hlp->lock);
        }

        /* Refuse a second command with an identical name. */
        if ((strncmp(hlp->name,
                cmd->name, max(strlen(cmd->name),
                strlen(hlp->name))) == 0)) {
            /* The command is already there. */
            spinlock_unlock(&hlp->lock);
            spinlock_unlock(&cmd->lock);
            spinlock_unlock(&cmd_lock);
            return 0;
        }

        spinlock_unlock(&hlp->lock);
        spinlock_unlock(&cmd->lock);
    }

    /*
     * Now the command can be added.
     */
    list_append(&cmd->link, &cmd_head);

    spinlock_unlock(&cmd_lock);
    return 1;
}
/** Emit the character @ch @count times to the console. */
static void rdln_print_c(char ch, int count)
{
    int n;

    for (n = 0; n < count; n++)
        putchar(ch);
}
/** Insert character to string.
 *
 * Shifts str[pos..strlen(str)-1] one place right and stores @ch at
 * index @pos. Caller must guarantee room for one more character.
 *
 * NOTE(review): the terminating NUL is NOT shifted — the result is
 * only NUL-terminated if the byte past the old end already happens
 * to be zero. clever_readline() tracks the length separately in
 * curlen and re-terminates on return, but the strlen() here on the
 * next insertion may then scan stale bytes — verify.
 */
static void insert_char(char *str, char ch, int pos)
{
    int i;

    for (i=strlen(str);i > pos; i--)
        str[i] = str[i-1];
    str[pos] = ch;
}
/** Try to find a command beginning with prefix.
 *
 * Scans the registered-command list starting at *startpos for the
 * first command whose name begins with @name. On a match, *startpos
 * is left pointing at the matching node so the caller can resume the
 * scan from the next node.
 *
 * NOTE(review): the returned pointer aims into the command's name
 * after cmd_lock has been dropped — this relies on commands never
 * being unregistered; verify.
 *
 * @param name     Prefix to match.
 * @param startpos In/out list position; pass *startpos == NULL to
 *                 start from the list head.
 *
 * @return Pointer just past the matched prefix inside the command
 *         name, or NULL when no further match exists.
 */
static const char * cmdtab_search_one(const char *name,link_t **startpos)
{
    int namelen = strlen(name);
    const char *curname;

    spinlock_lock(&cmd_lock);

    if (!*startpos)
        *startpos = cmd_head.next;

    for (;*startpos != &cmd_head;*startpos = (*startpos)->next) {
        cmd_info_t *hlp;
        hlp = list_get_instance(*startpos, cmd_info_t, link);

        curname = hlp->name;
        /* A candidate shorter than the prefix cannot match. */
        if (strlen(curname) < namelen)
            continue;
        if (strncmp(curname, name, namelen) == 0) {
            spinlock_unlock(&cmd_lock);
            return curname+namelen;
        }
    }

    spinlock_unlock(&cmd_lock);
    return NULL;
}
/** Command completion of the commands |
* |
* @param name - string to match, changed to hint on exit |
* @return number of found matches |
*/ |
static int cmdtab_compl(char *name) |
{ |
char output[MAX_SYMBOL_NAME+1]; |
link_t *startpos = NULL; |
const char *foundtxt; |
int found = 0; |
int i; |
output[0] = '\0'; |
while ((foundtxt = cmdtab_search_one(name, &startpos))) { |
startpos = startpos->next; |
if (!found) |
strncpy(output, foundtxt, strlen(foundtxt)+1); |
else { |
for (i=0; output[i] && foundtxt[i] && output[i]==foundtxt[i]; i++) |
; |
output[i] = '\0'; |
} |
found++; |
} |
if (!found) |
return 0; |
if (found > 1 && !strlen(output)) { |
printf("\n"); |
startpos = NULL; |
while ((foundtxt = cmdtab_search_one(name, &startpos))) { |
cmd_info_t *hlp; |
hlp = list_get_instance(startpos, cmd_info_t, link); |
printf("%s - %s\n", hlp->name, hlp->description); |
startpos = startpos->next; |
} |
} |
strncpy(name, output, MAX_SYMBOL_NAME); |
return found; |
} |
/** Read one command line from @input with line editing.
 *
 * Supports backspace, tab completion (commands at the start of the
 * line, symbols elsewhere), Delete/Home/End/Left/Right escape
 * sequences and an Up/Down history of KCONSOLE_HISTORY entries.
 * The line is edited in place inside the static history buffer, so
 * the returned pointer is only valid until the slot is reused.
 *
 * @param prompt Prompt printed before reading.
 * @param input  Character device to read from.
 *
 * @return Pointer to the NUL-terminated line inside history[].
 */
static char * clever_readline(const char *prompt, chardev_t *input)
{
    static int histposition = 0;

    char tmp[MAX_CMDLINE+1];
    int curlen = 0, position = 0;
    char *current = history[histposition];
    int i;
    char mod; /* Command Modifier */
    char c;

    printf("%s> ", prompt);
    while (1) {
        c = _getc(input);
        if (c == '\n') {
            putchar(c);
            break;
        } if (c == '\b') { /* Backspace */
            if (position == 0)
                continue;
            /* Close the gap left by the deleted character. */
            for (i=position; i<curlen;i++)
                current[i-1] = current[i];
            curlen--;
            position--;
            /* Redraw the tail and blank the leftover last column. */
            putchar('\b');
            for (i=position;i<curlen;i++)
                putchar(current[i]);
            putchar(' ');
            rdln_print_c('\b',curlen-position+1);
            continue;
        }
        if (c == '\t') { /* Tabulator */
            int found;

            /* Move to the end of the word */
            for (;position<curlen && current[position]!=' ';position++)
                putchar(current[position]);
            /* Copy to tmp last word */
            for (i=position-1;i >= 0 && current[i]!=' ' ;i--)
                ;
            /* If word begins with * or &, skip it */
            /* NOTE(review): tmp is inspected here BEFORE the strncpy
             * below fills it, i.e. this tests the previous completion
             * attempt's (or uninitialized) contents — looks like a
             * statement-ordering bug; verify against upstream. */
            if (tmp[0] == '*' || tmp[0] == '&')
                for (i=1;tmp[i];i++)
                    tmp[i-1] = tmp[i];
            i++; /* I is at the start of the word */
            strncpy(tmp, current+i, position-i+1);

            if (i==0) { /* Command completion */
                found = cmdtab_compl(tmp);
            } else { /* Symtab completion */
                found = symtab_compl(tmp);
            }

            if (found == 0)
                continue;
            /* Splice the hint into the edited line at the cursor. */
            for (i=0;tmp[i] && curlen < MAX_CMDLINE;i++,curlen++)
                insert_char(current, tmp[i], i+position);

            if (strlen(tmp) || found==1) { /* If we have a hint */
                for (i=position;i<curlen;i++)
                    putchar(current[i]);
                position += strlen(tmp);
                /* Add space to end */
                if (found == 1 && position == curlen && \
                    curlen < MAX_CMDLINE) {
                    current[position] = ' ';
                    curlen++;
                    position++;
                    putchar(' ');
                }
            } else { /* No hint, table was printed */
                printf("%s> ", prompt);
                for (i=0; i<curlen;i++)
                    putchar(current[i]);
                position += strlen(tmp);
            }
            rdln_print_c('\b', curlen-position);
            continue;
        }
        if (c == 0x1b) { /* Special command */
            mod = _getc(input);
            c = _getc(input);

            /* Only CSI (ESC [) and SS3 (ESC O) sequences are known. */
            if (mod != 0x5b && mod != 0x4f)
                continue;

            if (c == 0x33 && _getc(input) == 0x7e) {
                /* Delete */
                if (position == curlen)
                    continue;
                for (i=position+1; i<curlen;i++) {
                    putchar(current[i]);
                    current[i-1] = current[i];
                }
                putchar(' ');
                rdln_print_c('\b',curlen-position);
                curlen--;
            }
            else if (c == 0x48) { /* Home */
                rdln_print_c('\b',position);
                position = 0;
            }
            else if (c == 0x46) { /* End */
                for (i=position;i<curlen;i++)
                    putchar(current[i]);
                position = curlen;
            }
            else if (c == 0x44) { /* Left */
                if (position > 0) {
                    putchar('\b');
                    position--;
                }
                continue;
            }
            else if (c == 0x43) { /* Right */
                if (position < curlen) {
                    putchar(current[position]);
                    position++;
                }
                continue;
            }
            else if (c == 0x41 || c == 0x42) {
                /* Up,down */
                /* Erase the displayed line... */
                rdln_print_c('\b',position);
                rdln_print_c(' ',curlen);
                rdln_print_c('\b',curlen);

                if (c == 0x41) /* Up */
                    histposition--;
                else
                    histposition++;
                if (histposition < 0)
                    histposition = KCONSOLE_HISTORY -1 ;
                else
                    histposition = histposition % KCONSOLE_HISTORY;
                /* ...and show the recalled history entry instead. */
                current = history[histposition];
                printf("%s", current);
                curlen = strlen(current);
                position = curlen;
                continue;
            }
            continue;
        }
        if (curlen >= MAX_CMDLINE)
            continue;
        /* Ordinary character: insert at the cursor, redraw the tail
         * and park the terminal cursor back at the edit position. */
        insert_char(current, c, position);
        curlen++;
        for (i=position;i<curlen;i++)
            putchar(current[i]);
        position++;
        rdln_print_c('\b',curlen-position);
    }
    /* Keep non-empty lines by advancing to the next history slot. */
    if (curlen) {
        histposition++;
        histposition = histposition % KCONSOLE_HISTORY;
    }
    current[curlen] = '\0';
    return current;
}
/** Kernel console managing thread. |
* |
* @param prompt Kernel console prompt (e.g kconsole/panic). |
*/ |
void kconsole(void *prompt) |
{ |
cmd_info_t *cmd_info; |
count_t len; |
char *cmdline; |
if (!stdin) { |
printf("%s: no stdin\n", __FUNCTION__); |
return; |
} |
while (true) { |
cmdline = clever_readline(prompt, stdin); |
len = strlen(cmdline); |
if (!len) |
continue; |
cmd_info = parse_cmdline(cmdline, len); |
if (!cmd_info) |
continue; |
if (strncmp(cmd_info->name,"exit", \ |
min(strlen(cmd_info->name),5)) == 0) |
break; |
(void) cmd_info->func(cmd_info->argv); |
} |
} |
/** Parse an integer argument, optionally through the symbol table.
 *
 * Accepts either a decimal number or a symbol name. A leading '&'
 * yields the symbol's address; a leading '*' dereferences the value
 * (for both symbols and numeric addresses). A bare symbol name
 * yields the value stored at the symbol's address.
 *
 * NOTE(review): symname is filled by strncpy() bounded by
 * min(len+1, MAX_SYMBOL_NAME); for names of MAX_SYMBOL_NAME
 * characters or more it is left unterminated — verify that symbol
 * lengths are bounded below MAX_SYMBOL_NAME.
 *
 * @param text   Argument text (not necessarily NUL-terminated).
 * @param len    Number of characters in text.
 * @param result Where to store the parsed value.
 *
 * @return 0 on success, -1 if the symbol is missing or ambiguous.
 */
static int parse_int_arg(char *text, size_t len, __native *result)
{
    char symname[MAX_SYMBOL_NAME];
    __address symaddr;
    bool isaddr = false;
    bool isptr = false;

    /* If we get a name, try to find it in symbol table */
    if (text[0] == '&') {
        isaddr = true;
        text++;len--;
    } else if (text[0] == '*') {
        isptr = true;
        text++;len--;
    }
    if (text[0] < '0' || text[0] > '9') {
        strncpy(symname, text, min(len+1, MAX_SYMBOL_NAME));
        symaddr = get_symbol_addr(symname);
        if (!symaddr) {
            printf("Symbol %s not found.\n",symname);
            return -1;
        }
        if (symaddr == (__address) -1) {
            /* Ambiguous prefix: show the candidates. */
            printf("Duplicate symbol %s.\n",symname);
            symtab_print_search(symname);
            return -1;
        }
        if (isaddr)
            *result = (__native)symaddr;
        else if (isptr)
            *result = **((__native **)symaddr);
        else
            *result = *((__native *)symaddr);
    } else { /* It's a number - convert it */
        *result = atoi(text);
        if (isptr)
            *result = *((__native *)*result);
    }
    return 0;
}
/** Parse command line.
 *
 * Looks up the first word of the command line among the registered
 * commands, then converts the following words according to the
 * argument descriptors of the matched command. Parsed values are
 * stored directly into cmd->argv[].
 *
 * @param cmdline Command line as read from input device.
 * @param len Command line length.
 *
 * @return Structure describing the command, or NULL on parse error.
 */
cmd_info_t *parse_cmdline(char *cmdline, size_t len)
{
    index_t start = 0, end = 0;
    cmd_info_t *cmd = NULL;
    link_t *cur;
    int i;
    int error = 0;

    if (!parse_argument(cmdline, len, &start, &end)) {
        /* Command line did not contain alphanumeric word. */
        return NULL;
    }

    spinlock_lock(&cmd_lock);

    for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
        cmd_info_t *hlp;

        hlp = list_get_instance(cur, cmd_info_t, link);
        spinlock_lock(&hlp->lock);

        /* On a match we break out deliberately KEEPING hlp->lock
         * held; it is released on every exit path below. */
        if (strncmp(hlp->name, &cmdline[start], max(strlen(hlp->name),
            end-start+1)) == 0) {
            cmd = hlp;
            break;
        }

        spinlock_unlock(&hlp->lock);
    }

    spinlock_unlock(&cmd_lock);

    if (!cmd) {
        /* Unknown command. */
        printf("Unknown command.\n");
        return NULL;
    }

    /* cmd == hlp is locked */

    /*
     * The command line must be further analyzed and
     * the parameters therefrom must be matched and
     * converted to those specified in the cmd info
     * structure.
     */

    for (i = 0; i < cmd->argc; i++) {
        char *buf;
        start = end + 1;
        if (!parse_argument(cmdline, len, &start, &end)) {
            printf("Too few arguments.\n");
            spinlock_unlock(&cmd->lock);
            return NULL;
        }

        error = 0;
        switch (cmd->argv[i].type) {
        case ARG_TYPE_STRING:
            /* Copy the word, always leaving the buffer terminated. */
            buf = cmd->argv[i].buffer;
            strncpy(buf, (const char *) &cmdline[start], min((end - start) + 2, cmd->argv[i].len));
            buf[min((end - start) + 1, cmd->argv[i].len - 1)] = '\0';
            break;
        case ARG_TYPE_INT:
            if (parse_int_arg(cmdline+start, end-start+1,
                &cmd->argv[i].intval))
                error = 1;
            break;
        case ARG_TYPE_VAR:
            /* Quoted text is stored verbatim (without the quotes);
             * anything else is parsed as an integer/symbol value. */
            if (start != end && cmdline[start] == '"' && cmdline[end] == '"') {
                buf = cmd->argv[i].buffer;
                strncpy(buf, (const char *) &cmdline[start+1],
                    min((end-start), cmd->argv[i].len));
                buf[min((end - start), cmd->argv[i].len - 1)] = '\0';
                cmd->argv[i].intval = (__native) buf;
                cmd->argv[i].vartype = ARG_TYPE_STRING;
            } else if (!parse_int_arg(cmdline+start, end-start+1,
                    &cmd->argv[i].intval))
                cmd->argv[i].vartype = ARG_TYPE_INT;
            else {
                printf("Unrecognized variable argument.\n");
                error = 1;
            }
            break;
        case ARG_TYPE_INVALID:
        default:
            printf("invalid argument type\n");
            error = 1;
            break;
        }
    }

    if (error) {
        spinlock_unlock(&cmd->lock);
        return NULL;
    }

    /* Any trailing word means the user supplied too many arguments. */
    start = end + 1;
    if (parse_argument(cmdline, len, &start, &end)) {
        printf("Too many arguments.\n");
        spinlock_unlock(&cmd->lock);
        return NULL;
    }

    spinlock_unlock(&cmd->lock);
    return cmd;
}
/** Parse argument. |
* |
* Find start and end positions of command line argument. |
* |
* @param cmdline Command line as read from the input device. |
* @param len Number of characters in cmdline. |
* @param start On entry, 'start' contains pointer to the index |
* of first unprocessed character of cmdline. |
* On successful exit, it marks beginning of the next argument. |
* @param end Undefined on entry. On exit, 'end' points to the last character |
* of the next argument. |
* |
* @return false on failure, true on success. |
*/ |
bool parse_argument(char *cmdline, size_t len, index_t *start, index_t *end) |
{ |
int i; |
bool found_start = false; |
ASSERT(start != NULL); |
ASSERT(end != NULL); |
for (i = *start; i < len; i++) { |
if (!found_start) { |
if (is_white(cmdline[i])) |
(*start)++; |
else |
found_start = true; |
} else { |
if (is_white(cmdline[i])) |
break; |
} |
} |
*end = i - 1; |
return found_start; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/console/chardev.c |
---|
0,0 → 1,79 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericconsole |
* @{ |
*/ |
/** @file |
*/ |
#include <console/chardev.h> |
#include <putchar.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
/** Initialize character device. |
* |
* @param chardev Character device. |
* @param op Implementation of character device operations. |
*/ |
void chardev_initialize(char *name,chardev_t *chardev, |
chardev_operations_t *op) |
{ |
chardev->name = name; |
waitq_initialize(&chardev->wq); |
spinlock_initialize(&chardev->lock, "chardev"); |
chardev->counter = 0; |
chardev->index = 0; |
chardev->op = op; |
} |
/** Push character read from input character device.
 *
 * Appends the character to the device's circular buffer and wakes up
 * one thread sleeping on the device's wait queue.
 *
 * NOTE(review): counter is incremented without an upper-bound check
 * here; overflow is prevented by suspending the device one slot
 * before the buffer fills — presumably the consumer decrements
 * counter and resumes the device; verify against the reader side.
 *
 * @param chardev Character device.
 * @param ch Character being pushed.
 */
void chardev_push_character(chardev_t *chardev, __u8 ch)
{
    spinlock_lock(&chardev->lock);
    chardev->counter++;
    if (chardev->counter == CHARDEV_BUFLEN - 1) {
        /* buffer full => disable device interrupt */
        chardev->op->suspend(chardev);
    }

    chardev->buffer[chardev->index++] = ch;
    chardev->index = chardev->index % CHARDEV_BUFLEN; /* index modulo size of buffer */
    waitq_wakeup(&chardev->wq, WAKEUP_FIRST);
    spinlock_unlock(&chardev->lock);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/console/klog.c |
---|
0,0 → 1,110 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericklog |
* @{ |
*/ |
/** @file |
*/ |
#include <mm/frame.h> |
#include <sysinfo/sysinfo.h> |
#include <console/klog.h> |
#include <print.h> |
#include <ipc/irq.h> |
/* Order of frame to be allocated for klog communication */
#define KLOG_ORDER 0

static char *klog;	/* Circular log buffer shared with uspace (kernel VA). */
static int klogsize;	/* Size of the buffer in bytes. */
static int klogpos;	/* Current write offset within the buffer. */
SPINLOCK_INITIALIZE(klog_lock);	/* Serializes writers of klog/klogpos. */
/** Initialize kernel logging facility |
* |
* Allocate pages that are to be shared with uspace for console data. |
* The shared area is a circular buffer. Userspace application may |
* be notified on new data with indication of position and size |
* of the data within the circular buffer. |
*/ |
void klog_init(void) |
{ |
void *faddr; |
faddr = (void *)PFN2ADDR(frame_alloc(KLOG_ORDER, FRAME_ATOMIC)); |
if (!faddr) |
panic("Cannot allocate page for klog"); |
klog = (char *)PA2KA(faddr); |
sysinfo_set_item_val("klog.faddr", NULL, (__native)faddr); |
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER); |
klogsize = PAGE_SIZE << KLOG_ORDER; |
klogpos = 0; |
} |
/** Format a message into the klog circular buffer.
 *
 * Writes the formatted string at the current position and announces
 * the new data (offset + length) to userspace via an IPC IRQ
 * message. If the message does not fit in the remaining space the
 * position is reset to the start; messages longer than the whole
 * buffer are dropped.
 *
 * NOTE(review): after a wraparound reset the message is announced at
 * offset 0 without being re-formatted there — the bytes userspace
 * reads for that message appear stale/truncated; verify.
 */
static void klog_vprintf(const char *fmt, va_list args)
{
    int ret;
    va_list atst;

    va_copy(atst, args);
    spinlock_lock(&klog_lock);

    ret = vsnprintf(klog+klogpos, klogsize-klogpos, fmt, atst);
    if (ret >= klogsize-klogpos) {
        /* Output was truncated: restart from the buffer start. */
        klogpos = 0;
        if (ret >= klogsize)
            goto out;
    }
    ipc_irq_send_msg(IPC_IRQ_KLOG, klogpos, ret, 0);
    klogpos += ret;
    if (klogpos >= klogsize)
        klogpos = 0;

out:
    spinlock_unlock(&klog_lock);
    va_end(atst);
}
/** Printf a message to kernel-uspace log */
void klog_printf(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    klog_vprintf(fmt, ap);
    va_end(ap);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/proc/task.c |
---|
0,0 → 1,500 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Task management. |
*/ |
#include <main/uinit.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <panic.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <ipc/ipc.h> |
#include <security/cap.h> |
#include <memstr.h> |
#include <print.h> |
#include <elf.h> |
#include <errno.h> |
#include <syscall/copy.h> |
#include <console/klog.h> |
/* Number of stack pages handed to a freshly loaded userspace program. */
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif

/** Spinlock protecting the tasks_btree B+tree. */
SPINLOCK_INITIALIZE(tasks_lock);

/** B+tree of active tasks.
 *
 * The task is guaranteed to exist after it was found in the tasks_btree as long as:
 * @li the tasks_lock is held,
 * @li the task's lock is held when task's lock is acquired before releasing tasks_lock or
 * @li the task's refcount is greater than 0
 *
 */
btree_t tasks_btree;

/* Last task ID handed out; protected by tasks_lock. */
static task_id_t task_counter = 0;

/* Per-task service threads, defined below. */
static void ktaskclnp(void *arg);
static void ktaskgc(void *arg);
/** Initialize tasks |
* |
* Initialize kernel tasks support. |
* |
*/ |
void task_init(void) |
{ |
TASK = NULL; |
btree_create(&tasks_btree); |
} |
/** Create new task
 *
 * Create new task with no threads.
 *
 * The new task gets a reference on @as, a fresh unique task ID and
 * is inserted into the global tasks_btree. The @name pointer is
 * stored, not copied — the caller must keep it valid for the task's
 * lifetime.
 *
 * @param as Task's address space.
 * @param name Symbolic name.
 *
 * @return New task's structure
 *
 */
task_t *task_create(as_t *as, char *name)
{
    ipl_t ipl;
    task_t *ta;
    int i;

    ta = (task_t *) malloc(sizeof(task_t), 0);

    task_create_arch(ta);

    spinlock_initialize(&ta->lock, "task_ta_lock");
    list_initialize(&ta->th_head);
    ta->as = as;
    ta->name = name;
    ta->main_thread = NULL;
    ta->refcount = 0;
    ta->capabilities = 0;
    ta->accept_new_threads = true;

    ipc_answerbox_init(&ta->answerbox);
    for (i=0; i < IPC_MAX_PHONES;i++)
        ipc_phone_init(&ta->phones[i]);
    /* Wire phone 0 to ipc_phone_0 (presumably the naming service)
     * when one has been registered. */
    if (ipc_phone_0)
        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
    atomic_set(&ta->active_calls, 0);

    mutex_initialize(&ta->futexes_lock);
    btree_create(&ta->futexes);

    ipl = interrupts_disable();

    /*
     * Increment address space reference count.
     * TODO: Reconsider the locking scheme.
     */
    mutex_lock(&as->lock);
    as->refcount++;
    mutex_unlock(&as->lock);

    /* Assign the ID and publish the task under tasks_lock. */
    spinlock_lock(&tasks_lock);

    ta->taskid = ++task_counter;
    btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    return ta;
}
/** Destroy task.
 *
 * Releases the task's reference on its address space (destroying the
 * address space when this was the last reference) and frees the task
 * structure.
 *
 * NOTE(review): this also sets the per-processor current-task
 * pointer TASK to NULL, which suggests it may only be called in the
 * context of the dying task itself — verify callers.
 *
 * @param t Task to be destroyed.
 */
void task_destroy(task_t *t)
{
    task_destroy_arch(t);
    btree_destroy(&t->futexes);

    mutex_lock_active(&t->as->lock);
    if (--t->as->refcount == 0) {
        mutex_unlock(&t->as->lock);
        as_destroy(t->as);
        /*
         * t->as is destroyed.
         */
    } else {
        mutex_unlock(&t->as->lock);
    }

    free(t);
    TASK = NULL;
}
/** Create new task with 1 thread and run it
 *
 * Loads the ELF image at @program_addr into a fresh address space,
 * creates the task, a stack area, the main userspace thread (uinit)
 * and a companion ktaskgc thread that reaps the task when its main
 * thread exits.
 *
 * @param program_addr Address of program executable image.
 * @param name Program name.
 *
 * @return Task of the running program or NULL on error.
 */
task_t * task_run_program(void *program_addr, char *name)
{
    as_t *as;
    as_area_t *a;
    int rc;
    thread_t *t1, *t2;
    task_t *task;
    uspace_arg_t *kernel_uarg;

    as = as_create(0);
    ASSERT(as);

    rc = elf_load((elf_header_t *) program_addr, as);
    if (rc != EE_OK) {
        as_destroy(as);
        return NULL;
    }

    /* Arguments handed to uinit: entry point and initial stack. */
    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    kernel_uarg->uspace_entry = (void *) ((elf_header_t *) program_addr)->e_entry;
    kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
    kernel_uarg->uspace_thread_function = NULL;
    kernel_uarg->uspace_thread_arg = NULL;
    kernel_uarg->uspace_uarg = NULL;

    task = task_create(as, name);
    ASSERT(task);

    /*
     * Create the data as_area.
     */
    /* NOTE(review): the returned area 'a' is never checked or used —
     * an as_area_create() failure would go unnoticed; verify. */
    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
        USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);

    /*
     * Create the main thread.
     */
    t1 = thread_create(uinit, kernel_uarg, task, 0, "uinit");
    ASSERT(t1);

    /*
     * Create killer thread for the new task.
     */
    t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc");
    ASSERT(t2);
    thread_ready(t2);

    thread_ready(t1);

    return task;
}
/** Syscall for reading task ID from userspace.
 *
 * Copies the current task's 64-bit ID into the userspace buffer.
 *
 * @param uspace_task_id Userspace address of 8-byte buffer where to store current task ID.
 *
 * @return 0 on success or an error code from @ref errno.h.
 */
__native sys_task_get_id(task_id_t *uspace_task_id)
{
    /*
     * No need to acquire lock on TASK because taskid
     * remains constant for the lifespan of the task.
     */
    return (__native) copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid));
}
/** Find task structure corresponding to task ID.
 *
 * The tasks_lock must be already held by the caller of this function
 * and interrupts must be disabled.
 *
 * @param id Task ID.
 *
 * @return Task structure address or NULL if there is no such task ID.
 */
task_t *task_find_by_id(task_id_t id)
{
    btree_node_t *leaf;

    /* leaf is an output of btree_search we do not need here. */
    return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
}
/** Kill task.
 *
 * Removes the task from tasks_btree, spawns a ktaskclnp cleanup
 * thread inside it and interrupts all its other threads. Task 1 is
 * protected from being killed.
 *
 * @param id ID of the task to be killed.
 *
 * @return 0 on success or an error code from errno.h
 */
int task_kill(task_id_t id)
{
    ipl_t ipl;
    task_t *ta;
    thread_t *t;
    link_t *cur;

    if (id == 1)
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    if (!(ta = task_find_by_id(id))) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    /* Pin the task with a reference so it survives removal from
     * the tree while we still work with it. */
    spinlock_lock(&ta->lock);
    ta->refcount++;
    spinlock_unlock(&ta->lock);

    btree_remove(&tasks_btree, ta->taskid, NULL);

    spinlock_unlock(&tasks_lock);

    t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp");

    spinlock_lock(&ta->lock);
    ta->accept_new_threads = false;
    ta->refcount--;

    /*
     * Interrupt all threads except ktaskclnp.
     */
    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
        thread_t *thr;
        bool sleeping = false;

        thr = list_get_instance(cur, thread_t, th_link);
        if (thr == t)
            continue;

        spinlock_lock(&thr->lock);
        thr->interrupted = true;
        if (thr->state == Sleeping)
            sleeping = true;
        spinlock_unlock(&thr->lock);

        /* Sleeping threads must additionally be woken up. */
        if (sleeping)
            waitq_interrupt_sleep(thr);
    }

    spinlock_unlock(&ta->lock);
    interrupts_restore(ipl);

    /* thread_create() may have failed; only schedule on success. */
    if (t)
        thread_ready(t);

    return 0;
}
/** Print task list.
 *
 * Walks the leaf nodes of tasks_btree and prints one line per task,
 * including any connected IPC phones.
 */
void task_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            task_t *t;
            int j;

            t = (task_t *) node->value[i];

            spinlock_lock(&t->lock);
            /* NOTE(review): %zd is paired with pointer arguments
             * (t, t->as, callee) — relies on the kernel printf's
             * conversion semantics; verify. */
            printf("%s(%lld): address=%#zx, as=%#zx, ActiveCalls: %zd",
                t->name, t->taskid, t, t->as, atomic_get(&t->active_calls));
            for (j=0; j < IPC_MAX_PHONES; j++) {
                if (t->phones[j].callee)
                    printf(" Ph(%zd): %#zx ", j, t->phones[j].callee);
            }
            printf("\n");
            spinlock_unlock(&t->lock);
        }
    }

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
}
/** Kernel thread used to cleanup the task after it is killed.
 *
 * Repeatedly claims (via join_type = TaskClnp) and joins the task's
 * remaining threads — skipping itself and the main thread, which is
 * joined and detached by ktaskgc — until none remain, then cleans up
 * the task's IPC and futexes.
 */
void ktaskclnp(void *arg)
{
    ipl_t ipl;
    thread_t *t = NULL, *main_thread;
    link_t *cur;
    bool again;

    thread_detach(THREAD);

loop:
    ipl = interrupts_disable();
    spinlock_lock(&TASK->lock);

    main_thread = TASK->main_thread;

    /*
     * Find a thread to join.
     */
    again = false;
    for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
        t = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&t->lock);
        if (t == THREAD) {
            spinlock_unlock(&t->lock);
            continue;
        } else if (t == main_thread) {
            spinlock_unlock(&t->lock);
            continue;
        } else if (t->join_type != None) {
            /* Already claimed by someone else (e.g. ktaskgc);
             * come back to it later. */
            spinlock_unlock(&t->lock);
            again = true;
            continue;
        } else {
            /* Claim the thread for ourselves. */
            t->join_type = TaskClnp;
            spinlock_unlock(&t->lock);
            again = false;
            break;
        }
    }

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);

    if (again) {
        /*
         * Other cleanup (e.g. ktaskgc) is in progress.
         */
        scheduler();
        goto loop;
    }

    if (t != THREAD) {
        ASSERT(t != main_thread);	/* uninit is joined and detached in ktaskgc */
        thread_join(t);
        thread_detach(t);
        goto loop;	/* go for another thread */
    }

    /*
     * Now there are no other threads in this task
     * and no new threads can be created.
     */

    ipc_cleanup();
    futex_cleanup();
    klog_printf("Cleanup of task %lld completed.", TASK->taskid);
}
/** Kernel thread used to kill the userspace task when its main thread exits.
 *
 * This thread waits until the main userspace thread (i.e. uninit) exits.
 * When this happens, the task is killed. In the meantime, exited threads
 * are garbage collected.
 *
 * @param arg Pointer to the thread structure of the task's main thread.
 */
void ktaskgc(void *arg)
{
    thread_t *t = (thread_t *) arg;
loop:
    /*
     * Userspace threads cannot detach themselves,
     * therefore the thread pointer is guaranteed to be valid.
     */
    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {	/* sleep uninterruptibly here! */
        ipl_t ipl;
        link_t *cur;
        thread_t *thr = NULL;

        /*
         * The join timed out. Try to do some garbage collection of Undead threads.
         */
more_gc:
        ipl = interrupts_disable();
        spinlock_lock(&TASK->lock);

        /* Find one unclaimed Undead thread and claim it (TaskGC). */
        for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
            thr = list_get_instance(cur, thread_t, th_link);
            spinlock_lock(&thr->lock);
            if (thr != t && thr->state == Undead && thr->join_type == None) {
                thr->join_type = TaskGC;
                spinlock_unlock(&thr->lock);
                break;
            }
            spinlock_unlock(&thr->lock);
            thr = NULL;
        }
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);

        if (thr) {
            thread_join(thr);
            thread_detach(thr);
            scheduler();
            goto more_gc;	/* reap any further Undead threads */
        }

        goto loop;
    }
    /* Main thread has exited: release it and kill the whole task. */
    thread_detach(t);
    task_kill(TASK->taskid);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/proc/thread.c |
---|
0,0 → 1,618 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Thread management functions. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <synch/rwlock.h> |
#include <cpu.h> |
#include <func.h> |
#include <context.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <typedefs.h> |
#include <time/clock.h> |
#include <config.h> |
#include <arch/interrupt.h> |
#include <smp/ipi.h> |
#include <arch/faddr.h> |
#include <atomic.h> |
#include <memstr.h> |
#include <print.h> |
#include <mm/slab.h> |
#include <debug.h> |
#include <main/uinit.h> |
#include <syscall/copy.h> |
#include <errno.h> |
/** Human-readable names of thread states, indexed by state_t value. */
char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",	/* created but not yet scheduled */
	"Exiting",
	"Undead"	/* exited, awaiting thread_detach()/join cleanup */
};
/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
 * as the threads_lock is held.
 */
btree_t threads_btree;

/** Lock protecting last_tid, the thread ID counter. */
SPINLOCK_INITIALIZE(tidlock);
/** Most recently assigned thread ID; incremented in thread_create(). */
__u32 last_tid = 0;

/** Slab cache for thread_t structures. */
static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
/** Slab cache for saved FPU contexts (architectures with FPU only). */
slab_cache_t *fpu_context_slab;
#endif
/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Entered via the context set up by thread_create() (see the
 * context_set(..., FADDR(cushion), ...) call there).
 * interrupts_disable() is assumed; THREAD->lock is held on entry
 * and released here before the payload function runs.
 */
static void cushion(void)
{
	/* Snapshot the payload while still holding THREAD->lock. */
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* this is where each thread wakes up after its creation */
	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);
	/* Guarantee proper teardown even if f() returns normally. */
	thread_exit();
	/* not reached */
}
/** Initialization and allocation for thread_t structure
 *
 * Slab constructor: initializes locks and links and allocates the
 * per-thread resources (kernel stack frames and, unless FPU contexts
 * are allocated lazily, a saved FPU context).
 *
 * @param obj     Raw slab object to initialize as a thread_t.
 * @param kmflags Slab/frame allocation flags passed through to the
 *                underlying allocators.
 *
 * @return 0 on success, -1 on allocation failure (any partially
 *         allocated resources are released before returning).
 */
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *)obj;
	pfn_t pfn;
	int status;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
#  ifdef CONFIG_FPU_LAZY
	/* Lazy mode: context allocated on first FPU use (see scheduler_fpu_lazy_request()). */
	t->saved_fpu_context = NULL;
#  else
	t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
	if (!t->saved_fpu_context)
		return -1;
#  endif
#endif

	/* Allocate the kernel stack. */
	pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags,&status);
	if (status) {
#ifdef ARCH_HAS_FPU
		/* Undo the FPU context allocation made above. */
		if (t->saved_fpu_context)
			slab_free(fpu_context_slab,t->saved_fpu_context);
#endif
		return -1;
	}
	t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));

	return 0;
}
/** Destruction of thread_t object */ |
static int thr_destructor(void *obj) |
{ |
thread_t *t = (thread_t *)obj; |
frame_free(ADDR2PFN(KA2PA(t->kstack))); |
#ifdef ARCH_HAS_FPU |
if (t->saved_fpu_context) |
slab_free(fpu_context_slab,t->saved_fpu_context); |
#endif |
return 1; /* One page freed */ |
} |
/** Initialize threads
 *
 * Initialize kernel threads support: create the slab caches backing
 * thread_t (and FPU context) objects and the system-wide thread
 * registry B+tree. Called once at boot, before any thread exists.
 */
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy,0);
	thread_slab = slab_cache_create("thread_slab",
					sizeof(thread_t),0,
					thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
	fpu_context_slab = slab_cache_create("fpu_slab",
					     sizeof(fpu_context_t),
					     FPU_CONTEXT_ALIGN,
					     NULL, NULL, 0);
#endif

	btree_create(&threads_btree);
}
/** Make thread ready |
* |
* Switch thread t to the ready state. |
* |
* @param t Thread to make ready. |
* |
*/ |
void thread_ready(thread_t *t) |
{ |
cpu_t *cpu; |
runq_t *r; |
ipl_t ipl; |
int i, avg; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
ASSERT(! (t->state == Ready)); |
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority; |
cpu = CPU; |
if (t->flags & X_WIRED) { |
cpu = t->cpu; |
} |
t->state = Ready; |
spinlock_unlock(&t->lock); |
/* |
* Append t to respective ready queue on respective processor. |
*/ |
r = &cpu->rq[i]; |
spinlock_lock(&r->lock); |
list_append(&t->rq_link, &r->rq_head); |
r->n++; |
spinlock_unlock(&r->lock); |
atomic_inc(&nrdy); |
avg = atomic_get(&nrdy) / config.cpu_active; |
atomic_inc(&cpu->nrdy); |
interrupts_restore(ipl); |
} |
/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!!  Note that this function UNLOCKS
 * t->lock on behalf of the caller before freeing the structure.
 *
 * @param t Thread to destroy; must be in the Exiting or Undead state.
 */
void thread_destroy(thread_t *t)
{
	bool destroy_task = false;

	ASSERT(t->state == Exiting || t->state == Undead);
	ASSERT(t->task);
	ASSERT(t->cpu);

	/* Relinquish FPU ownership on the thread's CPU, if held. */
	spinlock_lock(&t->cpu->lock);
	if(t->cpu->fpu_owner==t)
		t->cpu->fpu_owner=NULL;
	spinlock_unlock(&t->cpu->lock);

	/* Release the caller-held thread lock before taking broader locks. */
	spinlock_unlock(&t->lock);

	/* Remove the thread from the system-wide registry. */
	spinlock_lock(&threads_lock);
	btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL);
	spinlock_unlock(&threads_lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	/* Last reference gone: the task must be reaped as well. */
	if (--t->task->refcount == 0) {
		t->task->accept_new_threads = false;
		destroy_task = true;
	}
	spinlock_unlock(&t->task->lock);

	if (destroy_task)
		task_destroy(t->task);

	slab_free(thread_slab, t);
}
/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name Symbolic name (copied; at most THREAD_NAME_BUFLEN bytes
 *             including the terminator are taken from it).
 *
 * @return New thread's structure on success, NULL on failure (slab
 *         allocation failed or the task no longer accepts new threads).
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	thread_create_arch(t);

	/* Not needed, but good for debugging */
	/*
	 * NOTE: 'THREAD_STACK_SIZE * 1<<STACK_FRAMES' parses as
	 * '(THREAD_STACK_SIZE * 1) << STACK_FRAMES' ('*' binds tighter
	 * than '<<'), which equals the intended
	 * THREAD_STACK_SIZE * (1 << STACK_FRAMES) — confusing but correct.
	 */
	memsetb((__address)t->kstack, THREAD_STACK_SIZE * 1<<STACK_FRAMES, 0);

	/* Assign a unique thread ID. */
	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	/* Arrange for the thread to start executing in cushion(). */
	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	memcpy(t->name, name, THREAD_NAME_BUFLEN);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->priority = -1;		/* start in rq[0] */
	t->cpu = NULL;
	t->flags = 0;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_interruptible = false;
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->in_copy_from_uspace = false;
	t->in_copy_to_uspace = false;

	t->interrupted = false;
	t->join_type = None;
	t->detached = false;
	waitq_initialize(&t->join_wq);

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	/*
	 * Attach to the containing task.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&task->lock);
	if (!task->accept_new_threads) {
		/* Task is shutting down; undo the allocation. */
		spinlock_unlock(&task->lock);
		slab_free(thread_slab, t);
		interrupts_restore(ipl);
		return NULL;
	}
	list_append(&t->th_link, &task->th_head);
	if (task->refcount++ == 0)
		task->main_thread = t;
	spinlock_unlock(&task->lock);

	/*
	 * Register this thread in the system-wide list.
	 */
	spinlock_lock(&threads_lock);
	btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
	spinlock_unlock(&threads_lock);

	interrupts_restore(ipl);

	return t;
}
/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed. Does not return;
 * the final cleanup happens in scheduler_separated_stack() once
 * the thread is rescheduled in the Exiting state.
 */
void thread_exit(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
		/* Drop the lock and retry until the timeout handler has run. */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();

	/* Not reached */
	while (1)
		;
}
/** Thread sleep |
* |
* Suspend execution of the current thread. |
* |
* @param sec Number of seconds to sleep. |
* |
*/ |
void thread_sleep(__u32 sec) |
{ |
thread_usleep(sec*1000000); |
} |
/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h
 *         (EINVAL when attempting to join the calling thread itself).
 */
int thread_join_timeout(thread_t *t, __u32 usec, int flags)
{
	ipl_t ipl;
	int rc;

	/* A thread cannot wait for its own termination. */
	if (t == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	/* Sleep until the dying thread wakes join_wq (or the timeout fires). */
	rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

	return rc;
}
/** Detach thread.
 *
 * Mark the thread as detached, if the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached; must not already be detached.
 */
void thread_detach(thread_t *t)
{
	ipl_t ipl;

	/*
	 * Since the thread is expected to not be already detached,
	 * pointer to it must be still valid.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	if (t->state == Undead) {
		/* Already exited and reaped to Undead: free it now. */
		thread_destroy(t);	/* unlocks &t->lock */
		interrupts_restore(ipl);
		return;
	} else {
		/* Still running; scheduler_separated_stack() will free it. */
		t->detached = true;
	}
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
}
/** Thread usleep |
* |
* Suspend execution of the current thread. |
* |
* @param usec Number of microseconds to sleep. |
* |
*/ |
void thread_usleep(__u32 usec) |
{ |
waitq_t wq; |
waitq_initialize(&wq); |
(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING); |
} |
/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread
 * (invoked from scheduler_separated_stack() when the thread
 * is found Sleeping).
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->call_me = call_me;
	THREAD->call_me_with = call_me_with;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);
}
/** Print list of threads debug info
 *
 * Walks the leaf level of threads_btree and prints one line per thread.
 * Interrupts are disabled and threads_lock is held for the whole walk,
 * so this must only be used for debugging.
 *
 * NOTE(review): several %#zx/%zd specifiers are paired with pointer
 * arguments (t, t->task, ...); this relies on the kernel printf's
 * conversion behavior — verify against the kernel printf implementation.
 */
void thread_print_list(void)
{
	link_t *cur;
	ipl_t ipl;
	
	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	/* Iterate over all leaf nodes of the B+tree. */
	for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			thread_t *t;
		
			t = (thread_t *) node->value[i];
			printf("%s: address=%#zx, tid=%zd, state=%s, task=%#zx, code=%#zx, stack=%#zx, cpu=",
				t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
			if (t->cpu)
				printf("cpu%zd", t->cpu->id);
			else
				printf("none");
			if (t->state == Sleeping) {
				printf(", kst=%#zx", t->kstack);
				printf(", wq=%#zx", t->sleep_queue);
			}
			printf("\n");
		}
	}

	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
/** Check whether thread exists. |
* |
* Note that threads_lock must be already held and |
* interrupts must be already disabled. |
* |
* @param t Pointer to thread. |
* |
* @return True if thread t is known to the system, false otherwise. |
*/ |
bool thread_exists(thread_t *t) |
{ |
btree_node_t *leaf; |
return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL; |
} |
/** Process syscall to create new thread.
 *
 * Copies the thread name and uspace_arg_t descriptor from userspace,
 * creates a kernel thread running uinit() on them, and readies it.
 *
 * @param uspace_uarg Userspace pointer to the uspace_arg_t descriptor.
 * @param uspace_name Userspace pointer to the thread name.
 *
 * @return New thread's TID on success, or a negative error code
 *         (copy error or ENOMEM) cast to __native.
 */
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
	thread_t *t;
	char namebuf[THREAD_NAME_BUFLEN];
	uspace_arg_t *kernel_uarg;
	__u32 tid;
	int rc;

	rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
	if (rc != 0)
		return (__native) rc;

	/* Flags 0: presumably a blocking allocation that cannot fail — verify
	 * against the kernel malloc contract. */
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != 0) {
		free(kernel_uarg);
		return (__native) rc;
	}

	/* On success, ownership of kernel_uarg passes to the new thread
	 * (uinit); presumably freed there — verify against uinit(). */
	if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
		tid = t->tid;
		thread_ready(t);
		return (__native) tid;
	} else {
		free(kernel_uarg);
	}

	return (__native) ENOMEM;
}
/** Process syscall to terminate thread.
 *
 * @param uspace_status Exit status supplied by userspace (currently unused).
 *
 * @return Never returns; thread_exit() does not come back.
 */
__native sys_thread_exit(int uspace_status)
{
	thread_exit();
	/* Unreachable */
	return 0;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/proc/scheduler.c |
---|
0,0 → 1,703 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Scheduler and load balancing. |
* |
* This file contains the scheduler and kcpulb kernel thread which |
* performs load-balancing of per-CPU run queues. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/as.h> |
#include <time/delay.h> |
#include <arch/asm.h> |
#include <arch/faddr.h> |
#include <atomic.h> |
#include <synch/spinlock.h> |
#include <config.h> |
#include <context.h> |
#include <func.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <cpu.h> |
#include <print.h> |
#include <debug.h> |
/* Forward declarations of scheduler-internal hooks. */
static void before_task_runs(void);
static void before_thread_runs(void);
static void after_thread_ran(void);
static void scheduler_separated_stack(void);

atomic_t nrdy;	/**< Number of ready threads in the system. */
/** Carry out actions before new task runs.
 *
 * Delegates entirely to the architecture-specific hook.
 */
void before_task_runs(void)
{
	before_task_runs_arch();
}
/** Take actions before new thread runs.
 *
 * Perform actions that need to be
 * taken before the newly selected
 * tread is passed control.
 *
 * THREAD->lock is locked on entry
 *
 * With CONFIG_FPU_LAZY the FPU is only enabled if this thread already
 * owns it (a trap on first use triggers scheduler_fpu_lazy_request());
 * otherwise the context is restored (or initialized) eagerly here.
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
#ifdef CONFIG_FPU_LAZY
	if(THREAD==CPU->fpu_owner) 
		fpu_enable();
	else
		fpu_disable(); 
#else
	fpu_enable();
	if (THREAD->fpu_context_exists)
		fpu_context_restore(THREAD->saved_fpu_context);
	else {
		/* First run: start from a fresh FPU state. */
		fpu_init();
		THREAD->fpu_context_exists=1;
	}
#endif
}
/** Take actions after THREAD had run.
 *
 * Perform actions that need to be
 * taken after the running thread
 * had been preempted by the scheduler.
 *
 * THREAD->lock is locked on entry
 *
 * Delegates entirely to the architecture-specific hook.
 */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}
#ifdef CONFIG_FPU_LAZY |
void scheduler_fpu_lazy_request(void) |
{ |
restart: |
fpu_enable(); |
spinlock_lock(&CPU->lock); |
/* Save old context */ |
if (CPU->fpu_owner != NULL) { |
spinlock_lock(&CPU->fpu_owner->lock); |
fpu_context_save(CPU->fpu_owner->saved_fpu_context); |
/* don't prevent migration */ |
CPU->fpu_owner->fpu_context_engaged=0; |
spinlock_unlock(&CPU->fpu_owner->lock); |
CPU->fpu_owner = NULL; |
} |
spinlock_lock(&THREAD->lock); |
if (THREAD->fpu_context_exists) { |
fpu_context_restore(THREAD->saved_fpu_context); |
} else { |
/* Allocate FPU context */ |
if (!THREAD->saved_fpu_context) { |
/* Might sleep */ |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, |
0); |
/* We may have switched CPUs during slab_alloc */ |
goto restart; |
} |
fpu_init(); |
THREAD->fpu_context_exists=1; |
} |
CPU->fpu_owner=THREAD; |
THREAD->fpu_context_engaged = 1; |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
} |
#endif |
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 * Currently a no-op; all scheduler state is statically initialized.
 */
void scheduler_init(void)
{
}
/** Get thread to be scheduled
 *
 * Get the optimal thread to be scheduled
 * according to thread accounting and scheduler
 * policy. Blocks (with the CPU sleeping) until a runnable thread
 * appears on this CPU.
 *
 * Returns with interrupts disabled.
 *
 * @return Thread to be scheduled.
 */
static thread_t *find_best_thread(void)
{
	thread_t *t;
	runq_t *r;
	int i;

	ASSERT(CPU != NULL);

loop:
	interrupts_enable();
	
	if (atomic_get(&CPU->nrdy) == 0) {
		/*
		 * For there was nothing to run, the CPU goes to sleep
		 * until a hardware interrupt or an IPI comes.
		 * This improves energy saving and hyperthreading.
		 */

		/*
		 * An interrupt might occur right now and wake up a thread.
		 * In such case, the CPU will continue to go to sleep
		 * even though there is a runnable thread.
		 */

		 cpu_sleep();
		 goto loop;
	}

	interrupts_disable();
	
	/* Scan run queues from highest priority (rq[0]) downwards. */
	for (i = 0; i<RQ_COUNT; i++) {
		r = &CPU->rq[i];
		spinlock_lock(&r->lock);
		if (r->n == 0) {
			/*
			 * If this queue is empty, try a lower-priority queue.
			 */
			spinlock_unlock(&r->lock);
			continue;
		}

		atomic_dec(&CPU->nrdy);
		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

		/* Time slice grows with lower priority. */
		t->ticks = us2ticks((i+1)*10000);
		t->priority = i;	/* correct rq index */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	/* All queues drained by someone else in the meantime; retry. */
	goto loop;

}
/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that in result threads with 'pri'
 * greater or equal start are moved to a higher-priority queue.
 *
 * Only acts once CPU->needs_relink exceeds NEEDS_RELINK_MAX.
 *
 * @param start Threshold priority.
 */
static void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		/* Shift each queue below 'start' up by one priority level. */
		for (i = start; i<RQ_COUNT-1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);
		
			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}
/** The scheduler
 *
 * The thread scheduling procedure.
 * Passes control directly to
 * scheduler_separated_stack().
 *
 * Saves the current thread's context (if any) and switches to the
 * per-CPU scheduler stack. Preempted threads resume execution at the
 * context_save() call below and return from this function.
 */
void scheduler(void)
{
	volatile ipl_t ipl;

	ASSERT(CPU != NULL);

	ipl = interrupts_disable();

	if (atomic_get(&haltstate))
		halt();
	
	if (THREAD) {
		spinlock_lock(&THREAD->lock);
#ifndef CONFIG_FPU_LAZY
		fpu_context_save(THREAD->saved_fpu_context);
#endif
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 * (context_save() returns 0 when resumed via
			 * context_restore() on a later dispatch.)
			 */
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(THREAD->saved_context.ipl);
			
			return;
		}

		/*
		 * Interrupt priority level of preempted thread is recorded here
		 * to facilitate scheduler() invocations from interrupts_disable()'d
		 * code (e.g. waitq_sleep_timeout()). 
		 */
		THREAD->saved_context.ipl = ipl;
	}

	/*
	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
	 * and preemption counter. At this point THE could be coming either
	 * from THREAD's or CPU's stack.
	 */
	the_copy(THE, (the_t *) CPU->stack);

	/*
	 * We may not keep the old stack.
	 * Reason: If we kept the old stack and got blocked, for instance, in
	 * find_best_thread(), the old thread could get rescheduled by another
	 * CPU and overwrite the part of its own stack that was also used by
	 * the scheduler on this CPU.
	 *
	 * Moreover, we have to bypass the compiler-generated POP sequence
	 * which is fooled by SP being set to the very top of the stack.
	 * Therefore the scheduler() function continues in
	 * scheduler_separated_stack().
	 */
	context_save(&CPU->saved_context);
	context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
/** Scheduler stack switch wrapper
 *
 * Second part of the scheduler() function
 * using new stack. Handling the actual context
 * switch to a new thread.
 *
 * Assume THREAD->lock is held.
 *
 * Disposes of the outgoing thread according to its state (requeue,
 * destroy/undead, or finish going to sleep), picks the next thread via
 * find_best_thread(), switches address spaces if the task changes, and
 * finally context_restore()s into the new thread. Never returns.
 */
void scheduler_separated_stack(void)
{
	int priority;
	
	ASSERT(CPU != NULL);
	
	if (THREAD) {
		/* must be run after the switch to scheduler stack */
		after_thread_ran();

		switch (THREAD->state) {
		    case Running:
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		    case Exiting:
repeat:
		    	if (THREAD->detached) {
				thread_destroy(THREAD);
			} else {
				/*
				 * The thread structure is kept allocated until somebody
				 * calls thread_detach() on it.
				 */
				if (!spinlock_trylock(&THREAD->join_wq.lock)) {
					/*
					 * Avoid deadlock.
					 * (The canonical lock order is join_wq.lock
					 * before THREAD->lock, so back off and retry.)
					 */
					spinlock_unlock(&THREAD->lock);
					delay(10);
					spinlock_lock(&THREAD->lock);
					goto repeat;
				}
				/* Wake anyone blocked in thread_join_timeout(). */
				_waitq_wakeup_unsafe(&THREAD->join_wq, false);
				spinlock_unlock(&THREAD->join_wq.lock);
				
				THREAD->state = Undead;
				spinlock_unlock(&THREAD->lock);
			}
			break;
			
		    case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->priority = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		    default:
			/*
			 * Entering state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}

		THREAD = NULL;
	}

	THREAD = find_best_thread();
	
	spinlock_lock(&THREAD->lock);
	priority = THREAD->priority;
	spinlock_unlock(&THREAD->lock);	

	/* Occasionally promote starving low-priority threads. */
	relink_rq(priority);		

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		as_t *as1 = NULL;
		as_t *as2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			as1 = TASK->as;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		as2 = THREAD->task->as;
		spinlock_unlock(&THREAD->task->lock);
		
		/*
		 * Note that it is possible for two tasks to share one address space.
		 */
		if (as1 != as2) {
			/*
			 * Both tasks and address spaces are different.
			 * Replace the old one with the new one.
			 */
			as_switch(as1, as2);
		}
		TASK = THREAD->task;
		before_task_runs();
	}

	spinlock_lock(&THREAD->lock);	
	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif	

	/*
	 * Some architectures provide late kernel PA2KA(identity)
	 * mapping in a page fault handler. However, the page fault
	 * handler uses the kernel stack of the running thread and
	 * therefore cannot be used to map it. The kernel stack, if
	 * necessary, is to be mapped in before_thread_runs(). This
	 * function must be executed before the switch to the new stack.
	 */
	before_thread_runs();

	/*
	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
	 */
	the_copy(THE, (the_t *) THREAD->kstack);
	
	context_restore(&THREAD->saved_context);
	/* not reached */
}
#ifdef CONFIG_SMP |
/** Load balancing thread |
* |
* SMP load balancing thread, supervising thread supplies |
* for the CPU it's wired to. |
* |
* @param arg Generic thread argument (unused). |
* |
*/ |
void kcpulb(void *arg) |
{ |
thread_t *t; |
int count, average, i, j, k = 0; |
ipl_t ipl; |
/* |
* Detach kcpulb as nobody will call thread_join_timeout() on it. |
*/ |
thread_detach(THREAD); |
loop: |
/* |
* Work in 1s intervals. |
*/ |
thread_sleep(1); |
not_satisfied: |
/* |
* Calculate the number of threads that will be migrated/stolen from |
* other CPU's. Note that situation can have changed between two |
* passes. Each time get the most up to date counts. |
*/ |
average = atomic_get(&nrdy) / config.cpu_active + 1; |
count = average - atomic_get(&CPU->nrdy); |
if (count <= 0) |
goto satisfied; |
/* |
* Searching least priority queues on all CPU's first and most priority queues on all CPU's last. |
*/ |
for (j=RQ_COUNT-1; j >= 0; j--) { |
for (i=0; i < config.cpu_active; i++) { |
link_t *l; |
runq_t *r; |
cpu_t *cpu; |
cpu = &cpus[(i + k) % config.cpu_active]; |
/* |
* Not interested in ourselves. |
* Doesn't require interrupt disabling for kcpulb is X_WIRED. |
*/ |
if (CPU == cpu) |
continue; |
if (atomic_get(&cpu->nrdy) <= average) |
continue; |
ipl = interrupts_disable(); |
r = &cpu->rq[j]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
spinlock_unlock(&r->lock); |
interrupts_restore(ipl); |
continue; |
} |
t = NULL; |
l = r->rq_head.prev; /* search rq from the back */ |
while (l != &r->rq_head) { |
t = list_get_instance(l, thread_t, rq_link); |
/* |
* We don't want to steal CPU-wired threads neither threads already stolen. |
* The latter prevents threads from migrating between CPU's without ever being run. |
* We don't want to steal threads whose FPU context is still in CPU. |
*/ |
spinlock_lock(&t->lock); |
if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) { |
/* |
* Remove t from r. |
*/ |
spinlock_unlock(&t->lock); |
atomic_dec(&cpu->nrdy); |
atomic_dec(&nrdy); |
r->n--; |
list_remove(&t->rq_link); |
break; |
} |
spinlock_unlock(&t->lock); |
l = l->prev; |
t = NULL; |
} |
spinlock_unlock(&r->lock); |
if (t) { |
/* |
* Ready t on local CPU |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active); |
#endif |
t->flags |= X_STOLEN; |
t->state = Entering; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
interrupts_restore(ipl); |
if (--count == 0) |
goto satisfied; |
/* |
* We are not satisfied yet, focus on another CPU next time. |
*/ |
k++; |
continue; |
} |
interrupts_restore(ipl); |
} |
} |
if (atomic_get(&CPU->nrdy)) { |
/* |
* Be a little bit light-weight and let migrated threads run. |
*/ |
scheduler(); |
} else { |
/* |
* We failed to migrate a single thread. |
* Give up this turn. |
*/ |
goto loop; |
} |
goto not_satisfied; |
satisfied: |
goto loop; |
} |
#endif /* CONFIG_SMP */ |
/** Print information about threads & scheduler queues
 *
 * Debug dump: for every active CPU, prints its ready count and the
 * contents of each non-empty run queue. Runs with interrupts disabled.
 */
void sched_print_list(void)
{
	ipl_t ipl;
	int cpu,i;
	runq_t *r;
	thread_t *t;
	link_t *cur;

	/* We are going to mess with scheduler structures,
	 * let's not be interrupted */
	ipl = interrupts_disable();
	for (cpu=0;cpu < config.cpu_count; cpu++) {

		if (!cpus[cpu].active)
			continue;

		spinlock_lock(&cpus[cpu].lock);
		printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
		       cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
		
		for (i=0; i<RQ_COUNT; i++) {
			r = &cpus[cpu].rq[i];
			spinlock_lock(&r->lock);
			if (!r->n) {
				/* Skip empty queues silently. */
				spinlock_unlock(&r->lock);
				continue;
			}
			printf("\trq[%d]: ", i);
			for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
				t = list_get_instance(cur, thread_t, rq_link);
				printf("%d(%s) ", t->tid,
				       thread_states[t->state]);
			}
			printf("\n");
			spinlock_unlock(&r->lock);
		}
		spinlock_unlock(&cpus[cpu].lock);
	}
	
	interrupts_restore(ipl);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/proc/the.c |
---|
0,0 → 1,77 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief THE structure functions. |
* |
* This file contains functions to manage the THE structure. |
* The THE structure exists at the base address of every kernel |
* stack and carries information about current settings |
* (e.g. current CPU, current thread, task and address space |
* and current preemption counter). |
*/ |
#include <arch.h> |
#include <typedefs.h> |
/** Initialize THE structure |
* |
* Initialize THE structure passed as argument. |
* |
* @param the THE structure to be initialized. |
*/ |
void the_initialize(the_t *the) |
{ |
the->preemption_disabled = 0; |
the->cpu = NULL; |
the->thread = NULL; |
the->task = NULL; |
the->as = NULL; |
} |
/** Copy THE structure
 *
 * Copy the source THE structure to the destination THE structure
 * via plain struct assignment (shallow copy of all fields).
 *
 * @param src The source THE structure.
 * @param dst The destination THE structure.
 */
void the_copy(the_t *src, the_t *dst)
{
	*dst = *src;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/as.c |
---|
0,0 → 1,1534 |
/* |
* Copyright (C) 2001-2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Address space related functions. |
* |
* This file contains address space manipulation functions. |
* Roughly speaking, this is a higher-level client of |
* Virtual Address Translation (VAT) subsystem. |
* |
* Functionality provided by this file allows one to |
* create address space and create, resize and share |
* address space areas. |
* |
* @see page.c |
* |
*/ |
#include <mm/as.h> |
#include <arch/mm/as.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <mm/slab.h> |
#include <mm/tlb.h> |
#include <arch/mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <genarch/mm/page_ht.h> |
#include <mm/asid.h> |
#include <arch/mm/asid.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <adt/list.h> |
#include <adt/btree.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <arch/asm.h> |
#include <panic.h> |
#include <debug.h> |
#include <print.h> |
#include <memstr.h> |
#include <macros.h> |
#include <arch.h> |
#include <errno.h> |
#include <config.h> |
#include <align.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <syscall/copy.h> |
#include <arch/interrupt.h> |
as_operations_t *as_operations = NULL; |
/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */ |
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock); |
/** |
* This list contains address spaces that are not active on any |
* processor and that have valid ASID. |
*/ |
LIST_INITIALIZE(inactive_as_with_asid_head); |
/** Kernel address space. */ |
as_t *AS_KERNEL = NULL; |
static int area_flags_to_page_flags(int aflags); |
static as_area_t *find_area_and_lock(as_t *as, __address va); |
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area); |
static void sh_info_remove_reference(share_info_t *sh_info); |
/** Initialize address space subsystem. */ |
void as_init(void) |
{ |
as_arch_init(); |
AS_KERNEL = as_create(FLAG_AS_KERNEL); |
if (!AS_KERNEL) |
panic("can't create kernel address space\n"); |
} |
/** Create address space. |
* |
* @param flags Flags that influence way in wich the address space is created. |
*/ |
as_t *as_create(int flags) |
{ |
as_t *as; |
as = (as_t *) malloc(sizeof(as_t), 0); |
link_initialize(&as->inactive_as_with_asid_link); |
mutex_initialize(&as->lock); |
btree_create(&as->as_area_btree); |
if (flags & FLAG_AS_KERNEL) |
as->asid = ASID_KERNEL; |
else |
as->asid = ASID_INVALID; |
as->refcount = 0; |
as->cpu_refcount = 0; |
as->page_table = page_table_create(flags); |
return as; |
} |
/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
 * the address space can be destroyed. Returns the ASID to the allocator, tears
 * down every address space area, destroys the area B+tree and the page table,
 * and finally frees the structure itself.
 *
 * @param as Address space to be destroyed. Its refcount must be zero.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this area,
	 * it is safe not to lock its mutex.
	 */

	/*
	 * Lock ordering: inactive_as_with_asid_lock must be acquired
	 * before any as_t mutex (see its declaration above).
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		/* Unlink from the inactive list only if actually on it:
		 * an address space still active on some CPU (or the current
		 * one) was never appended by as_switch(). */
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed: each as_area_destroy() call
	 * rebalances the tree, so always restart from the
	 * first key of the first leaf until the tree is empty.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
	page_table_destroy(as->page_table);

	interrupts_restore(ipl);

	free(as);
}
/** Create address space area of common attributes. |
* |
* The created address space area is added to the target address space. |
* |
* @param as Target address space. |
* @param flags Flags of the area memory. |
* @param size Size of area. |
* @param base Base address of area. |
* @param attrs Attributes of the area. |
* @param backend Address space area backend. NULL if no backend is used. |
* @param backend_data NULL or a pointer to an array holding two void *. |
* |
* @return Address space area on success or NULL on failure. |
*/ |
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs, |
mem_backend_t *backend, mem_backend_data_t *backend_data) |
{ |
ipl_t ipl; |
as_area_t *a; |
if (base % PAGE_SIZE) |
return NULL; |
if (!size) |
return NULL; |
/* Writeable executable areas are not supported. */ |
if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) |
return NULL; |
ipl = interrupts_disable(); |
mutex_lock(&as->lock); |
if (!check_area_conflicts(as, base, size, NULL)) { |
mutex_unlock(&as->lock); |
interrupts_restore(ipl); |
return NULL; |
} |
a = (as_area_t *) malloc(sizeof(as_area_t), 0); |
mutex_initialize(&a->lock); |
a->as = as; |
a->flags = flags; |
a->attributes = attrs; |
a->pages = SIZE2FRAMES(size); |
a->base = base; |
a->sh_info = NULL; |
a->backend = backend; |
if (backend_data) |
a->backend_data = *backend_data; |
else |
memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0); |
btree_create(&a->used_space); |
btree_insert(&as->as_area_btree, base, (void *) a, NULL); |
mutex_unlock(&as->lock); |
interrupts_restore(ipl); |
return a; |
} |
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	/* New page count, measured from the area base (address == area->base
	 * gives exactly SIZE2FRAMES(size) pages). */
	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		/* First virtual address that is no longer part of the area. */
		__address start_free = area->base + pages*PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 * NOTE(review): the shootdown uses AS->asid while the rest of
		 * the function operates on the as parameter — this assumes
		 * as == AS; confirm against callers.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			/* Always re-read the last leaf: used_space_remove()
			 * below rebalances the tree under us. */
			node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				/* Rightmost used interval: base b, c pages. */
				__address b = node->key[node->keys - 1];
				count_t c = (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

					if (b + c*PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits completely
						 * in the resized address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding to b and c
					 * overlaps with the resized address space area.
					 */

					cond = false;	/* we are almost done */
					/* i = number of pages of this interval that
					 * survive the shrink; only pages [i, c) are
					 * unmapped by the loop below. */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free, c - i))
						panic("Could not remove used space.");
				} else {
					/*
					 * The interval of used space can be completely removed.
					 * i stays 0, so all c pages are unmapped below.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b + i*PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
					/* Let the backend release the underlying frame
					 * before the mapping is destroyed. */
					if (area->backend && area->backend->frame_free) {
						area->backend->frame_free(area,
							b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b + i*PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
/** Destroy address space area. |
* |
* @param as Address space. |
* @param address Address withing the area to be deleted. |
* |
* @return Zero on success or a value from @ref errno.h on failure. |
*/ |
int as_area_destroy(as_t *as, __address address) |
{ |
as_area_t *area; |
__address base; |
link_t *cur; |
ipl_t ipl; |
ipl = interrupts_disable(); |
mutex_lock(&as->lock); |
area = find_area_and_lock(as, address); |
if (!area) { |
mutex_unlock(&as->lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
base = area->base; |
/* |
* Start TLB shootdown sequence. |
*/ |
tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages); |
/* |
* Visit only the pages mapped by used_space B+tree. |
*/ |
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
__address b = node->key[i]; |
count_t j; |
pte_t *pte; |
for (j = 0; j < (count_t) node->value[i]; j++) { |
page_table_lock(as, false); |
pte = page_mapping_find(as, b + j*PAGE_SIZE); |
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); |
if (area->backend && area->backend->frame_free) { |
area->backend->frame_free(area, |
b + j*PAGE_SIZE, PTE_GET_FRAME(pte)); |
} |
page_mapping_remove(as, b + j*PAGE_SIZE); |
page_table_unlock(as, false); |
} |
} |
} |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(AS->asid, area->base, area->pages); |
tlb_shootdown_finalize(); |
btree_destroy(&area->used_space); |
area->attributes |= AS_AREA_ATTR_PARTIAL; |
if (area->sh_info) |
sh_info_remove_reference(area->sh_info); |
mutex_unlock(&area->lock); |
/* |
* Remove the empty area from address space. |
*/ |
btree_remove(&AS->as_area_btree, base, NULL); |
free(area); |
mutex_unlock(&AS->lock); |
interrupts_restore(ipl); |
return 0; |
} |
/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or
 *	   if there is no such address space area,
 *	   EPERM if there was a problem in accepting the area or
 *	   ENOMEM if there was a problem in allocating destination
 *	   address space area. ENOTSUP is returned if an attempt
 *	   to share non-anonymous address space area is detected.
 */
int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
		  as_t *dst_as, __address dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	/* Snapshot the source area attributes so that the source locks
	 * can be dropped before creating the destination area. */
	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	/* The caller must expect the exact size and may not request
	 * rights beyond what the source area grants. */
	if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	/*
	 * Now we are committed to sharing the area.
	 * First prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		/* First share: refcount 2 covers both the source and the
		 * (yet to be created) destination area. */
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
				  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);

	interrupts_restore(ipl);

	return 0;
}
/** Check access mode for address space area. |
* |
* The address space area must be locked prior to this call. |
* |
* @param area Address space area. |
* @param access Access mode. |
* |
* @return False if access violates area's permissions, true otherwise. |
*/ |
bool as_area_check_access(as_area_t *area, pf_access_t access) |
{ |
int flagmap[] = { |
[PF_ACCESS_READ] = AS_AREA_READ, |
[PF_ACCESS_WRITE] = AS_AREA_WRITE, |
[PF_ACCESS_EXEC] = AS_AREA_EXEC |
}; |
if (!(area->flags & flagmap[access])) |
return false; |
return true; |
} |
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 * 	   fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	/* Faults taken before threading is up cannot be resolved. */
	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			/* A racing fault already installed a sufficient
			 * mapping; nothing left to do. */
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
				(access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
				(access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	/* Unresolvable fault: if it happened inside a kernel-to-user or
	 * user-to-kernel copy, redirect the interrupted code to the copy
	 * routine's failover address instead of killing the kernel. */
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->cpu_refcount);
		if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			 ASSERT(old->asid != ASID_INVALID);
			 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
		/* Becoming active again: leave the inactive list, or note
		 * that an ASID must be allocated. */
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
/** Convert address space area flags to page flags. |
* |
* @param aflags Flags of some address space area. |
* |
* @return Flags to be passed to page_mapping_insert(). |
*/ |
int area_flags_to_page_flags(int aflags) |
{ |
int flags; |
flags = PAGE_USER | PAGE_PRESENT; |
if (aflags & AS_AREA_READ) |
flags |= PAGE_READ; |
if (aflags & AS_AREA_WRITE) |
flags |= PAGE_WRITE; |
if (aflags & AS_AREA_EXEC) |
flags |= PAGE_EXEC; |
if (aflags & AS_AREA_CACHEABLE) |
flags |= PAGE_CACHEABLE; |
return flags; |
} |
/** Compute flags for virtual address translation subsytem. |
* |
* The address space area must be locked. |
* Interrupts must be disabled. |
* |
* @param a Address space area. |
* |
* @return Flags to be used in page_mapping_insert(). |
*/ |
int as_area_get_flags(as_area_t *a) |
{ |
return area_flags_to_page_flags(a->flags); |
} |
/** Create page table. |
* |
* Depending on architecture, create either address space |
* private or global page table. |
* |
* @param flags Flags saying whether the page table is for kernel address space. |
* |
* @return First entry of the page table. |
*/ |
pte_t *page_table_create(int flags) |
{ |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_create); |
return as_operations->page_table_create(flags); |
} |
/** Destroy page table. |
* |
* Destroy page table in architecture specific way. |
* |
* @param page_table Physical address of PTL0. |
*/ |
void page_table_destroy(pte_t *page_table) |
{ |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_destroy); |
as_operations->page_table_destroy(page_table); |
} |
/** Lock page table. |
* |
* This function should be called before any page_mapping_insert(), |
* page_mapping_remove() and page_mapping_find(). |
* |
* Locking order is such that address space areas must be locked |
* prior to this call. Address space can be locked prior to this |
* call in which case the lock argument is false. |
* |
* @param as Address space. |
* @param lock If false, do not attempt to lock as->lock. |
*/ |
void page_table_lock(as_t *as, bool lock) |
{ |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_lock); |
as_operations->page_table_lock(as, lock); |
} |
/** Unlock page table. |
* |
* @param as Address space. |
* @param unlock If false, do not attempt to unlock as->lock. |
*/ |
void page_table_unlock(as_t *as, bool unlock) |
{ |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_unlock); |
as_operations->page_table_unlock(as, unlock); |
} |
/** Find address space area and lock it. |
* |
* The address space must be locked and interrupts must be disabled. |
* |
* @param as Address space. |
* @param va Virtual address. |
* |
* @return Locked address space area containing va on success or NULL on failure. |
*/ |
as_area_t *find_area_and_lock(as_t *as, __address va) |
{ |
as_area_t *a; |
btree_node_t *leaf, *lnode; |
int i; |
a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); |
if (a) { |
/* va is the base address of an address space area */ |
mutex_lock(&a->lock); |
return a; |
} |
/* |
* Search the leaf node and the righmost record of its left neighbour |
* to find out whether this is a miss or va belongs to an address |
* space area found there. |
*/ |
/* First, search the leaf node itself. */ |
for (i = 0; i < leaf->keys; i++) { |
a = (as_area_t *) leaf->value[i]; |
mutex_lock(&a->lock); |
if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { |
return a; |
} |
mutex_unlock(&a->lock); |
} |
/* |
* Second, locate the left neighbour and test its last record. |
* Because of its position in the B+tree, it must have base < va. |
*/ |
if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
a = (as_area_t *) lnode->value[lnode->keys - 1]; |
mutex_lock(&a->lock); |
if (va < a->base + a->pages * PAGE_SIZE) { |
return a; |
} |
mutex_unlock(&a->lock); |
} |
return NULL; |
} |
/** Check area conflicts with other areas. |
* |
* The address space must be locked and interrupts must be disabled. |
* |
* @param as Address space. |
* @param va Starting virtual address of the area being tested. |
* @param size Size of the area being tested. |
* @param avoid_area Do not touch this area. |
* |
* @return True if there is no conflict, false otherwise. |
*/ |
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area) |
{ |
as_area_t *a; |
btree_node_t *leaf, *node; |
int i; |
/* |
* We don't want any area to have conflicts with NULL page. |
*/ |
if (overlaps(va, size, NULL, PAGE_SIZE)) |
return false; |
/* |
* The leaf node is found in O(log n), where n is proportional to |
* the number of address space areas belonging to as. |
* The check for conflicts is then attempted on the rightmost |
* record in the left neighbour, the leftmost record in the right |
* neighbour and all records in the leaf node itself. |
*/ |
if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { |
if (a != avoid_area) |
return false; |
} |
/* First, check the two border cases. */ |
if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { |
a = (as_area_t *) node->value[node->keys - 1]; |
mutex_lock(&a->lock); |
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
mutex_unlock(&a->lock); |
return false; |
} |
mutex_unlock(&a->lock); |
} |
if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) { |
a = (as_area_t *) node->value[0]; |
mutex_lock(&a->lock); |
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
mutex_unlock(&a->lock); |
return false; |
} |
mutex_unlock(&a->lock); |
} |
/* Second, check the leaf node. */ |
for (i = 0; i < leaf->keys; i++) { |
a = (as_area_t *) leaf->value[i]; |
if (a == avoid_area) |
continue; |
mutex_lock(&a->lock); |
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { |
mutex_unlock(&a->lock); |
return false; |
} |
mutex_unlock(&a->lock); |
} |
/* |
* So far, the area does not conflict with other areas. |
* Check if it doesn't conflict with kernel address space. |
*/ |
if (!KERNEL_ADDRESS_SPACE_SHADOWED) { |
return !overlaps(va, size, |
KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START); |
} |
return true; |
} |
/** Return size of the address space area with given base.
 *
 * Looks up the area containing base in the current address space (AS)
 * and returns its size in bytes, or 0 if no such area exists.
 *
 * NOTE(review): find_area_and_lock() documents that the address space
 * must be locked, but AS->lock is not taken here (only interrupts are
 * disabled) — confirm whether this is safe on SMP.
 *
 * @param base Address belonging to the area being queried.
 *
 * @return Size of the area in bytes or 0 on failure.
 */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area){
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}
/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * Used space is tracked in the area's used_space B+tree: each key is the
 * starting page of an interval and the associated value is the interval
 * length in pages (stored cast to void *).
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of page to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	/* Empty leaf: the tree is empty, insert unconditionally. */
	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		__address right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does not
		 * exist but the interval fits from the left.
		 */

		if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the right interval down
			 * and increasing its size accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour does not
		 * exist but the interval fits from the right.
		 */

		if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt*PAGE_SIZE == page) {
			/* The interval can be added by growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit only
	 * between two other intervals of the leaf. The two border cases were already
	 * resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
				/* The interval intersects with the left interval. */
				return 0;
			} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
				/* The interval intersects with the right interval. */
				return 0;
			} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
				/* The interval can be added by merging the two already present intervals. */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt*PAGE_SIZE) {
				/* The interval can be added by simply growing the left interval. */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count*PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving base of the right
				 * interval down and increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring intervals,
				 * but cannot be merged with any of them.
				 */
				btree_insert(&a->used_space, page, (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
}
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * Mirror operation of used_space_insert(): the interval being removed must
 * be fully contained in some existing interval of the used_space B+tree,
 * which may get shrunk, removed or split in two as a result.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of page to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		__address left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		/* No interval can contain page: it lies before the leaf's first key. */
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
}
/** Remove reference to address space area share info. |
* |
* If the reference count drops to 0, the sh_info is deallocated. |
* |
* @param sh_info Pointer to address space area share info. |
*/ |
void sh_info_remove_reference(share_info_t *sh_info) |
{ |
bool dealloc = false; |
mutex_lock(&sh_info->lock); |
ASSERT(sh_info->refcount); |
if (--sh_info->refcount == 0) { |
dealloc = true; |
link_t *cur; |
/* |
* Now walk carefully the pagemap B+tree and free/remove |
* reference from all frames found there. |
*/ |
for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) |
frame_free(ADDR2PFN((__address) node->value[i])); |
} |
} |
mutex_unlock(&sh_info->lock); |
if (dealloc) { |
btree_destroy(&sh_info->pagemap); |
free(sh_info); |
} |
} |
/* |
* Address space related syscalls. |
*/ |
/** Wrapper for as_area_create(). */ |
__native sys_as_area_create(__address address, size_t size, int flags) |
{ |
if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL)) |
return (__native) address; |
else |
return (__native) -1; |
} |
/** Wrapper for as_area_resize. */ |
__native sys_as_area_resize(__address address, size_t size, int flags) |
{ |
return (__native) as_area_resize(AS, address, size, 0); |
} |
/** Wrapper for as_area_destroy. */ |
__native sys_as_area_destroy(__address address) |
{ |
return (__native) as_area_destroy(AS, address); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/buddy.c |
---|
0,0 → 1,326 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Buddy allocator framework. |
* |
* This file contains buddy system allocator framework. |
* Specialized functions are needed for this abstract framework |
* to be useful. |
*/ |
#include <mm/buddy.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <adt/list.h> |
#include <debug.h> |
#include <print.h> |
/** Return size needed for the buddy configuration data */ |
size_t buddy_conf_size(int max_order) |
{ |
return sizeof(buddy_system_t) + (max_order + 1) * sizeof(link_t); |
} |
/** Create buddy system |
* |
* Allocate memory for and initialize new buddy system. |
* |
* @param b Preallocated buddy system control data. |
* @param max_order The biggest allocable size will be 2^max_order. |
* @param op Operations for new buddy system. |
* @param data Pointer to be used by implementation. |
* |
* @return New buddy system. |
*/ |
void buddy_system_create(buddy_system_t *b, |
__u8 max_order, |
buddy_system_operations_t *op, |
void *data) |
{ |
int i; |
ASSERT(max_order < BUDDY_SYSTEM_INNER_BLOCK); |
ASSERT(op->find_buddy); |
ASSERT(op->set_order); |
ASSERT(op->get_order); |
ASSERT(op->bisect); |
ASSERT(op->coalesce); |
ASSERT(op->mark_busy); |
/* |
* Use memory after our own structure |
*/ |
b->order = (link_t *) (&b[1]); |
for (i = 0; i <= max_order; i++) |
list_initialize(&b->order[i]); |
b->max_order = max_order; |
b->op = op; |
b->data = data; |
} |
/** Check if buddy system can allocate block |
* |
* @param b Buddy system pointer |
* @param i Size of the block (2^i) |
* |
* @return True if block can be allocated |
*/ |
bool buddy_system_can_alloc(buddy_system_t *b, __u8 i) { |
__u8 k; |
/* |
* If requested block is greater then maximal block |
* we know immediatly that we cannot satisfy the request. |
*/ |
if (i > b->max_order) return false; |
/* |
* Check if any bigger or equal order has free elements |
*/ |
for (k=i; k <= b->max_order; k++) { |
if (!list_empty(&b->order[k])) { |
return true; |
} |
} |
return false; |
} |
/** Allocate PARTICULAR block from buddy system
 *
 * Locates the free block covering the requested block, then repeatedly
 * bisects it, keeping the half that contains the request and returning
 * the other half to the buddy system, until order 0 is reached.
 *
 * @ return Block of data or NULL if no such block was found
 */
link_t *buddy_system_alloc_block(buddy_system_t *b, link_t *block)
{
	link_t *left,*right, *tmp;
	__u8 order;

	/* Find the (larger) free block that contains the requested block. */
	left = b->op->find_block(b, block, BUDDY_SYSTEM_INNER_BLOCK);
	ASSERT(left);
	list_remove(left);
	while (1) {
		if (! b->op->get_order(b,left)) {
			/* Order 0 reached: this is exactly the requested block. */
			b->op->mark_busy(b, left);
			return left;
		}
		order = b->op->get_order(b, left);

		/* Split the block; keep the half that still covers the request. */
		right = b->op->bisect(b, left);
		b->op->set_order(b, left, order-1);
		b->op->set_order(b, right, order-1);

		tmp = b->op->find_block(b, block, BUDDY_SYSTEM_INNER_BLOCK);

		if (tmp == right) {
			right = left;
			left = tmp;
		}
		ASSERT(tmp == left);
		/*
		 * Mark the kept half busy before freeing the other half so
		 * buddy_system_free() cannot coalesce the two back together.
		 */
		b->op->mark_busy(b, left);
		buddy_system_free(b, right);
		b->op->mark_available(b, left);
	}
}
/** Allocate block from buddy system. |
* |
* @param b Buddy system pointer. |
* @param i Returned block will be 2^i big. |
* |
* @return Block of data represented by link_t. |
*/ |
link_t *buddy_system_alloc(buddy_system_t *b, __u8 i) |
{ |
link_t *res, *hlp; |
ASSERT(i <= b->max_order); |
/* |
* If the list of order i is not empty, |
* the request can be immediatelly satisfied. |
*/ |
if (!list_empty(&b->order[i])) { |
res = b->order[i].next; |
list_remove(res); |
b->op->mark_busy(b, res); |
return res; |
} |
/* |
* If order i is already the maximal order, |
* the request cannot be satisfied. |
*/ |
if (i == b->max_order) |
return NULL; |
/* |
* Try to recursively satisfy the request from higher order lists. |
*/ |
hlp = buddy_system_alloc(b, i + 1); |
/* |
* The request could not be satisfied |
* from higher order lists. |
*/ |
if (!hlp) |
return NULL; |
res = hlp; |
/* |
* Bisect the block and set order of both of its parts to i. |
*/ |
hlp = b->op->bisect(b, res); |
b->op->set_order(b, res, i); |
b->op->set_order(b, hlp, i); |
/* |
* Return the other half to buddy system. Mark the first part |
* full, so that it won't coalesce again. |
*/ |
b->op->mark_busy(b, res); |
buddy_system_free(b, hlp); |
return res; |
} |
/** Return block to buddy system. |
* |
* @param b Buddy system pointer. |
* @param block Block to return. |
*/ |
void buddy_system_free(buddy_system_t *b, link_t *block) |
{ |
link_t *buddy, *hlp; |
__u8 i; |
/* |
* Determine block's order. |
*/ |
i = b->op->get_order(b, block); |
ASSERT(i <= b->max_order); |
if (i != b->max_order) { |
/* |
* See if there is any buddy in the list of order i. |
*/ |
buddy = b->op->find_buddy(b, block); |
if (buddy) { |
ASSERT(b->op->get_order(b, buddy) == i); |
/* |
* Remove buddy from the list of order i. |
*/ |
list_remove(buddy); |
/* |
* Invalidate order of both block and buddy. |
*/ |
b->op->set_order(b, block, BUDDY_SYSTEM_INNER_BLOCK); |
b->op->set_order(b, buddy, BUDDY_SYSTEM_INNER_BLOCK); |
/* |
* Coalesce block and buddy into one block. |
*/ |
hlp = b->op->coalesce(b, block, buddy); |
/* |
* Set order of the coalesced block to i + 1. |
*/ |
b->op->set_order(b, hlp, i + 1); |
/* |
* Recursively add the coalesced block to the list of order i + 1. |
*/ |
buddy_system_free(b, hlp); |
return; |
} |
} |
/* |
* Insert block into the list of order i. |
*/ |
list_append(block, &b->order[i]); |
} |
/** Prints out structure of buddy system |
* |
* @param b Pointer to buddy system |
* @param elem_size Element size |
*/ |
void buddy_system_structure_print(buddy_system_t *b, size_t elem_size) { |
index_t i; |
count_t cnt, elem_count = 0, block_count = 0; |
link_t * cur; |
printf("Order\tBlocks\tSize \tBlock size\tElems per block\n"); |
printf("-----\t------\t--------\t----------\t---------------\n"); |
for (i=0;i <= b->max_order; i++) { |
cnt = 0; |
if (!list_empty(&b->order[i])) { |
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) |
cnt++; |
} |
printf("#%zd\t%5zd\t%7zdK\t%8zdK\t%6zd\t", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i); |
if (!list_empty(&b->order[i])) { |
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) { |
b->op->print_id(b, cur); |
printf(" "); |
} |
} |
printf("\n"); |
block_count += cnt; |
elem_count += (1 << i) * cnt; |
} |
printf("-----\t------\t--------\t----------\t---------------\n"); |
printf("Buddy system contains %zd free elements (%zd blocks)\n" , elem_count, block_count); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/frame.c |
---|
0,0 → 1,1155 |
/* |
* Copyright (C) 2001-2005 Jakub Jermar |
* Copyright (C) 2005 Sergey Bondari |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Physical frame allocator. |
* |
* This file contains the physical frame allocator and memory zone management. |
* The frame allocator is built on top of the buddy allocator. |
* |
* @see buddy.c |
*/ |
/* |
* Locking order |
* |
* In order to access particular zone, the process must first lock |
* the zones.lock, then lock the zone and then unlock the zones.lock. |
* This insures, that we can fiddle with the zones in runtime without |
* affecting the processes. |
* |
*/ |
#include <typedefs.h> |
#include <arch/types.h> |
#include <mm/frame.h> |
#include <mm/as.h> |
#include <panic.h> |
#include <debug.h> |
#include <adt/list.h> |
#include <synch/spinlock.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <print.h> |
#include <align.h> |
#include <mm/slab.h> |
#include <bitops.h> |
#include <macros.h> |
/** Per-frame bookkeeping structure. */
typedef struct {
	count_t refcount;	/**< tracking of shared frames  */
	__u8 buddy_order;	/**< buddy system block order */
	link_t buddy_link;	/**< link to the next free block inside one order */
	void *parent;		/**< If allocated by slab, this points there */
} frame_t;

/** Zone of physically contiguous frames managed by one buddy system. */
typedef struct {
	SPINLOCK_DECLARE(lock);	/**< this lock protects everything below */
	pfn_t base;		/**< frame_no of the first frame in the frames array */
	count_t count;		/**< Size of zone */

	frame_t *frames;	/**< array of frame_t structures in this zone */
	count_t free_count;	/**< number of free frame_t structures */
	count_t busy_count;	/**< number of busy frame_t structures */

	buddy_system_t * buddy_system; /**< buddy system for the zone */
	int flags;		/**< zone flags; semantics defined elsewhere */
} zone_t;

/*
 * The zoneinfo.lock must be locked when accessing zoneinfo structure.
 * Some of the attributes in zone_t structures are 'read-only'
 */

/** Global registry of zones; zones_add_zone() keeps it sorted by base. */
struct {
	SPINLOCK_DECLARE(lock);	/**< protects count and info below */
	int count;		/**< number of registered zones */
	zone_t *info[ZONES_MAX]; /**< zone pointers, ascending by base */
} zones;
/*********************************/ |
/* Helper functions */ |
/** Return the index of @frame relative to the start of its zone's array. */
static inline index_t frame_index(zone_t *zone, frame_t *frame)
{
	return (index_t) (frame - zone->frames);
}
/** Return the global frame index: zone-relative index plus the zone's base. */
static inline index_t frame_index_abs(zone_t *zone, frame_t *frame)
{
	return ((index_t) (frame - zone->frames)) + zone->base;
}
/** Check that @index falls inside the zone's frames array.
 *
 * NOTE(review): if index_t is an unsigned type, `index >= 0' is always
 * true and only the upper bound is effective -- confirm index_t's
 * signedness before relying on the lower-bound check.
 */
static inline int frame_index_valid(zone_t *zone, index_t index)
{
	return index >= 0 && index < zone->count;
}
/** Compute zone-relative frame index from frame_t pointer & zone pointer.
 *
 * NOTE(review): despite the original "pfn_t" wording, this returns the
 * index relative to zone->frames; the zone's base is NOT added (compare
 * frame_index_abs()).
 */
static index_t make_frame_index(zone_t *zone, frame_t *frame)
{
	return frame - zone->frames;
}
/** Initialize frame structure
 *
 * Initialize frame structure. New frames start with refcount 1 (busy)
 * and buddy order 0.
 *
 * @param frame Frame structure to be initialized.
 */
static void frame_initialize(frame_t *frame)
{
	frame->refcount = 1;
	frame->buddy_order = 0;
}
/*************************************/ |
/* Zoneinfo functions */ |
/** |
* Insert-sort zone into zones list |
* |
* @param newzone New zone to be inserted into zone list |
* @return zone number on success, -1 on error |
*/ |
static int zones_add_zone(zone_t *newzone) |
{ |
int i,j; |
ipl_t ipl; |
zone_t *z; |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
/* Try to merge */ |
if (zones.count + 1 == ZONES_MAX) |
panic("Maximum zone(%d) count exceeded.", ZONES_MAX); |
for (i = 0; i < zones.count; i++) { |
/* Check for overflow */ |
z = zones.info[i]; |
if (overlaps(newzone->base,newzone->count, |
z->base, z->count)) { |
printf("Zones overlap!\n"); |
return -1; |
} |
if (newzone->base < z->base) |
break; |
} |
/* Move other zones up */ |
for (j = i;j < zones.count; j++) |
zones.info[j + 1] = zones.info[j]; |
zones.info[i] = newzone; |
zones.count++; |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
return i; |
} |
/** |
* Try to find a zone where can we find the frame |
* |
* @param frame Frame number contained in zone |
* @param pzone If not null, it is used as zone hint. Zone index |
* is filled into the variable on success. |
* @return Pointer to LOCKED zone containing frame |
* |
* Assume interrupts disable |
*/ |
static zone_t * find_zone_and_lock(pfn_t frame, int *pzone) |
{ |
int i; |
int hint = pzone ? *pzone : 0; |
zone_t *z; |
spinlock_lock(&zones.lock); |
if (hint >= zones.count || hint < 0) |
hint = 0; |
i = hint; |
do { |
z = zones.info[i]; |
spinlock_lock(&z->lock); |
if (z->base <= frame && z->base + z->count > frame) { |
spinlock_unlock(&zones.lock); /* Unlock the global lock */ |
if (pzone) |
*pzone = i; |
return z; |
} |
spinlock_unlock(&z->lock); |
i++; |
if (i >= zones.count) |
i = 0; |
} while(i != hint); |
spinlock_unlock(&zones.lock); |
return NULL; |
} |
/** @return True if zone can allocate specified order */ |
static int zone_can_alloc(zone_t *z, __u8 order) |
{ |
return buddy_system_can_alloc(z->buddy_system, order); |
} |
/** |
* Find AND LOCK zone that can allocate order frames |
* |
* Assume interrupts are disabled!! |
* |
* @param order Size (2^order) of free space we are trying to find |
* @param pzone Pointer to preferred zone or NULL, on return contains zone number |
*/ |
static zone_t * find_free_zone_lock(__u8 order, int *pzone) |
{ |
int i; |
zone_t *z; |
int hint = pzone ? *pzone : 0; |
spinlock_lock(&zones.lock); |
if (hint >= zones.count) |
hint = 0; |
i = hint; |
do { |
z = zones.info[i]; |
spinlock_lock(&z->lock); |
/* Check if the zone has 2^order frames area available */ |
if (zone_can_alloc(z, order)) { |
spinlock_unlock(&zones.lock); |
if (pzone) |
*pzone = i; |
return z; |
} |
spinlock_unlock(&z->lock); |
if (++i >= zones.count) |
i = 0; |
} while(i != hint); |
spinlock_unlock(&zones.lock); |
return NULL; |
} |
/********************************************/ |
/* Buddy system functions */ |
/** Buddy system find_block implementation |
* |
* Find block that is parent of current list. |
* That means go to lower addresses, until such block is found |
* |
* @param order - Order of parent must be different then this parameter!! |
*/ |
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child, |
__u8 order) |
{ |
frame_t * frame; |
zone_t * zone; |
index_t index; |
frame = list_get_instance(child, frame_t, buddy_link); |
zone = (zone_t *) b->data; |
index = frame_index(zone, frame); |
do { |
if (zone->frames[index].buddy_order != order) { |
return &zone->frames[index].buddy_link; |
} |
} while(index-- > 0); |
return NULL; |
} |
/** Buddy system print_id implementation: print the block's zone-relative index. */
static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
	zone_t *zone = (zone_t *) b->data;
	frame_t *frame = list_get_instance(block, frame_t, buddy_link);
	index_t index = frame_index(zone, frame);

	printf("%zd", index);
}
/** Buddy system find_buddy implementation |
* |
* @param b Buddy system. |
* @param block Block for which buddy should be found |
* |
* @return Buddy for given block if found |
*/ |
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block) |
{ |
frame_t * frame; |
zone_t * zone; |
index_t index; |
bool is_left, is_right; |
frame = list_get_instance(block, frame_t, buddy_link); |
zone = (zone_t *) b->data; |
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order)); |
is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame); |
is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame); |
ASSERT(is_left ^ is_right); |
if (is_left) { |
index = (frame_index(zone, frame)) + (1 << frame->buddy_order); |
} else { // if (is_right) |
index = (frame_index(zone, frame)) - (1 << frame->buddy_order); |
} |
if (frame_index_valid(zone, index)) { |
if (zone->frames[index].buddy_order == frame->buddy_order && |
zone->frames[index].refcount == 0) { |
return &zone->frames[index].buddy_link; |
} |
} |
return NULL; |
} |
/** Buddy system bisect implementation |
* |
* @param b Buddy system. |
* @param block Block to bisect |
* |
* @return right block |
*/ |
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) { |
frame_t * frame_l, * frame_r; |
frame_l = list_get_instance(block, frame_t, buddy_link); |
frame_r = (frame_l + (1 << (frame_l->buddy_order - 1))); |
return &frame_r->buddy_link; |
} |
/** Buddy system coalesce implementation |
* |
* @param b Buddy system. |
* @param block_1 First block |
* @param block_2 First block's buddy |
* |
* @return Coalesced block (actually block that represents lower address) |
*/ |
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1, |
link_t * block_2) |
{ |
frame_t *frame1, *frame2; |
frame1 = list_get_instance(block_1, frame_t, buddy_link); |
frame2 = list_get_instance(block_2, frame_t, buddy_link); |
return frame1 < frame2 ? block_1 : block_2; |
} |
/** Buddy system set_order implementation |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* @param order Order to set |
*/ |
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, __u8 order) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->buddy_order = order; |
} |
/** Buddy system get_order implementation |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* |
* @return Order of block |
*/ |
static __u8 zone_buddy_get_order(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
return frame->buddy_order; |
} |
/** Buddy system mark_busy implementation |
* |
* @param b Buddy system |
* @param block Buddy system block |
* |
*/ |
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->refcount = 1; |
} |
/** Buddy system mark_available implementation |
* |
* @param b Buddy system |
* @param block Buddy system block |
* |
*/ |
static void zone_buddy_mark_available(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->refcount = 0; |
} |
/** Operation table plugging frame-zone semantics into the generic
 * buddy allocator. Each callback resolves the buddy link back to its
 * frame_t and manipulates per-frame state (order, refcount). */
static struct buddy_system_operations zone_buddy_system_operations = {
	.find_buddy = zone_buddy_find_buddy,
	.bisect = zone_buddy_bisect,
	.coalesce = zone_buddy_coalesce,
	.set_order = zone_buddy_set_order,
	.get_order = zone_buddy_get_order,
	.mark_busy = zone_buddy_mark_busy,
	.mark_available = zone_buddy_mark_available,
	.find_block = zone_buddy_find_block,
	.print_id = zone_buddy_print_id
};
/*************************************/ |
/* Zone functions */ |
/** Allocate frame in particular zone |
* |
* Assume zone is locked |
* Panics if allocation is impossible. |
* |
* @param zone Zone to allocate from. |
* @param order Allocate exactly 2^order frames. |
* |
* @return Frame index in zone |
* |
*/ |
static pfn_t zone_frame_alloc(zone_t *zone, __u8 order) |
{ |
pfn_t v; |
link_t *tmp; |
frame_t *frame; |
/* Allocate frames from zone buddy system */ |
tmp = buddy_system_alloc(zone->buddy_system, order); |
ASSERT(tmp); |
/* Update zone information. */ |
zone->free_count -= (1 << order); |
zone->busy_count += (1 << order); |
/* Frame will be actually a first frame of the block. */ |
frame = list_get_instance(tmp, frame_t, buddy_link); |
/* get frame address */ |
v = make_frame_index(zone, frame); |
return v; |
} |
/** Free frame from zone |
* |
* Assume zone is locked |
* |
* @param zone Pointer to zone from which the frame is to be freed |
* @param frame_idx Frame index relative to zone |
*/ |
static void zone_frame_free(zone_t *zone, index_t frame_idx) |
{ |
frame_t *frame; |
__u8 order; |
frame = &zone->frames[frame_idx]; |
/* remember frame order */ |
order = frame->buddy_order; |
ASSERT(frame->refcount); |
if (!--frame->refcount) { |
buddy_system_free(zone->buddy_system, &frame->buddy_link); |
/* Update zone information. */ |
zone->free_count += (1 << order); |
zone->busy_count -= (1 << order); |
} |
} |
/** Return frame from zone */ |
static frame_t * zone_get_frame(zone_t *zone, index_t frame_idx) |
{ |
ASSERT(frame_idx < zone->count); |
return &zone->frames[frame_idx]; |
} |
/** Mark frame in zone unavailable to allocation */ |
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx) |
{ |
frame_t *frame; |
link_t *link; |
frame = zone_get_frame(zone, frame_idx); |
if (frame->refcount) |
return; |
link = buddy_system_alloc_block(zone->buddy_system, |
&frame->buddy_link); |
ASSERT(link); |
zone->free_count--; |
} |
/**
 * Join 2 zones
 *
 * Expect zone_t *z to point to space at least zone_conf_size large
 *
 * Assume z1 & z2 are locked
 *
 * @param z Target zone structure pointer
 * @param z1 Zone to merge
 * @param z2 Zone to merge
 */
static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
{
	__u8 max_order;
	int i, z2idx;
	pfn_t frame_idx;
	frame_t *frame;

	ASSERT(!overlaps(z1->base,z1->count,z2->base,z2->count));
	ASSERT(z1->base < z2->base);

	spinlock_initialize(&z->lock, "zone_lock");
	/* The new zone spans both originals including any gap between them. */
	z->base = z1->base;
	z->count = z2->base+z2->count - z1->base;
	/* Only flags common to both zones survive the merge. */
	z->flags = z1->flags & z2->flags;
	z->free_count = z1->free_count + z2->free_count;
	z->busy_count = z1->busy_count + z2->busy_count;

	max_order = fnzb(z->count);

	/* Buddy system metadata lives immediately after the zone_t,
	 * followed by the frame table (same layout as zone_construct()). */
	z->buddy_system = (buddy_system_t *)&z[1];
	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);
	z->frames = (frame_t *)((void *)z->buddy_system+buddy_conf_size(max_order));
	for (i = 0; i < z->count; i++) {
		/* This marks all frames busy */
		frame_initialize(&z->frames[i]);
	}
	/* Copy frames from both zones to preserve full frame orders,
	 * parents etc. Set all free frames with refcount=0 to 1, because
	 * we add all free frames to buddy allocator later again, clear
	 * order to 0. Don't set busy frames with refcount=0, as they
	 * will not be reallocated during merge and it would make later
	 * problems with allocation/free.
	 */
	for (i=0; i<z1->count; i++)
		z->frames[i] = z1->frames[i];
	for (i=0; i < z2->count; i++) {
		/* z2's frames land at an offset equal to the pfn gap. */
		z2idx = i + (z2->base - z1->base);
		z->frames[z2idx] = z2->frames[i];
	}
	i = 0;
	while (i < z->count) {
		if (z->frames[i].refcount) {
			/* skip busy frames (a busy block covers 2^order frames) */
			i += 1 << z->frames[i].buddy_order;
		} else { /* Free frames, set refcount=1 */
			/* All free frames have refcount=0, we need not
			 * to check the order */
			z->frames[i].refcount = 1;
			z->frames[i].buddy_order = 0;
			i++;
		}
	}
	/* Add free blocks from the 2 original zones: drain each source
	 * zone frame by frame and release the frames into the new
	 * zone's buddy system (which re-coalesces them). */
	while (zone_can_alloc(z1, 0)) {
		frame_idx = zone_frame_alloc(z1, 0);
		frame = &z->frames[frame_idx];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
	while (zone_can_alloc(z2, 0)) {
		frame_idx = zone_frame_alloc(z2, 0);
		frame = &z->frames[frame_idx + (z2->base-z1->base)];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
}
/** Return old configuration frames into the zone
 *
 * We have several cases
 * - the conf. data is outside of zone -> exit, shall we call frame_free??
 * - the conf. data was created by zone_create or
 *   updated with reduce_region -> free every frame
 *
 * @param newzone The actual zone where freeing should occur
 * @param oldzone Pointer to old zone configuration data that should
 *                be freed from new zone
 */
static void return_config_frames(zone_t *newzone, zone_t *oldzone)
{
	pfn_t pfn;
	frame_t *frame;
	count_t cframes;
	int i;

	pfn = ADDR2PFN((__address)KA2PA(oldzone));
	cframes = SIZE2FRAMES(zone_conf_size(oldzone->count));

	/* Config data lies outside the new zone: nothing to free here. */
	if (pfn < newzone->base || pfn >= newzone->base + newzone->count)
		return;

	frame = &newzone->frames[pfn - newzone->base];
	/* Config frames are expected to be order 0 (see zone_reduce_region). */
	ASSERT(!frame->buddy_order);

	for (i=0; i < cframes; i++) {
		/* Pre-increment busy_count: zone_frame_free() decrements
		 * it, and these frames were never counted as busy. */
		newzone->busy_count++;
		zone_frame_free(newzone, pfn+i-newzone->base);
	}
}
/** Reduce allocated block to count of order 0 frames
 *
 * The allocated block need 2^order frames of space. Reduce all frames
 * in block to order 0 and free the unneeded frames. This means, that
 * when freeing the previously allocated block starting with frame_idx,
 * you have to free every frame.
 *
 * @param zone
 * @param frame_idx Index to block
 * @param count Allocated space in block
 */
static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count)
{
	count_t i;
	__u8 order;
	frame_t *frame;

	ASSERT(frame_idx+count < zone->count);

	order = zone->frames[frame_idx].buddy_order;
	/* The block must be large enough to hold the kept frames. */
	ASSERT((1 << order) >= count);

	/* Reduce all blocks to order 0 */
	for (i=0; i < (1 << order); i++) {
		frame = &zone->frames[i + frame_idx];
		frame->buddy_order = 0;
		/* Make every frame look individually allocated... */
		if (! frame->refcount)
			frame->refcount = 1;
		ASSERT(frame->refcount == 1);
	}
	/* ...then free the tail frames beyond the kept count. */
	for (i=count; i < (1 << order); i++) {
		zone_frame_free(zone, i + frame_idx);
	}
}
/** Merge zones z1 and z2
 *
 * - the zones must be 2 zones with no zone existing in between,
 *   which means that z2 = z1+1
 *
 * - When you create a new zone, the frame allocator configuration does
 *   not need to be 2^order in size. Once the allocator is running it is
 *   no longer possible, merged configuration data occupies more space :-/
 */
void zone_merge(int z1, int z2)
{
	ipl_t ipl;
	zone_t *zone1, *zone2, *newzone;
	int cframes;
	__u8 order;
	int i;
	pfn_t pfn;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	/* Validate zone indices before dereferencing zones.info[]. */
	if (z1 < 0 || z1 >= zones.count || z2 < 0 || z2 >= zones.count)
		goto errout;
	/* We can join only 2 zones with none existing inbetween */
	if (z2-z1 != 1)
		goto errout;

	zone1 = zones.info[z1];
	zone2 = zones.info[z2];
	spinlock_lock(&zone1->lock);
	spinlock_lock(&zone2->lock);

	/* Number of frames the merged zone's config data will occupy. */
	cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base));
	/* Round up to the smallest 2^order block that can hold it. */
	if (cframes == 1)
		order = 0;
	else
		order = fnzb(cframes - 1) + 1;

	/* Allocate zonedata inside one of the zones */
	if (zone_can_alloc(zone1, order))
		pfn = zone1->base + zone_frame_alloc(zone1, order);
	else if (zone_can_alloc(zone2, order))
		pfn = zone2->base + zone_frame_alloc(zone2, order);
	else
		goto errout2;

	newzone = (zone_t *)PA2KA(PFN2ADDR(pfn));

	_zone_merge(newzone, zone1, zone2);

	/* Free unneeded config frames (the 2^order block was rounded up) */
	zone_reduce_region(newzone, pfn - newzone->base, cframes);
	/* Subtract zone information from busy frames */
	newzone->busy_count -= cframes;

	/* Replace existing zones in zoneinfo list */
	zones.info[z1] = newzone;
	for (i = z2 + 1; i < zones.count; i++)
		zones.info[i - 1] = zones.info[i];
	zones.count--;

	/* Free old zone information */
	return_config_frames(newzone, zone1);
	return_config_frames(newzone, zone2);
	/* Note: on success we deliberately fall through to errout2 to
	 * release the old zones' spinlocks one last time. */
errout2:
	/* Nobody is allowed to enter to zone, so we are safe
	 * to touch the spinlocks last time */
	spinlock_unlock(&zone1->lock);
	spinlock_unlock(&zone2->lock);
errout:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}
/** |
* Merge all zones into one big zone |
* |
* It is reasonable to do this on systems whose bios reports parts in chunks, |
* so that we could have 1 zone (it's faster). |
*/ |
void zone_merge_all(void) |
{ |
int count = zones.count; |
while (zones.count > 1 && --count) { |
zone_merge(0,1); |
break; |
} |
} |
/** Create frame zone
 *
 * Create new frame zone.
 *
 * @param start Physical address of the first frame within the zone.
 * @param count Count of frames in zone
 * @param z Address of configuration information of zone
 * @param flags Zone flags.
 *
 * @return Initialized zone.
 */
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
{
	int i;
	__u8 max_order;

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = start;
	z->count = count;
	z->flags = flags;
	z->free_count = count;
	z->busy_count = 0;
	/*
	 * Compute order for buddy system, initialize
	 */
	max_order = fnzb(count);
	/* Buddy system metadata lives immediately after the zone_t. */
	z->buddy_system = (buddy_system_t *)&z[1];

	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);

	/* Allocate frames _after_ the conframe */
	/* Check sizes */
	z->frames = (frame_t *)((void *)z->buddy_system+buddy_conf_size(max_order));
	/* First pass: initialize ALL frame structures (marks them busy)
	 * before any is released, since freeing a frame may make the
	 * buddy system inspect neighbouring frames. */
	for (i = 0; i<count; i++) {
		frame_initialize(&z->frames[i]);
	}
	/* Stuffing frames: second pass releases every frame into the
	 * buddy system, which coalesces them into maximal blocks. */
	for (i = 0; i < count; i++) {
		z->frames[i].refcount = 0;
		buddy_system_free(z->buddy_system, &z->frames[i].buddy_link);
	}
}
/** Compute configuration data size for zone |
* |
* @param count Size of zone in frames |
* @return Size of zone configuration info (in bytes) |
*/ |
__address zone_conf_size(count_t count) |
{ |
int size = sizeof(zone_t) + count*sizeof(frame_t); |
int max_order; |
max_order = fnzb(count); |
size += buddy_conf_size(max_order); |
return size; |
} |
/** Create and add zone to system
 *
 * @param start First frame number (absolute)
 * @param count Size of zone in frames
 * @param confframe Where configuration frames are supposed to be.
 *                  Automatically checks, that we will not disturb the
 *                  kernel and possibly init.
 *                  If confframe is given _outside_ this zone, it is expected,
 *                  that the area is already marked BUSY and big enough
 *                  to contain zone_conf_size() amount of data.
 *                  If the confframe is inside the area, the zone free frame
 *                  information is modified not to include it.
 *
 * @return Zone number or -1 on error
 */
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
{
	zone_t *z;
	__address addr;
	count_t confcount;
	int i;
	int znum;

	/* Theoretically we could have here 0, practically make sure
	 * nobody tries to do that. If some platform requires, remove
	 * the assert
	 */
	ASSERT(confframe);
	/* If conframe is supposed to be inside our zone, then make sure
	 * it does not span kernel & init
	 */
	confcount = SIZE2FRAMES(zone_conf_size(count));
	if (confframe >= start && confframe < start+count) {
		/* Slide confframe forward until the config area clears
		 * both the kernel image and all init tasks. */
		for (;confframe < start + count; confframe++) {
			addr = PFN2ADDR(confframe);
			if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size))
				continue;

			bool overlap = false;
			/* NOTE(review): this inner 'i' shadows the outer
			 * int i; harmless here, but worth renaming. */
			count_t i;
			for (i = 0; i < init.cnt; i++)
				if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
					overlap = true;
					break;
				}
			if (overlap)
				continue;

			break;
		}
		if (confframe >= start + count)
			panic("Cannot find configuration data for zone.");
	}

	z = (zone_t *)PA2KA(PFN2ADDR(confframe));
	zone_construct(start, count, z, flags);
	znum = zones_add_zone(z);
	if (znum == -1)
		return -1;

	/* If confdata in zone, mark as unavailable */
	if (confframe >= start && confframe < start+count)
		for (i=confframe; i<confframe+confcount; i++) {
			zone_mark_unavailable(z, i - z->base);
		}
	return znum;
}
/***************************************/ |
/* Frame functions */ |
/** Set parent of frame */ |
void frame_set_parent(pfn_t pfn, void *data, int hint) |
{ |
zone_t *zone = find_zone_and_lock(pfn, &hint); |
ASSERT(zone); |
zone_get_frame(zone, pfn-zone->base)->parent = data; |
spinlock_unlock(&zone->lock); |
} |
/** Return parent pointer of the frame identified by pfn. */
void * frame_get_parent(pfn_t pfn, int hint)
{
	zone_t *host = find_zone_and_lock(pfn, &hint);
	void *parent;

	ASSERT(host);

	parent = zone_get_frame(host, pfn - host->base)->parent;
	spinlock_unlock(&host->lock);

	return parent;
}
/** Allocate power-of-two frames of physical memory.
 *
 * @param order Allocate exactly 2^order frames.
 * @param flags Flags for host zone selection and address processing.
 * @param status Allocation status (FRAME_OK on success), unused if NULL.
 * @param pzone Preferred zone
 *
 * @return Allocated frame.
 *
 */
pfn_t frame_alloc_generic(__u8 order, int flags, int *status, int *pzone)
{
	ipl_t ipl;
	int freed;
	pfn_t v;
	zone_t *zone;

loop:
	ipl = interrupts_disable();
	/*
	 * First, find suitable frame zone.
	 */
	zone = find_free_zone_lock(order, pzone);
	/* If no memory, reclaim some slab memory,
	   if it does not help, reclaim all */
	if (!zone && !(flags & FRAME_NO_RECLAIM)) {
		/* Light reclaim first, full reclaim as a last resort. */
		freed = slab_reclaim(0);
		if (freed)
			zone = find_free_zone_lock(order, pzone);
		if (!zone) {
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			if (freed)
				zone = find_free_zone_lock(order, pzone);
		}
	}
	if (!zone) {
		if (flags & FRAME_PANIC)
			panic("Can't allocate frame.\n");

		/*
		 * TODO: Sleep until frames are available again.
		 */
		interrupts_restore(ipl);

		if (flags & FRAME_ATOMIC) {
			ASSERT(status != NULL);
			if (status)
				*status = FRAME_NO_MEMORY;
			/* NOTE(review): NULL used as a pfn_t value; should
			 * arguably be 0 — same bits, but wrong type. */
			return NULL;
		}

		/* Non-atomic sleep path is not implemented; the goto
		 * below is unreachable until it is. */
		panic("Sleep not implemented.\n");
		goto loop;
	}

	v = zone_frame_alloc(zone, order);
	/* Convert zone-relative index to an absolute pfn. */
	v += zone->base;

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);

	if (status)
		*status = FRAME_OK;
	return v;
}
/** Free a frame. |
* |
* Find respective frame structure for supplied PFN. |
* Decrement frame reference count. |
* If it drops to zero, move the frame structure to free list. |
* |
* @param pfn Frame number of the frame to be freed. |
*/ |
void frame_free(pfn_t pfn) |
{ |
ipl_t ipl; |
zone_t *zone; |
ipl = interrupts_disable(); |
/* |
* First, find host frame zone for addr. |
*/ |
zone = find_zone_and_lock(pfn,NULL); |
ASSERT(zone); |
zone_frame_free(zone, pfn-zone->base); |
spinlock_unlock(&zone->lock); |
interrupts_restore(ipl); |
} |
/** Add reference to frame. |
* |
* Find respective frame structure for supplied PFN and |
* increment frame reference count. |
* |
* @param pfn Frame number of the frame to be freed. |
*/ |
void frame_reference_add(pfn_t pfn) |
{ |
ipl_t ipl; |
zone_t *zone; |
frame_t *frame; |
ipl = interrupts_disable(); |
/* |
* First, find host frame zone for addr. |
*/ |
zone = find_zone_and_lock(pfn,NULL); |
ASSERT(zone); |
frame = &zone->frames[pfn-zone->base]; |
frame->refcount++; |
spinlock_unlock(&zone->lock); |
interrupts_restore(ipl); |
} |
/** Mark given range unavailable in frame zones */ |
void frame_mark_unavailable(pfn_t start, count_t count) |
{ |
int i; |
zone_t *zone; |
int prefzone = 0; |
for (i=0; i < count; i++) { |
zone = find_zone_and_lock(start+i,&prefzone); |
if (!zone) /* PFN not found */ |
continue; |
zone_mark_unavailable(zone, start+i-zone->base); |
spinlock_unlock(&zone->lock); |
} |
} |
/** Initialize physical memory management
 *
 * Initialize physical memory management.
 */
void frame_init(void)
{
	/* Global zone registry is set up once, by the boot CPU. */
	if (config.cpu_active == 1) {
		zones.count = 0;
		spinlock_initialize(&zones.lock,"zones_glob_lock");
	}
	/* Tell the architecture to create some memory */
	frame_arch_init();
	if (config.cpu_active == 1) {
		/* Reserve the frames covering the kernel image. */
		pfn_t firstframe = ADDR2PFN(KA2PA(config.base));
		pfn_t lastframe = ADDR2PFN(KA2PA(config.base+config.kernel_size));
		frame_mark_unavailable(firstframe,lastframe-firstframe+1);

		/* Reserve the frames covering each init task image. */
		count_t i;
		for (i = 0; i < init.cnt; i++)
			frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size));

		/* Black list first frame, as allocating NULL would
		 * fail on some places */
		frame_mark_unavailable(0, 1);
	}
}
/** Prints list of zones |
* |
*/ |
void zone_print_list(void) { |
zone_t *zone = NULL; |
int i; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
printf("# Base address\tFree Frames\tBusy Frames\n"); |
printf(" ------------\t-----------\t-----------\n"); |
for (i = 0; i < zones.count; i++) { |
zone = zones.info[i]; |
spinlock_lock(&zone->lock); |
printf("%d: %.*p \t%10zd\t%10zd\n", i, sizeof(__address) * 2, PFN2ADDR(zone->base), zone->free_count, zone->busy_count); |
spinlock_unlock(&zone->lock); |
} |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
} |
/** Prints zone details. |
* |
* @param num Zone base address or zone number. |
*/ |
void zone_print_one(int num) { |
zone_t *zone = NULL; |
ipl_t ipl; |
int i; |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
for (i = 0; i < zones.count; i++) { |
if (i == num || PFN2ADDR(zones.info[i]->base) == num) { |
zone = zones.info[i]; |
break; |
} |
} |
if (!zone) { |
printf("Zone not found.\n"); |
goto out; |
} |
spinlock_lock(&zone->lock); |
printf("Memory zone information\n"); |
printf("Zone base address: %#.*p\n", sizeof(__address) * 2, PFN2ADDR(zone->base)); |
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10); |
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10); |
printf("Available space: %zd (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10); |
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE); |
spinlock_unlock(&zone->lock); |
out: |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/slab.c |
---|
0,0 → 1,912 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Slab allocator. |
* |
* The slab allocator is closely modelled after OpenSolaris slab allocator. |
* @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/ |
* |
* with the following exceptions: |
* @li empty slabs are deallocated immediately |
* (in Linux they are kept in linked list, in Solaris ???) |
* @li empty magazines are deallocated when not needed |
* (in Solaris they are held in linked list in slab cache) |
* |
* Following features are not currently supported but would be easy to do: |
* @li cache coloring |
* @li dynamic magazine growing (different magazine sizes are already |
* supported, but we would need to adjust allocation strategy) |
* |
* The slab allocator supports per-CPU caches ('magazines') to facilitate |
* good SMP scaling. |
* |
* When a new object is being allocated, it is first checked, if it is |
* available in a CPU-bound magazine. If it is not found there, it is |
* allocated from a CPU-shared slab - if a partially full one is found, |
* it is used, otherwise a new one is allocated. |
* |
* When an object is being deallocated, it is put to a CPU-bound magazine. |
* If there is no such magazine, a new one is allocated (if this fails, |
* the object is deallocated into slab). If the magazine is full, it is |
* put into cpu-shared list of magazines and a new one is allocated. |
* |
* The CPU-bound magazine is actually a pair of magazines in order to avoid |
* thrashing when somebody is allocating/deallocating 1 item at the magazine |
* size boundary. LIFO order is enforced, which should avoid fragmentation |
* as much as possible. |
* |
* Every cache contains list of full slabs and list of partially full slabs. |
* Empty slabs are immediately freed (thrashing will be avoided because |
* of magazines). |
* |
* The slab information structure is kept inside the data area, if possible. |
* The cache can be marked that it should not use magazines. This is used |
* only for slab related caches to avoid deadlocks and infinite recursion |
 * (the slab allocator uses itself for allocating all its control structures).
* |
* The slab allocator allocates a lot of space and does not free it. When |
* the frame allocator fails to allocate a frame, it calls slab_reclaim(). |
* It tries 'light reclaim' first, then brutal reclaim. The light reclaim |
* releases slabs from cpu-shared magazine-list, until at least 1 slab |
* is deallocated in each cache (this algorithm should probably change). |
* The brutal reclaim removes all cached objects, even from CPU-bound |
* magazines. |
* |
* TODO:@n |
* For better CPU-scaling the magazine allocation strategy should |
* be extended. Currently, if the cache does not have magazine, it asks |
* for non-cpu cached magazine cache to provide one. It might be feasible |
* to add cpu-cached magazine cache (which would allocate it's magazines |
* from non-cpu-cached mag. cache). This would provide a nice per-cpu |
* buffer. The other possibility is to use the per-cache |
* 'empty-magazine-list', which decreases competing for 1 per-system |
* magazine cache. |
* |
* @li it might be good to add granularity of locks even to slab level, |
* we could then try_spinlock over all partial slabs and thus improve |
* scalability even on slab level |
*/ |
#include <synch/spinlock.h> |
#include <mm/slab.h> |
#include <adt/list.h> |
#include <memstr.h> |
#include <align.h> |
#include <mm/frame.h> |
#include <config.h> |
#include <print.h> |
#include <arch.h> |
#include <panic.h> |
#include <debug.h> |
#include <bitops.h> |
/* Protects slab_cache_list. */
SPINLOCK_INITIALIZE(slab_cache_lock);
/* List of all slab caches in the system (for reclaim and printing). */
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want per-cpu cache, so do not make it static
 * - using slab for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc, one per power-of-two size class
 * (SLAB_MIN_MALLOC_W .. SLAB_MAX_MALLOC_W). */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
/* Human-readable names matching the malloc_caches size classes. */
char *malloc_names[] =  {
	"malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K","malloc-256K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache; /**< Pointer to parent cache */
	link_t link;         /**< List of full/partial slabs */
	void *start;         /**< Start address of first available item */
	count_t available;   /**< Count of available items in this slab */
	index_t nextavail;   /**< The index of next available item */
}slab_t;

#ifdef CONFIG_DEBUG
/* Nonzero once slab_cache_init() has run; guards early-boot checks. */
static int _slab_initialized = 0;
#endif
/**************************************/ |
/* Slab allocation functions */ |
/**
 * Allocate frames for slab space and initialize
 *
 * Allocates 2^cache->order frames, obtains a slab descriptor (either
 * external or placed at the end of the data area), links every frame
 * back to the descriptor via its parent pointer and builds the
 * free-item chain inside the data area.
 *
 * @return New slab descriptor, or NULL on allocation failure.
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	int status;
	pfn_t pfn;
	int zone=0;
	
	pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
	/* NOTE(review): data is computed before status is checked; the
	 * value is not dereferenced here, but checking first would be
	 * cleaner — confirm pfn is harmless on failure. */
	data = (void *) PA2KA(PFN2ADDR(pfn));
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		/* External descriptor: allocate it from its own cache. */
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(ADDR2PFN(KA2PA(data)));
			return NULL;
		}
	} else {
		/* Internal descriptor: carve it off the end of the data area. */
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}
	/* Fill in slab structures */
	for (i=0; i < (1 << cache->order); i++)
		frame_set_parent(pfn+i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	/* Chain free items: each free slot stores the index of the next. */
	for (i=0; i<cache->objects;i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}
/** |
* Deallocate space associated with slab |
* |
* @return number of freed frames |
*/ |
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab) |
{ |
frame_free(ADDR2PFN(KA2PA(slab->start))); |
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) |
slab_free(slab_extern_cache, slab); |
atomic_dec(&cache->allocated_slabs); |
return 1 << cache->order; |
} |
/** Map object to slab structure */ |
static slab_t * obj2slab(void *obj) |
{ |
return (slab_t *)frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); |
} |
/**************************************/ |
/* Slab functions */ |
/**
 * Return object to slab and call a destructor
 *
 * @param slab If the caller knows directly slab of the object, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	/* Destructor runs outside the slablock. */
	if (cache->destructor)
		freed = cache->destructor(obj);
	
	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	/* Push the slot back onto the slab's free-item chain. */
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}
/**
 * Take new object from slab or create new if needed
 *
 * @return Object address or null
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1-level deep
		 */
		/* Drop the lock while allocating new slab space — this
		 * opens a window in which other CPUs may also allocate. */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	/* Pop the head of the slab's free-item chain. */
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}
/**************************************/ |
/* CPU-Cache slab functions */ |
/** |
* Finds a full magazine in cache, takes it from list |
* and returns it |
* |
* @param first If true, return first, else last mag |
*/ |
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache, |
int first) |
{ |
slab_magazine_t *mag = NULL; |
link_t *cur; |
spinlock_lock(&cache->maglock); |
if (!list_empty(&cache->magazines)) { |
if (first) |
cur = cache->magazines.next; |
else |
cur = cache->magazines.prev; |
mag = list_get_instance(cur, slab_magazine_t, link); |
list_remove(&mag->link); |
atomic_dec(&cache->magazine_counter); |
} |
spinlock_unlock(&cache->maglock); |
return mag; |
} |
/** Prepend magazine to magazine list in cache */ |
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) |
{ |
spinlock_lock(&cache->maglock); |
list_prepend(&mag->link, &cache->magazines); |
atomic_inc(&cache->magazine_counter); |
spinlock_unlock(&cache->maglock); |
} |
/** |
* Free all objects in magazine and free memory associated with magazine |
* |
* @return Number of freed pages |
*/ |
static count_t magazine_destroy(slab_cache_t *cache, |
slab_magazine_t *mag) |
{ |
int i; |
count_t frames = 0; |
for (i=0;i < mag->busy; i++) { |
frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
atomic_dec(&cache->cached_objs); |
} |
slab_free(&mag_cache, mag); |
return frames; |
} |
/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		/* Current is empty; swap in last if it has objects. */
		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	/* Both locals are empty at this point: destroy the old 'last'
	 * and shift current -> last, imported -> current. */
	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}
/**
 * Try to find object in CPU-cache magazines
 *
 * Fast allocation path: pops one object off this CPU's current full
 * magazine under the per-CPU magazine lock.  Safe to call before CPUs
 * are known (returns NULL when CPU is not yet set).
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	/* Counter update is atomic, done outside the lock. */
	atomic_dec(&cache->cached_objs);

	return obj;
}
/**
 * Assure that the current magazine is empty, return pointer to it, or NULL if
 * no empty magazine is available and cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to processor.
 * First try the current.
 * If full, try the last.
 * If full, put to magazines list.
 * allocate new, exchange last & current
 *
 * ("empty" here means "has room for at least one more object".)
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			/* Swap roles: the non-full 'last' becomes current. */
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}
/**
 * Put object into CPU-cache magazine
 *
 * Fast deallocation path: pushes the object onto this CPU's current
 * magazine under the per-CPU magazine lock.  Fails (so the caller can
 * fall back to slab_obj_destroy()) when CPUs are not yet initialized or
 * when no non-full magazine can be obtained without sleeping.
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}
/**************************************/ |
/* Slab cache functions */ |
/** Return number of objects that fit in certain cache size */ |
static int comp_objects(slab_cache_t *cache) |
{ |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size; |
else |
return (PAGE_SIZE << cache->order) / cache->size; |
} |
/** Return wasted space in slab */ |
static int badness(slab_cache_t *cache) |
{ |
int objects; |
int ssize; |
objects = comp_objects(cache); |
ssize = PAGE_SIZE << cache->order; |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
ssize -= sizeof(slab_t); |
return ssize - objects*cache->size; |
} |
/**
 * Initialize mag_cache structure in slab cache
 *
 * Allocates and zeroes one slab_mag_cache_t per configured CPU and
 * initializes each per-CPU magazine lock.  Requires that the slab
 * subsystem is fully up (_slab_initialized >= 2, i.e. after
 * slab_enable_cpucache()).
 *
 * NOTE(review): the malloc() result is not checked; with flags 0 the
 * allocator is documented (see slab_alloc()) to always return memory —
 * confirm this contract holds for malloc() as well.
 */
static void make_magcache(slab_cache_t *cache)
{
	int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb((__address)&cache->mag_cache[i],
			sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
				    "slab_maglock_cpu");
	}
}
/** Initialize allocated memory as a slab cache
 *
 * Zeroes the descriptor, records the cache parameters (object size is
 * rounded up to the requested alignment, at least word size), computes
 * the slab order and object count so that internal fragmentation stays
 * within SLAB_MAX_BADNESS, and links the cache into the global cache
 * list.
 *
 * @param cache Preallocated, uninitialized cache descriptor.
 * @param name Cache name (the string is referenced, not copied).
 * @param size Requested object size.
 * @param align Requested object alignment (0 = default).
 * @param constructor Optional object constructor.
 * @param destructor Optional object destructor.
 * @param flags SLAB_CACHE_* flags.
 */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	/* Objects are at least word-sized and word-aligned. */
	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = SIZE2FRAMES(cache->size);
	/* We need the 2^order >= pages */
	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages-1)+1;

	/* Grow the slab until wasted space is acceptable. */
	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}
/** Create slab cache */ |
slab_cache_t * slab_cache_create(char *name, |
size_t size, |
size_t align, |
int (*constructor)(void *obj, int kmflag), |
int (*destructor)(void *obj), |
int flags) |
{ |
slab_cache_t *cache; |
cache = slab_alloc(&slab_cache_cache, 0); |
_slab_cache_create(cache, name, size, align, constructor, destructor, |
flags); |
return cache; |
} |
/**
 * Reclaim space occupied by objects that are already free
 *
 * Destroys magazines from the cache-wide magazine list, and — for
 * SLAB_RECLAIM_ALL — also the per-CPU current/last magazines.  Without
 * SLAB_RECLAIM_ALL it stops as soon as at least one page was freed.
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to original magazine count to avoid
	 * endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Free cpu-bound magazines */
		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}
/** Check that there are no slabs and remove cache from system
 *
 * Unlinks the cache from the global list, flushes all magazines, and
 * frees the descriptor.  Panics if any slab still holds live objects.
 * The caller guarantees no concurrent use of the cache.
 */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove cache from link, so that we don't need
	 * to disable interrupts later
	 */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) \
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}
/** Allocate new object from cache - if no flags given, always returns
    memory
 *
 * Tries the lockless-ish per-CPU magazine path first (unless the cache
 * has SLAB_CACHE_NOMAGAZINE), then falls back to allocating straight
 * from a slab.  Interrupts are disabled for the duration to avoid
 * deadlocks with allocation from interrupt handlers.
 *
 * @param flags FRAME_* allocation flags (0 = blocking, never fails).
 * @return Object, or NULL only when non-blocking flags were given.
 */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}
/** Return object to cache, use slab if known
 *
 * Tries to cache the object in a per-CPU magazine; when that fails (or
 * the cache has SLAB_CACHE_NOMAGAZINE) the object is destroyed directly
 * into its slab.
 *
 * @param slab Object's slab if the caller knows it, otherwise NULL
 *             (slab_obj_destroy() will derive it).
 */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
	    || magazine_obj_put(cache, obj)) {
		slab_obj_destroy(cache, obj, slab);
	}

	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}
/** Return slab object to cache */ |
void slab_free(slab_cache_t *cache, void *obj) |
{ |
_slab_free(cache,obj,NULL); |
} |
/* Go through all caches and reclaim what is possible
 *
 * Walks the global cache list under slab_cache_lock and reclaims free
 * cached objects from each cache.
 *
 * @param flags Passed through to _slab_reclaim() (SLAB_RECLAIM_ALL etc.).
 * @return Total number of freed pages.
 */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add assert, that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}
/* Print list of slabs
 *
 * Dumps one line of statistics per cache: object size, pages per slab,
 * objects per slab, slab/object counters and whether the slab_t header
 * lives inside ("In") or outside ("Out") the slab.  Runs with interrupts
 * disabled while holding slab_cache_lock.
 */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("slab name\t Osize\t Pages\t Obj/pg\t Slabs\t Cached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}
/** Bootstrap the slab allocator.
 *
 * Creates the self-hosting caches in dependency order: the magazine
 * cache and the cache-descriptor cache (both magazine-less and with
 * in-slab headers, since magazines do not exist yet), the external
 * slab_t cache, and finally the power-of-two caches backing malloc()
 * (sizes 2^SLAB_MIN_MALLOC_W .. 2^SLAB_MAX_MALLOC_W).  Per-CPU
 * magazines for the MAGDEFERRED caches are enabled later by
 * slab_enable_cpucache().
 */
void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}
/** Enable cpu_cache
 *
 * Kernel calls this function, when it knows the real number of
 * processors.
 * Allocate slab for cpucache and enable it on all existing
 * slabs that are SLAB_CACHE_MAGDEFERRED
 *
 * Clears the MAGDEFERRED flag on each converted cache so it starts
 * using per-CPU magazines.
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		/* Skip caches that are not (purely) magazine-deferred. */
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}
/**************************************/ |
/* kalloc/kfree functions */ |
void * malloc(unsigned int size, int flags) |
{ |
int idx; |
ASSERT(_slab_initialized); |
ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W)); |
if (size < (1 << SLAB_MIN_MALLOC_W)) |
size = (1 << SLAB_MIN_MALLOC_W); |
idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1; |
return slab_alloc(malloc_caches[idx], flags); |
} |
void free(void *obj) |
{ |
slab_t *slab; |
if (!obj) return; |
slab = obj2slab(obj); |
_slab_free(slab->cache, obj, slab); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/tlb.c |
---|
0,0 → 1,190 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Generic TLB shootdown algorithm. |
* |
* The algorithm implemented here is based on the CMU TLB shootdown |
* algorithm and is further simplified (e.g. all CPUs receive all TLB |
* shootdown messages). |
*/ |
#include <mm/tlb.h> |
#include <mm/asid.h> |
#include <arch/mm/tlb.h> |
#include <smp/ipi.h> |
#include <synch/spinlock.h> |
#include <typedefs.h> |
#include <atomic.h> |
#include <arch/interrupt.h> |
#include <config.h> |
#include <arch.h> |
#include <panic.h> |
#include <debug.h> |
/** |
* This lock is used for synchronisation between sender and |
* recipients of TLB shootdown message. It must be acquired |
* before CPU structure lock. |
*/ |
SPINLOCK_INITIALIZE(tlblock); |
/** Initialize the TLB subsystem by delegating to architecture code. */
void tlb_init(void)
{
	tlb_arch_init();
}
#ifdef CONFIG_SMP |
/** Send TLB shootdown message.
 *
 * This function attempts to deliver TLB shootdown message
 * to all other processors.
 *
 * This function must be called with interrupts disabled.
 *
 * On return, tlblock is still held; the caller is expected to finish
 * its page table updates and then call tlb_shootdown_finalize() to
 * release it and re-activate this CPU's TLB.
 *
 * @param type Type describing scope of shootdown.
 * @param asid Address space, if required by type.
 * @param page Virtual page address, if required by type.
 * @param count Number of pages, if required by type.
 */
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, __address page, count_t count)
{
	int i;

	/* Mark ourselves inactive so the wait loop below can terminate. */
	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);

	for (i = 0; i < config.cpu_count; i++) {
		cpu_t *cpu;

		if (i == CPU->id)
			continue;

		cpu = &cpus[i];
		spinlock_lock(&cpu->lock);
		if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) {
			/*
			 * The message queue is full.
			 * Erase the queue and store one TLB_INVL_ALL message.
			 */
			cpu->tlb_messages_count = 1;
			cpu->tlb_messages[0].type = TLB_INVL_ALL;
			cpu->tlb_messages[0].asid = ASID_INVALID;
			cpu->tlb_messages[0].page = 0;
			cpu->tlb_messages[0].count = 0;
		} else {
			/*
			 * Enqueue the message.
			 */
			cpu->tlb_messages[cpu->tlb_messages_count].type = type;
			cpu->tlb_messages[cpu->tlb_messages_count].asid = asid;
			cpu->tlb_messages[cpu->tlb_messages_count].page = page;
			cpu->tlb_messages[cpu->tlb_messages_count].count = count;
			cpu->tlb_messages_count++;
		}
		spinlock_unlock(&cpu->lock);
	}

	tlb_shootdown_ipi_send();

	/* Busy wait until every other CPU has parked in the IPI handler. */
busy_wait:
	for (i = 0; i < config.cpu_count; i++)
		if (cpus[i].tlb_active)
			goto busy_wait;
}
/** Finish TLB shootdown sequence.
 *
 * Releases tlblock (taken in tlb_shootdown_start()), which lets the
 * recipient CPUs proceed to process their invalidation queues, and
 * marks this CPU's TLB as active again.
 */
void tlb_shootdown_finalize(void)
{
	spinlock_unlock(&tlblock);
	CPU->tlb_active = 1;
}
void tlb_shootdown_ipi_send(void) |
{ |
ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI); |
} |
/** Receive TLB shootdown message. */ |
void tlb_shootdown_ipi_recv(void) |
{ |
tlb_invalidate_type_t type; |
asid_t asid; |
__address page; |
count_t count; |
int i; |
ASSERT(CPU); |
CPU->tlb_active = 0; |
spinlock_lock(&tlblock); |
spinlock_unlock(&tlblock); |
spinlock_lock(&CPU->lock); |
ASSERT(CPU->tlb_messages_count <= TLB_MESSAGE_QUEUE_LEN); |
for (i = 0; i < CPU->tlb_messages_count; CPU->tlb_messages_count--) { |
type = CPU->tlb_messages[i].type; |
asid = CPU->tlb_messages[i].asid; |
page = CPU->tlb_messages[i].page; |
count = CPU->tlb_messages[i].count; |
switch (type) { |
case TLB_INVL_ALL: |
tlb_invalidate_all(); |
break; |
case TLB_INVL_ASID: |
tlb_invalidate_asid(asid); |
break; |
case TLB_INVL_PAGES: |
ASSERT(count); |
tlb_invalidate_pages(asid, page, count); |
break; |
default: |
panic("unknown type (%d)\n", type); |
break; |
} |
if (type == TLB_INVL_ALL) |
break; |
} |
spinlock_unlock(&CPU->lock); |
CPU->tlb_active = 1; |
} |
#endif /* CONFIG_SMP */ |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/backend_anon.c |
---|
0,0 → 1,210 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Backend for anonymous memory address space areas. |
* |
*/ |
#include <mm/as.h> |
#include <mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <genarch/mm/page_ht.h> |
#include <mm/frame.h> |
#include <mm/slab.h> |
#include <synch/mutex.h> |
#include <adt/list.h> |
#include <adt/btree.h> |
#include <errno.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <align.h> |
#include <arch.h> |
static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access); |
static void anon_frame_free(as_area_t *area, __address page, __address frame); |
static void anon_share(as_area_t *area); |
/* Virtual operation table for anonymous memory address space areas. */
mem_backend_t anon_backend = {
	.page_fault = anon_page_fault,
	.frame_free = anon_frame_free,
	.share = anon_share
};
/** Service a page fault in the anonymous memory address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * For shared areas the frame is looked up in (or inserted into) the
 * sh_info pagemap; for private areas a fresh zeroed frame is always
 * allocated.  In both cases the mapping is inserted into the current
 * address space's page tables.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
	__address frame;

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	if (area->sh_info) {
		btree_node_t *leaf;

		/*
		 * The area is shared, chances are that the mapping can be found
		 * in the pagemap of the address space area share info structure.
		 * In the case that the pagemap does not contain the respective
		 * mapping, a new frame is allocated and the mapping is created.
		 */
		mutex_lock(&area->sh_info->lock);
		frame = (__address) btree_search(&area->sh_info->pagemap,
			ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
		if (!frame) {
			bool allocate = true;
			int i;

			/*
			 * Zero can be returned as a valid frame address.
			 * Just a small workaround.
			 *
			 * If the page key exists in the leaf, frame 0 is the
			 * genuine stored value and no allocation is needed.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
					allocate = false;
					break;
				}
			}
			if (allocate) {
				frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
				memsetb(PA2KA(frame), FRAME_SIZE, 0);

				/*
				 * Insert the address of the newly allocated frame to the pagemap.
				 */
				btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
			}
		}
		/* Each sharer of the frame holds a reference. */
		frame_reference_add(ADDR2PFN(frame));
		mutex_unlock(&area->sh_info->lock);
	} else {

		/*
		 * In general, there can be several reasons that
		 * can have caused this fault.
		 *
		 * - non-existent mapping: the area is an anonymous
		 *   area (e.g. heap or stack) and so far has not been
		 *   allocated a frame for the faulting page
		 *
		 * - non-present mapping: another possibility,
		 *   currently not implemented, would be frame
		 *   reuse; when this becomes a possibility,
		 *   do not forget to distinguish between
		 *   the different causes
		 */
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		memsetb(PA2KA(frame), FRAME_SIZE, 0);
	}

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");

	return AS_PF_OK;
}
/** Free a frame that is backed by the anonymous memory backend. |
* |
* The address space area and page tables must be already locked. |
* |
* @param area Ignored. |
* @param page Ignored. |
* @param frame Frame to be released. |
*/ |
void anon_frame_free(as_area_t *area, __address page, __address frame) |
{ |
frame_free(ADDR2PFN(frame)); |
} |
/** Share the anonymous address space area.
 *
 * Sharing of anonymous area is done by duplicating its entire mapping
 * to the pagemap. Page faults will primarily search for frames there.
 *
 * The address space and address space area must be already locked.
 *
 * Walks the used_space B+tree leaf by leaf; each (base, count) interval
 * describes 'count' consecutively used pages starting at 'base'.  Every
 * page's frame gains one reference for the new sharer.
 *
 * @param area Address space area to be shared.
 */
void anon_share(as_area_t *area)
{
	link_t *cur;

	/*
	 * Copy used portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			__address base = node->key[i];
			count_t count = (count_t) node->value[i];
			int j;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
				/* Used space must always be mapped and present. */
				ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
					     (void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);
				frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
			}

		}
	}
	mutex_unlock(&area->sh_info->lock);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/backend_phys.c |
---|
0,0 → 1,96 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Backend for address space areas backed by continuous physical memory. |
*/ |
#include <debug.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <mm/slab.h> |
#include <memstr.h> |
#include <macros.h> |
#include <arch.h> |
#include <align.h> |
static int phys_page_fault(as_area_t *area, __address addr, pf_access_t access); |
static void phys_share(as_area_t *area); |
/* Virtual operation table for areas backed by continuous physical memory.
 * frame_free is NULL: the underlying frames are not owned by the area. */
mem_backend_t phys_backend = {
	.page_fault = phys_page_fault,
	.frame_free = NULL,
	.share = phys_share
};
/** Service a page fault in the address space area backed by physical memory.
 *
 * The address space area and page tables must be already locked.
 *
 * The target frame is computed directly from the fixed physical base of
 * the area plus the offset of the faulting address — no allocation is
 * ever performed.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int phys_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
	__address base = area->backend_data.base;

	if (!as_area_check_access(area, access))
		return AS_PF_FAULT;

	/* The faulting address must lie within the backing physical range. */
	ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
	page_mapping_insert(AS, addr, base + (addr - area->base), as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");

	return AS_PF_OK;
}
/** Share address space area backed by physical memory. |
* |
* Do actually nothing as sharing of address space areas |
* that are backed up by physical memory is very easy. |
* Note that the function must be defined so that |
* as_area_share() will succeed. |
*/ |
void phys_share(as_area_t *area) |
{ |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/page.c |
---|
0,0 → 1,144 |
/* |
* Copyright (C) 2001-2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Virtual Address Translation subsystem. |
* |
* This file contains code for creating, destroying and searching |
* mappings between virtual addresses and physical addresses. |
* Functions here are mere wrappers that call the real implementation. |
* They however, define the single interface. |
*/ |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <arch/mm/asid.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <arch/asm.h> |
#include <memstr.h> |
#include <debug.h> |
#include <arch.h> |
/** Virtual operations for page subsystem. */ |
page_mapping_operations_t *page_mapping_operations = NULL; |
/** Initialize the paging subsystem by delegating to architecture code. */
void page_init(void)
{
	page_arch_init();
}
/** Map memory structure |
* |
* Identity-map memory structure |
* considering possible crossings |
* of page boundaries. |
* |
* @param s Address of the structure. |
* @param size Size of the structure. |
*/ |
void map_structure(__address s, size_t size) |
{ |
int i, cnt, length; |
length = size + (s - (s & ~(PAGE_SIZE - 1))); |
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); |
for (i = 0; i < cnt; i++) |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE); |
} |
/** Insert mapping of page to frame.
 *
 * Map virtual address page to physical address frame
 * using flags. Allocate and setup any missing page tables.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * Dispatches through the page_mapping_operations virtual operation
 * table installed by the architecture code.
 *
 * @param as Address space to wich page belongs.
 * @param page Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 */
void page_mapping_insert(as_t *as, __address page, __address frame, int flags)
{
	ASSERT(page_mapping_operations);
	ASSERT(page_mapping_operations->mapping_insert);

	page_mapping_operations->mapping_insert(as, page, frame, flags);
}
/** Remove mapping of page.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * Dispatches through the page_mapping_operations virtual operation
 * table installed by the architecture code.
 *
 * @param as Address space to wich page belongs.
 * @param page Virtual address of the page to be demapped.
 */
void page_mapping_remove(as_t *as, __address page)
{
	ASSERT(page_mapping_operations);
	ASSERT(page_mapping_operations->mapping_remove);

	page_mapping_operations->mapping_remove(as, page);
}
/** Find mapping for virtual page
 *
 * Find mapping for virtual page.
 *
 * The page table must be locked and interrupts must be disabled.
 *
 * Dispatches through the page_mapping_operations virtual operation
 * table installed by the architecture code.
 *
 * @param as Address space to wich page belongs.
 * @param page Virtual page.
 *
 * @return NULL if there is no such mapping; requested mapping otherwise.
 */
pte_t *page_mapping_find(as_t *as, __address page)
{
	ASSERT(page_mapping_operations);
	ASSERT(page_mapping_operations->mapping_find);

	return page_mapping_operations->mapping_find(as, page);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/mm/backend_elf.c |
---|
0,0 → 1,310 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
* @{ |
*/ |
/** |
* @file |
* @brief Backend for address space areas backed by an ELF image. |
*/ |
#include <elf.h> |
#include <debug.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <mm/slab.h> |
#include <mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <genarch/mm/page_ht.h> |
#include <align.h> |
#include <memstr.h> |
#include <macros.h> |
#include <arch.h> |
/* Forward declarations of the ELF backend operations. */
static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void elf_frame_free(as_area_t *area, __address page, __address frame);
static void elf_share(as_area_t *area);

/*
 * Operations vector through which the generic address space code
 * drives areas backed by an ELF image.
 */
mem_backend_t elf_backend = {
	.page_fault = elf_page_fault,
	.frame_free = elf_frame_free,
	.share = elf_share
};
/** Service a page fault in the ELF backend address space area. |
* |
* The address space area and page tables must be already locked. |
* |
* @param area Pointer to the address space area. |
* @param addr Faulting virtual address. |
* @param access Access mode that caused the fault (i.e. read/write/exec). |
* |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced). |
*/ |
int elf_page_fault(as_area_t *area, __address addr, pf_access_t access) |
{ |
elf_header_t *elf = area->backend_data.elf; |
elf_segment_header_t *entry = area->backend_data.segment; |
btree_node_t *leaf; |
__address base, frame; |
index_t i; |
if (!as_area_check_access(area, access)) |
return AS_PF_FAULT; |
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz)); |
i = (addr - entry->p_vaddr) >> PAGE_WIDTH; |
base = (__address) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
if (area->sh_info) { |
bool found = false; |
/* |
* The address space area is shared. |
*/ |
mutex_lock(&area->sh_info->lock); |
frame = (__address) btree_search(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf); |
if (!frame) { |
int i; |
/* |
* Workaround for valid NULL address. |
*/ |
for (i = 0; i < leaf->keys; i++) { |
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) { |
found = true; |
break; |
} |
} |
} |
if (frame || found) { |
frame_reference_add(ADDR2PFN(frame)); |
page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); |
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
panic("Could not insert used space.\n"); |
mutex_unlock(&area->sh_info->lock); |
return AS_PF_OK; |
} |
} |
/* |
* The area is either not shared or the pagemap does not contain the mapping. |
*/ |
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) { |
/* |
* Initialized portion of the segment. The memory is backed |
* directly by the content of the ELF image. Pages are |
* only copied if the segment is writable so that there |
* can be more instantions of the same memory ELF image |
* used at a time. Note that this could be later done |
* as COW. |
*/ |
if (entry->p_flags & PF_W) { |
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0)); |
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE); |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
frame = KA2PA(base + i*FRAME_SIZE); |
} |
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
/* |
* This is the uninitialized portion of the segment. |
* It is not physically present in the ELF image. |
* To resolve the situation, a frame must be allocated |
* and cleared. |
*/ |
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0)); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
size_t size; |
/* |
* The mixed case. |
* The lower part is backed by the ELF image and |
* the upper part is anonymous memory. |
*/ |
size = entry->p_filesz - (i<<PAGE_WIDTH); |
frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0)); |
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0); |
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size); |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} |
if (area->sh_info) |
mutex_unlock(&area->sh_info->lock); |
page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); |
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
panic("Could not insert used space.\n"); |
return AS_PF_OK; |
} |
/** Free a frame that is backed by the ELF backend. |
* |
* The address space area and page tables must be already locked. |
* |
* @param area Pointer to the address space area. |
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE. |
* @param frame Frame to be released. |
* |
*/ |
void elf_frame_free(as_area_t *area, __address page, __address frame) |
{ |
elf_header_t *elf = area->backend_data.elf; |
elf_segment_header_t *entry = area->backend_data.segment; |
__address base; |
index_t i; |
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz)); |
i = (page - entry->p_vaddr) >> PAGE_WIDTH; |
base = (__address) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
if (entry->p_flags & PF_W) { |
/* |
* Free the frame with the copy of writable segment data. |
*/ |
frame_free(ADDR2PFN(frame)); |
} |
} else { |
/* |
* The frame is either anonymous memory or the mixed case (i.e. lower |
* part is backed by the ELF image and the upper is anonymous). |
* In any case, a frame needs to be freed. |
*/ |
frame_free(ADDR2PFN(frame)); |
} |
} |
/** Share ELF image backed address space area.
 *
 * If the area is writable, then all mapped pages are duplicated in the pagemap.
 * Otherwise only portions of the area that are not backed by the ELF image
 * are put into the pagemap.
 *
 * The address space and address space area must be locked prior to the call.
 *
 * @param area Address space area.
 */
void elf_share(as_area_t *area)
{
	elf_segment_header_t *entry = area->backend_data.segment;
	link_t *cur;
	btree_node_t *leaf, *node;
	/* First address past the file-backed part of the segment. */
	__address start_anon = entry->p_vaddr + entry->p_filesz;

	/*
	 * Find the node in which to start linear search.
	 */
	if (area->flags & AS_AREA_WRITE) {
		/* Writable: every mapped page must be copied, start at the front. */
		node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
	} else {
		/*
		 * Read-only: only the anonymous tail matters, so start at the
		 * leaf covering start_anon (or its left neighbour, to be safe).
		 */
		(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
		node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
		if (!node)
			node = leaf;
	}

	/*
	 * Copy used anonymous portions of the area to sh_info's page map.
	 */
	mutex_lock(&area->sh_info->lock);
	for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		for (i = 0; i < node->keys; i++) {
			/* Each used_space entry is (base page, page count). */
			__address base = node->key[i];
			count_t count = (count_t) node->value[i];
			int j;

			/*
			 * Skip read-only areas of used space that are backed
			 * by the ELF image.
			 */
			if (!(area->flags & AS_AREA_WRITE))
				if (base + count*PAGE_SIZE <= start_anon)
					continue;

			for (j = 0; j < count; j++) {
				pte_t *pte;

				/*
				 * Skip read-only pages that are backed by the ELF image.
				 */
				if (!(area->flags & AS_AREA_WRITE))
					if (base + (j + 1)*PAGE_SIZE <= start_anon)
						continue;

				/*
				 * Record each mapped frame in the pagemap
				 * (keyed relative to area->base) and bump its
				 * reference count so both sharers own it.
				 */
				page_table_lock(area->as, false);
				pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
				btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
					(void *) PTE_GET_FRAME(pte), NULL);
				page_table_unlock(area->as, false);
				frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
			}
		}
	}
	mutex_unlock(&area->sh_info->lock);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/ipc/ipc.c |
---|
0,0 → 1,535 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericipc |
* @{ |
*/ |
/** @file |
*/ |
/* Lock ordering |
* |
* First the answerbox, then the phone |
*/ |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <ipc/ipc.h> |
#include <errno.h> |
#include <mm/slab.h> |
#include <arch.h> |
#include <proc/task.h> |
#include <memstr.h> |
#include <debug.h> |
#include <print.h> |
#include <proc/thread.h> |
#include <arch/interrupt.h> |
#include <ipc/irq.h> |
/* Open channel that is assigned automatically to new tasks */
answerbox_t *ipc_phone_0 = NULL;

/* Slab cache backing call_t allocations (see ipc_call_alloc()/ipc_call_free()). */
static slab_cache_t *ipc_call_slab;
/* Initialize new call */ |
static void _ipc_call_init(call_t *call) |
{ |
memsetb((__address)call, sizeof(*call), 0); |
call->callerbox = &TASK->answerbox; |
call->sender = TASK; |
} |
/** Allocate & initialize call structure |
* |
* The call is initialized, so that the reply will be directed |
* to TASK->answerbox |
* |
* @param flags Parameters for slab_alloc (ATOMIC, etc.) |
*/ |
call_t * ipc_call_alloc(int flags) |
{ |
call_t *call; |
call = slab_alloc(ipc_call_slab, flags); |
_ipc_call_init(call); |
return call; |
} |
/** Initialize allocated call */ |
void ipc_call_static_init(call_t *call) |
{ |
_ipc_call_init(call); |
call->flags |= IPC_CALL_STATIC_ALLOC; |
} |
/** Deallocate call structure
 *
 * Must not be used on calls marked IPC_CALL_STATIC_ALLOC.
 */
void ipc_call_free(call_t *call)
{
	slab_free(ipc_call_slab, call);
}
/** Initialize answerbox structure
 *
 * The answerbox is owned by the current TASK.
 */
void ipc_answerbox_init(answerbox_t *box)
{
	spinlock_initialize(&box->lock, "ipc_box_lock");
	spinlock_initialize(&box->irq_lock, "ipc_box_irqlock");
	waitq_initialize(&box->wq);
	/* Phones connected to this box, plus calls in their various stages. */
	list_initialize(&box->connected_phones);
	list_initialize(&box->calls);
	list_initialize(&box->dispatched_calls);
	list_initialize(&box->answers);
	list_initialize(&box->irq_notifs);
	box->task = TASK;
}
/** Connect phone to answerbox
 *
 * Locking follows the file-wide order: first the phone, then the
 * answerbox.
 */
void ipc_phone_connect(phone_t *phone, answerbox_t *box)
{
	spinlock_lock(&phone->lock);

	phone->state = IPC_PHONE_CONNECTED;
	phone->callee = box;

	spinlock_lock(&box->lock);
	list_append(&phone->link, &box->connected_phones);
	spinlock_unlock(&box->lock);

	spinlock_unlock(&phone->lock);
}
/** Initialize phone structure and connect phone to answerbox |
*/ |
void ipc_phone_init(phone_t *phone) |
{ |
spinlock_initialize(&phone->lock, "phone_lock"); |
phone->callee = NULL; |
phone->state = IPC_PHONE_FREE; |
atomic_set(&phone->active_calls, 0); |
} |
/** Helper function to facilitate synchronous calls */
void ipc_call_sync(phone_t *phone, call_t *request)
{
	answerbox_t sync_box;

	ipc_answerbox_init(&sync_box);

	/* We will receive data on special box */
	request->callerbox = &sync_box;

	/*
	 * Even when ipc_call() fails (e.g. the phone has hung up), the error
	 * is answered back to request->callerbox (= sync_box), so the wait
	 * below always terminates.
	 */
	ipc_call(phone, request);
	ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
}
/** Answer message that was not dispatched and is not entered in
 * any queue
 */
static void _ipc_answer_free_call(call_t *call)
{
	answerbox_t *callerbox = call->callerbox;

	call->flags |= IPC_CALL_ANSWERED;

	/* Enqueue the answer in the caller's box and wake one waiter. */
	spinlock_lock(&callerbox->lock);
	list_append(&call->link, &callerbox->answers);
	spinlock_unlock(&callerbox->lock);
	waitq_wakeup(&callerbox->wq, 0);
}
/** Answer a message that is in the callee queue
 *
 * @param box Answerbox that is answering the message
 * @param call Modified request that is being sent back
 */
void ipc_answer(answerbox_t *box, call_t *call)
{
	/* Remove from active box */
	spinlock_lock(&box->lock);
	list_remove(&call->link);
	spinlock_unlock(&box->lock);
	/* Send back answer */
	_ipc_answer_free_call(call);
}
/** Simulate sending back a message
 *
 * Most errors are better handled by forming a normal backward
 * message and sending it as a normal answer.
 */
void ipc_backsend_err(phone_t *phone, call_t *call, __native err)
{
	call->data.phone = phone;
	/* Answer accounting expects an active call on this phone. */
	atomic_inc(&phone->active_calls);
	IPC_SET_RETVAL(call->data, err);
	_ipc_answer_free_call(call);
}
/* Unchecked version of ipc_call()
 *
 * Enqueues the call into the box and wakes a waiter without
 * validating the phone state (callers in this file do that while
 * holding phone->lock).
 */
static void _ipc_call(phone_t *phone, answerbox_t *box, call_t *call)
{
	/* Forwarded calls are already accounted on their original phone. */
	if (! (call->flags & IPC_CALL_FORWARDED)) {
		atomic_inc(&phone->active_calls);
		call->data.phone = phone;
	}

	spinlock_lock(&box->lock);
	list_append(&call->link, &box->calls);
	spinlock_unlock(&box->lock);
	waitq_wakeup(&box->wq, 0);
}
/** Send a asynchronous request using phone to answerbox |
* |
* @param phone Phone connected to answerbox. |
* @param call Structure representing the call. |
*/ |
int ipc_call(phone_t *phone, call_t *call) |
{ |
answerbox_t *box; |
spinlock_lock(&phone->lock); |
if (phone->state != IPC_PHONE_CONNECTED) { |
spinlock_unlock(&phone->lock); |
if (call->flags & IPC_CALL_FORWARDED) { |
IPC_SET_RETVAL(call->data, EFORWARD); |
_ipc_answer_free_call(call); |
} else { |
if (phone->state == IPC_PHONE_HUNGUP) |
ipc_backsend_err(phone, call, EHANGUP); |
else |
ipc_backsend_err(phone, call, ENOENT); |
} |
return ENOENT; |
} |
box = phone->callee; |
_ipc_call(phone, box, call); |
spinlock_unlock(&phone->lock); |
return 0; |
} |
/** Disconnect phone from answerbox |
* |
* This call leaves the phone in HUNGUP state. The change to 'free' is done |
* lazily later. |
* |
* @param phone Phone to be hung up |
* |
* @return 0 - phone disconnected, -1 - the phone was already disconnected |
*/ |
int ipc_phone_hangup(phone_t *phone) |
{ |
answerbox_t *box; |
call_t *call; |
spinlock_lock(&phone->lock); |
if (phone->state == IPC_PHONE_FREE || phone->state ==IPC_PHONE_HUNGUP \ |
|| phone->state == IPC_PHONE_CONNECTING) { |
spinlock_unlock(&phone->lock); |
return -1; |
} |
box = phone->callee; |
if (phone->state != IPC_PHONE_SLAMMED) { |
/* Remove myself from answerbox */ |
spinlock_lock(&box->lock); |
list_remove(&phone->link); |
spinlock_unlock(&box->lock); |
if (phone->state != IPC_PHONE_SLAMMED) { |
call = ipc_call_alloc(0); |
IPC_SET_METHOD(call->data, IPC_M_PHONE_HUNGUP); |
call->flags |= IPC_CALL_DISCARD_ANSWER; |
_ipc_call(phone, box, call); |
} |
} |
phone->state = IPC_PHONE_HUNGUP; |
spinlock_unlock(&phone->lock); |
return 0; |
} |
/** Forward a call from one answerbox to a new one
 *
 * @param call Call to be redirected.
 * @param newphone Phone to target answerbox.
 * @param oldbox Old answerbox
 * @return 0 on forward ok, error code, if there was error
 *
 * - the return value serves only as an information for the forwarder,
 *   the original caller is notified automatically with EFORWARD
 */
int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox)
{
	/* Detach the call from the old box before re-sending it. */
	spinlock_lock(&oldbox->lock);
	list_remove(&call->link);
	spinlock_unlock(&oldbox->lock);

	return ipc_call(newphone, call);
}
/** Wait for a phone call
 *
 * @param box Answerbox expecting the call.
 * @param usec Timeout in microseconds. See documentation for waitq_sleep_timeout()
 *	       for description of its special meaning.
 * @param flags Select mode of sleep operation. See documentation for
 *	        waitq_sleep_timeout() for description of its special meaning.
 * @return Received message address, or NULL on timeout/interruption.
 * - to distinguish between call and answer, look at call->flags
 */
call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags)
{
	call_t *request;
	ipl_t ipl;
	int rc;

restart:
	rc = waitq_sleep_timeout(&box->wq, usec, flags);
	if (SYNCH_FAILED(rc))
		return NULL;

	spinlock_lock(&box->lock);
	if (!list_empty(&box->irq_notifs)) {
		/* IRQ notifications are served first, with interrupts off. */
		ipl = interrupts_disable();
		spinlock_lock(&box->irq_lock);

		request = list_get_instance(box->irq_notifs.next, call_t, link);
		list_remove(&request->link);

		spinlock_unlock(&box->irq_lock);
		interrupts_restore(ipl);
	} else if (!list_empty(&box->answers)) {
		/* Handle asynchronous answers */
		request = list_get_instance(box->answers.next, call_t, link);
		list_remove(&request->link);
		/* The answered call no longer counts as active on its phone. */
		atomic_dec(&request->data.phone->active_calls);
	} else if (!list_empty(&box->calls)) {
		/* Handle requests */
		request = list_get_instance(box->calls.next, call_t, link);
		list_remove(&request->link);
		/* Append request to dispatch queue */
		list_append(&request->link, &box->dispatched_calls);
	} else {
		/* This can happen regularly after ipc_cleanup */
		spinlock_unlock(&box->lock);
		goto restart;
	}
	spinlock_unlock(&box->lock);
	return request;
}
/** Answer all calls from list with EHANGUP msg */ |
static void ipc_cleanup_call_list(link_t *lst) |
{ |
call_t *call; |
while (!list_empty(lst)) { |
call = list_get_instance(lst->next, call_t, link); |
list_remove(&call->link); |
IPC_SET_RETVAL(call->data, EHANGUP); |
_ipc_answer_free_call(call); |
} |
} |
/** Clean up all IPC communication of the current task
 *
 * Note: ipc_hangup sets the returning answerbox to TASK->answerbox; you
 * have to change it as well if you want to clean up a task other than
 * the current one.
 */
void ipc_cleanup(void)
{
	int i;
	call_t *call;
	phone_t *phone;

	/* Disconnect all our phones ('ipc_phone_hangup') */
	for (i=0;i < IPC_MAX_PHONES; i++)
		ipc_phone_hangup(&TASK->phones[i]);

	/* Disconnect all connected irqs */
	ipc_irq_cleanup(&TASK->answerbox);

	/* Disconnect all phones connected to our answerbox */
restart_phones:
	spinlock_lock(&TASK->answerbox.lock);
	while (!list_empty(&TASK->answerbox.connected_phones)) {
		phone = list_get_instance(TASK->answerbox.connected_phones.next,
					  phone_t, link);
		/*
		 * File-wide lock order is phone first, then answerbox; we
		 * already hold the answerbox lock, so only trylock is safe.
		 * On failure, drop everything and restart the traversal.
		 */
		if (! spinlock_trylock(&phone->lock)) {
			spinlock_unlock(&TASK->answerbox.lock);
			goto restart_phones;
		}

		/* Disconnect phone */
		ASSERT(phone->state == IPC_PHONE_CONNECTED);
		phone->state = IPC_PHONE_SLAMMED;
		list_remove(&phone->link);

		spinlock_unlock(&phone->lock);
	}

	/* Answer all messages in 'calls' and 'dispatched_calls' queues */
	ipc_cleanup_call_list(&TASK->answerbox.dispatched_calls);
	ipc_cleanup_call_list(&TASK->answerbox.calls);
	spinlock_unlock(&TASK->answerbox.lock);

	/* Wait for all async answers to arrive */
	while (1) {
		/* Go through all phones, until all are FREE... */
		/* Locking not needed, no one else should modify
		 * it, when we are in cleanup */
		for (i=0;i < IPC_MAX_PHONES; i++) {
			if (TASK->phones[i].state == IPC_PHONE_HUNGUP &&
			    atomic_get(&TASK->phones[i].active_calls) == 0)
				TASK->phones[i].state = IPC_PHONE_FREE;

			/* Just for sure, we might have had some
			 * IPC_PHONE_CONNECTING phones */
			if (TASK->phones[i].state == IPC_PHONE_CONNECTED)
				ipc_phone_hangup(&TASK->phones[i]);
			/* If the hangup succeeded, it has sent a HANGUP
			 * message, the IPC is now in HUNGUP state, we
			 * wait for the reply to come */

			if (TASK->phones[i].state != IPC_PHONE_FREE)
				break;
		}
		/* Voila, got into cleanup */
		if (i == IPC_MAX_PHONES)
			break;

		/* Consume one pending answer/notification and retry. */
		call = ipc_wait_for_call(&TASK->answerbox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
		ASSERT((call->flags & IPC_CALL_ANSWERED) || (call->flags & IPC_CALL_NOTIF));
		ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));
		atomic_dec(&TASK->active_calls);
		ipc_call_free(call);
	}
}
/** Initilize ipc subsystem */ |
void ipc_init(void) |
{ |
ipc_call_slab = slab_cache_create("ipc_call", |
sizeof(call_t), |
0, |
NULL, NULL, 0); |
ipc_irq_make_table(IRQ_COUNT); |
} |
/** Kconsole - list answerbox contents
 *
 * Debug helper: dumps the phones and all call queues of the task
 * identified by taskid. Silently returns if no such task exists.
 *
 * NOTE(review): the printf specifiers (%lld for taskid, %d for the
 * __native IPC args and for atomic_get()) may not match the actual
 * argument widths on all architectures — verify against arch types.
 */
void ipc_print_task(task_id_t taskid)
{
	task_t *task;
	int i;
	call_t *call;
	link_t *tmp;

	/* Look the task up and pin it under its own lock. */
	spinlock_lock(&tasks_lock);
	task = task_find_by_id(taskid);
	if (task)
		spinlock_lock(&task->lock);
	spinlock_unlock(&tasks_lock);
	if (!task)
		return;

	/* Print opened phones & details */
	printf("PHONE:\n");
	for (i=0; i < IPC_MAX_PHONES;i++) {
		spinlock_lock(&task->phones[i].lock);
		if (task->phones[i].state != IPC_PHONE_FREE) {
			printf("%d: ",i);
			switch (task->phones[i].state) {
			case IPC_PHONE_CONNECTING:
				printf("connecting ");
				break;
			case IPC_PHONE_CONNECTED:
				printf("connected to: %p ",
				       task->phones[i].callee);
				break;
			case IPC_PHONE_SLAMMED:
				printf("slammed by: %p ",
				       task->phones[i].callee);
				break;
			case IPC_PHONE_HUNGUP:
				printf("hung up - was: %p ",
				       task->phones[i].callee);
				break;
			default:
				break;
			}
			printf("active: %d\n", atomic_get(&task->phones[i].active_calls));
		}
		spinlock_unlock(&task->phones[i].lock);
	}

	/* Print answerbox - pending calls */
	spinlock_lock(&task->answerbox.lock);
	printf("ABOX - CALLS:\n");
	for (tmp=task->answerbox.calls.next; tmp != &task->answerbox.calls;tmp = tmp->next) {
		call = list_get_instance(tmp, call_t, link);
		printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call,
		       call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
		       IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags);
	}
	/* Print answerbox - dispatched calls */
	printf("ABOX - DISPATCHED CALLS:\n");
	for (tmp=task->answerbox.dispatched_calls.next;
	     tmp != &task->answerbox.dispatched_calls;
	     tmp = tmp->next) {
		call = list_get_instance(tmp, call_t, link);
		printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call,
		       call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
		       IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags);
	}
	/* Print answerbox - answers */
	printf("ABOX - ANSWERS:\n");
	for (tmp=task->answerbox.answers.next; tmp != &task->answerbox.answers; tmp = tmp->next) {
		call = list_get_instance(tmp, call_t, link);
		printf("Callid:%p M:%d A1:%d A2:%d A3:%d Flags:%x\n",call,
		       IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
		       IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags);
	}
	spinlock_unlock(&task->answerbox.lock);
	spinlock_unlock(&task->lock);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/ipc/ipcrsc.c |
---|
0,0 → 1,218 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericipc |
* @{ |
*/ |
/** @file |
*/ |
/* IPC resources management |
* |
* The goal of this source code is to properly manage IPC resources |
* and allow straight and clean clean-up procedure upon task termination. |
* |
* The pattern of usage of the resources is: |
* - allocate empty phone slot, connect | deallocate slot |
* - disconnect connected phone (some messages might be on the fly) |
* - find phone in slot and send a message using phone |
* - answer message to phone |
* - hangup phone (the caller has hung up) |
* - hangup phone (the answerbox is exiting) |
* |
* Locking strategy |
* |
* - To use a phone, disconnect a phone etc., the phone must be |
* first locked and then checked that it is connected |
* - To connect an allocated phone it need not be locked (assigning |
* pointer is atomic on all platforms) |
* |
* - To find an empty phone slot, the TASK must be locked |
* - To answer a message, the answerbox must be locked |
* - The locking of phone and answerbox is done at the ipc_ level. |
* It is perfectly correct to pass unconnected phone to these functions |
* and proper reply will be generated. |
* |
* Locking order |
* |
* - first phone, then answerbox |
* + Easy locking on calls |
* - Very hard traversing list of phones when disconnecting because |
* the phones may disconnect during traversal of list of connected phones. |
* The only possibility is try_lock with restart of list traversal. |
* |
* Destroying is less frequent, this approach is taken. |
* |
* Phone call |
* |
* *** Connect_me_to *** |
* The caller sends IPC_M_CONNECT_ME_TO to an answerbox. The server |
* receives 'phoneid' of the connecting phone as an ARG3. If it answers |
* with RETVAL=0, the phonecall is accepted, otherwise it is refused. |
* |
* *** Connect_to_me *** |
 * The caller sends IPC_M_CONNECT_TO_ME to an answerbox. |
* The server receives an automatically |
* opened phoneid. If it accepts (RETVAL=0), it can use the phoneid |
* immediately. |
* Possible race condition can arise, when the client receives messages |
* from new connection before getting response for connect_to_me message. |
* Userspace should implement handshake protocol that would control it. |
* |
* Phone hangup |
* |
* *** The caller hangs up (sys_ipc_hangup) *** |
* - The phone is disconnected (no more messages can be sent over this phone), |
 * all in-progress messages are correctly handled. The answerbox receives |
* IPC_M_PHONE_HUNGUP call from the phone that hung up. When all async |
* calls are answered, the phone is deallocated. |
* |
* *** The answerbox hangs up (ipc_answer(EHANGUP)) |
* - The phone is disconnected. EHANGUP response code is sent |
* to the calling task. All new calls through this phone |
 * get an EHANGUP error code, the task is expected to |
* send an sys_ipc_hangup after cleaning up it's internal structures. |
* |
* Call forwarding |
* |
* The call can be forwarded, so that the answer to call is passed directly |
* to the original sender. However, this poses special problems regarding |
* routing of hangup messages. |
* |
* sys_ipc_hangup -> IPC_M_PHONE_HUNGUP |
* - this message CANNOT be forwarded |
* |
* EHANGUP during forward |
* - The *forwarding* phone will be closed, EFORWARD is sent to receiver. |
* |
* EHANGUP, ENOENT during forward |
* - EFORWARD is sent to the receiver, ipc_forward returns error code EFORWARD |
* |
* Cleanup strategy |
* |
* 1) Disconnect all our phones ('ipc_phone_hangup'). |
* |
* 2) Disconnect all phones connected to answerbox. |
* |
* 3) Answer all messages in 'calls' and 'dispatched_calls' queues with |
* appropriate error code (EHANGUP, EFORWARD). |
* |
* 4) Wait for all async answers to arrive and dispose of them. |
* |
*/ |
#include <synch/spinlock.h> |
#include <ipc/ipc.h> |
#include <arch.h> |
#include <proc/task.h> |
#include <ipc/ipcrsc.h> |
#include <debug.h> |
/** Find call_t * in call table according to callid |
* |
* TODO: Some speedup (hash table?) |
* @return NULL on not found, otherwise pointer to call structure |
*/ |
call_t * get_call(__native callid) |
{ |
link_t *lst; |
call_t *call, *result = NULL; |
spinlock_lock(&TASK->answerbox.lock); |
for (lst = TASK->answerbox.dispatched_calls.next; |
lst != &TASK->answerbox.dispatched_calls; lst = lst->next) { |
call = list_get_instance(lst, call_t, link); |
if ((__native)call == callid) { |
result = call; |
break; |
} |
} |
spinlock_unlock(&TASK->answerbox.lock); |
return result; |
} |
/** Allocate new phone slot in current TASK structure */ |
int phone_alloc(void) |
{ |
int i; |
spinlock_lock(&TASK->lock); |
for (i=0; i < IPC_MAX_PHONES; i++) { |
if (TASK->phones[i].state == IPC_PHONE_HUNGUP && \ |
atomic_get(&TASK->phones[i].active_calls) == 0) |
TASK->phones[i].state = IPC_PHONE_FREE; |
if (TASK->phones[i].state == IPC_PHONE_FREE) { |
TASK->phones[i].state = IPC_PHONE_CONNECTING; |
break; |
} |
} |
spinlock_unlock(&TASK->lock); |
if (i >= IPC_MAX_PHONES) |
return -1; |
return i; |
} |
/* Return a reserved (CONNECTING) phone slot to the FREE state. */
static void phone_deallocp(phone_t *phone)
{
	ASSERT(phone->state == IPC_PHONE_CONNECTING);

	/* atomic operation */
	phone->state = IPC_PHONE_FREE;
}
/** Free slot from a disconnected phone
 *
 * All already sent messages will be correctly processed
 *
 * @param phoneid Index into TASK->phones[] previously returned by phone_alloc().
 */
void phone_dealloc(int phoneid)
{
	phone_deallocp(&TASK->phones[phoneid]);
}
/** Connect phone to a given answerbox
 *
 * @param phoneid The slot that will be connected
 * @param box Answerbox to connect the slot to.
 *
 * The procedure _enforces_ that the user first marks the phone
 * busy (e.g. via phone_alloc) and then connects the phone, otherwise
 * a race condition may appear.
 */
void phone_connect(int phoneid, answerbox_t *box)
{
	phone_t *phone = &TASK->phones[phoneid];

	ASSERT(phone->state == IPC_PHONE_CONNECTING);
	ipc_phone_connect(phone, box);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/ipc/sysipc.c |
---|
0,0 → 1,597 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericipc |
* @{ |
*/ |
/** @file |
*/ |
#include <arch.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <errno.h> |
#include <memstr.h> |
#include <debug.h> |
#include <ipc/ipc.h> |
#include <ipc/sysipc.h> |
#include <ipc/irq.h> |
#include <ipc/ipcrsc.h> |
#include <arch/interrupt.h> |
#include <print.h> |
#include <syscall/copy.h> |
#include <security/cap.h> |
#include <mm/as.h> |
/** Look up the phone for @a phoneid in the current task.
 *
 * On an invalid id the statement @a err is executed (typically a return).
 * TASK->phones has IPC_MAX_PHONES entries, so the last valid index is
 * IPC_MAX_PHONES - 1; the original '>' comparison let the id
 * IPC_MAX_PHONES slip through and index one element past the array.
 */
#define GET_CHECK_PHONE(phone, phoneid, err) { \
	if ((phoneid) >= IPC_MAX_PHONES) { err; } \
	phone = &TASK->phones[phoneid]; \
}

/** Copy a whole kernel structure to a userspace destination. */
#define STRUCT_TO_USPACE(dst, src) copy_to_uspace(dst, src, sizeof(*(src)))
/** Return true if the method is a system method */ |
static inline int is_system_method(__native method) |
{ |
if (method <= IPC_M_LAST_SYSTEM) |
return 1; |
return 0; |
} |
/** Return true if the message with this method is forwardable |
* |
* - some system messages may be forwarded, for some of them |
* it is useless |
*/ |
static inline int is_forwardable(__native method) |
{ |
if (method == IPC_M_PHONE_HUNGUP || method == IPC_M_AS_AREA_SEND \ |
|| method == IPC_M_AS_AREA_RECV) |
return 0; /* This message is meant only for the receiver */ |
return 1; |
} |
/****************************************************/ |
/* Functions that preprocess answer before sending |
* it to the recepient |
*/ |
/** Return true if the caller (ipc_answer) should save |
* the old call contents for answer_preprocess |
*/ |
static inline int answer_need_old(call_t *call) |
{ |
if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME) |
return 1; |
if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_ME_TO) |
return 1; |
if (IPC_GET_METHOD(call->data) == IPC_M_AS_AREA_SEND) |
return 1; |
if (IPC_GET_METHOD(call->data) == IPC_M_AS_AREA_RECV) |
return 1; |
return 0; |
} |
/** Interpret process answer as control information
 *
 * This function is called directly after sys_ipc_answer, before the
 * answer is delivered to the original sender.
 *
 * @param answer  The answered call.
 * @param olddata Snapshot of the call data taken before the answerer
 *                overwrote it (see answer_need_old()), or NULL.
 *
 * @return 0 on success; otherwise the error code of as_area_share()
 *         (which is also stored as the answer's return value).
 */
static inline int answer_preprocess(call_t *answer, ipc_data_t *olddata)
{
	int phoneid;

	if (IPC_GET_RETVAL(answer->data) == EHANGUP) {
		/* In case of forward, hangup the forwared phone,
		 * not the originator
		 */
		/* Lock ordering: phone lock first, then the answerbox lock. */
		spinlock_lock(&answer->data.phone->lock);
		spinlock_lock(&TASK->answerbox.lock);
		if (answer->data.phone->state == IPC_PHONE_CONNECTED) {
			/* Unlink the phone from our answerbox and mark it
			 * slammed so the other side learns of the hangup. */
			list_remove(&answer->data.phone->link);
			answer->data.phone->state = IPC_PHONE_SLAMMED;
		}
		spinlock_unlock(&TASK->answerbox.lock);
		spinlock_unlock(&answer->data.phone->lock);
	}

	if (!olddata)
		return 0;

	if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_TO_ME) {
		/* ARG3 of the request holds the phone slot allocated by
		 * process_request(). */
		phoneid = IPC_GET_ARG3(*olddata);
		if (IPC_GET_RETVAL(answer->data)) {
			/* The connection was not accepted */
			phone_dealloc(phoneid);
		} else {
			/* The connection was accepted */
			phone_connect(phoneid,&answer->sender->answerbox);
			/* Set 'phone identification' as arg3 of response */
			IPC_SET_ARG3(answer->data, (__native)&TASK->phones[phoneid]);
		}
	} else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_ME_TO) {
		/* If the users accepted call, connect */
		if (!IPC_GET_RETVAL(answer->data)) {
			/* ARG3 was set to the sender's phone pointer by
			 * request_preprocess(). */
			ipc_phone_connect((phone_t *)IPC_GET_ARG3(*olddata),
					  &TASK->answerbox);
		}
	} else if (IPC_GET_METHOD(*olddata) == IPC_M_AS_AREA_SEND) {
		if (!IPC_GET_RETVAL(answer->data)) { /* Accepted, handle as_area receipt */
			ipl_t ipl;
			int rc;
			as_t *as;

			/* Take a consistent snapshot of the sender's address
			 * space pointer under its lock. */
			ipl = interrupts_disable();
			spinlock_lock(&answer->sender->lock);
			as = answer->sender->as;
			spinlock_unlock(&answer->sender->lock);
			interrupts_restore(ipl);

			/* Share the sender's area into our (AS) address space;
			 * the result doubles as the answer's return value. */
			rc = as_area_share(as, IPC_GET_ARG1(*olddata), IPC_GET_ARG2(*olddata),
					   AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
			IPC_SET_RETVAL(answer->data, rc);
			return rc;
		}
	} else if (IPC_GET_METHOD(*olddata) == IPC_M_AS_AREA_RECV) {
		if (!IPC_GET_RETVAL(answer->data)) {
			ipl_t ipl;
			as_t *as;
			int rc;

			ipl = interrupts_disable();
			spinlock_lock(&answer->sender->lock);
			as = answer->sender->as;
			spinlock_unlock(&answer->sender->lock);
			interrupts_restore(ipl);

			/* Opposite direction: share our area into the
			 * original sender's address space. */
			rc = as_area_share(AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG2(*olddata),
					   as, IPC_GET_ARG1(*olddata), IPC_GET_ARG2(answer->data));
			IPC_SET_RETVAL(answer->data, rc);
		}
	}
	return 0;
}
/** Called before the request is sent |
* |
* @return 0 - no error, -1 - report error to user |
*/ |
static int request_preprocess(call_t *call) |
{ |
int newphid; |
size_t size; |
switch (IPC_GET_METHOD(call->data)) { |
case IPC_M_CONNECT_ME_TO: |
newphid = phone_alloc(); |
if (newphid < 0) |
return ELIMIT; |
/* Set arg3 for server */ |
IPC_SET_ARG3(call->data, (__native)&TASK->phones[newphid]); |
call->flags |= IPC_CALL_CONN_ME_TO; |
call->private = newphid; |
break; |
case IPC_M_AS_AREA_SEND: |
size = as_get_size(IPC_GET_ARG1(call->data)); |
if (!size) { |
return EPERM; |
} |
IPC_SET_ARG2(call->data, size); |
break; |
default: |
break; |
} |
return 0; |
} |
/****************************************************/ |
/* Functions called to process received call/answer |
* before passing to uspace |
*/ |
/** Do basic kernel processing of received call answer */ |
static void process_answer(call_t *call) |
{ |
if (IPC_GET_RETVAL(call->data) == EHANGUP && \ |
call->flags & IPC_CALL_FORWARDED) |
IPC_SET_RETVAL(call->data, EFORWARD); |
if (call->flags & IPC_CALL_CONN_ME_TO) { |
if (IPC_GET_RETVAL(call->data)) |
phone_dealloc(call->private); |
else |
IPC_SET_ARG3(call->data, call->private); |
} |
} |
/** Do basic kernel processing of received call request |
* |
* @return 0 - the call should be passed to userspace, 1 - ignore call |
*/ |
static int process_request(answerbox_t *box,call_t *call) |
{ |
int phoneid; |
if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME) { |
phoneid = phone_alloc(); |
if (phoneid < 0) { /* Failed to allocate phone */ |
IPC_SET_RETVAL(call->data, ELIMIT); |
ipc_answer(box,call); |
return -1; |
} |
IPC_SET_ARG3(call->data, phoneid); |
} |
return 0; |
} |
/** Send a call over IPC, wait for reply, return to user |
* |
* @return Call identification, returns -1 on fatal error, |
-2 on 'Too many async request, handle answers first |
*/ |
__native sys_ipc_call_sync_fast(__native phoneid, __native method, |
__native arg1, ipc_data_t *data) |
{ |
call_t call; |
phone_t *phone; |
int res; |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
ipc_call_static_init(&call); |
IPC_SET_METHOD(call.data, method); |
IPC_SET_ARG1(call.data, arg1); |
if (!(res=request_preprocess(&call))) { |
ipc_call_sync(phone, &call); |
process_answer(&call); |
} else |
IPC_SET_RETVAL(call.data, res); |
STRUCT_TO_USPACE(&data->args, &call.data.args); |
return 0; |
} |
/** Synchronous IPC call allowing to send whole message */ |
__native sys_ipc_call_sync(__native phoneid, ipc_data_t *question, |
ipc_data_t *reply) |
{ |
call_t call; |
phone_t *phone; |
int res; |
int rc; |
ipc_call_static_init(&call); |
rc = copy_from_uspace(&call.data.args, &question->args, sizeof(call.data.args)); |
if (rc != 0) |
return (__native) rc; |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
if (!(res=request_preprocess(&call))) { |
ipc_call_sync(phone, &call); |
process_answer(&call); |
} else |
IPC_SET_RETVAL(call.data, res); |
rc = STRUCT_TO_USPACE(&reply->args, &call.data.args); |
if (rc != 0) |
return rc; |
return 0; |
} |
/** Check that the task did not exceed allowed limit |
* |
* @return 0 - Limit OK, -1 - limit exceeded |
*/ |
static int check_call_limit(void) |
{ |
if (atomic_preinc(&TASK->active_calls) > IPC_MAX_ASYNC_CALLS) { |
atomic_dec(&TASK->active_calls); |
return -1; |
} |
return 0; |
} |
/** Send an asynchronous call over ipc |
* |
* @return Call identification, returns -1 on fatal error, |
-2 on 'Too many async request, handle answers first |
*/ |
__native sys_ipc_call_async_fast(__native phoneid, __native method, |
__native arg1, __native arg2) |
{ |
call_t *call; |
phone_t *phone; |
int res; |
if (check_call_limit()) |
return IPC_CALLRET_TEMPORARY; |
GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL); |
call = ipc_call_alloc(0); |
IPC_SET_METHOD(call->data, method); |
IPC_SET_ARG1(call->data, arg1); |
IPC_SET_ARG2(call->data, arg2); |
IPC_SET_ARG3(call->data, 0); |
if (!(res=request_preprocess(call))) |
ipc_call(phone, call); |
else |
ipc_backsend_err(phone, call, res); |
return (__native) call; |
} |
/** Synchronous IPC call allowing to send whole message |
* |
* @return The same as sys_ipc_call_async |
*/ |
__native sys_ipc_call_async(__native phoneid, ipc_data_t *data) |
{ |
call_t *call; |
phone_t *phone; |
int res; |
int rc; |
if (check_call_limit()) |
return IPC_CALLRET_TEMPORARY; |
GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL); |
call = ipc_call_alloc(0); |
rc = copy_from_uspace(&call->data.args, &data->args, sizeof(call->data.args)); |
if (rc != 0) { |
ipc_call_free(call); |
return (__native) rc; |
} |
if (!(res=request_preprocess(call))) |
ipc_call(phone, call); |
else |
ipc_backsend_err(phone, call, res); |
return (__native) call; |
} |
/** Forward received call to another destination
 *
 * The arg1 and arg2 are changed in the forwarded message
 *
 * Warning: If implementing non-fast version, make sure that
 * arg3 is not rewritten for certain system IPC
 *
 * @param callid  Id of the call being forwarded.
 * @param phoneid Phone over which to forward the call.
 * @param method  New method (or, for system methods, new ARG1).
 * @param arg1    New ARG1 (or, for system methods, new ARG2).
 *
 * @return 0 on success; ENOENT on unknown call or phone; EPERM when
 *         the method must not be forwarded; otherwise the result of
 *         ipc_forward().
 */
__native sys_ipc_forward_fast(__native callid, __native phoneid,
			      __native method, __native arg1)
{
	call_t *call;
	phone_t *phone;

	call = get_call(callid);
	if (!call)
		return ENOENT;

	call->flags |= IPC_CALL_FORWARDED;

	/* On a bad phone id, answer the call with EFORWARD so the original
	 * sender does not block forever. */
	GET_CHECK_PHONE(phone, phoneid, {
		IPC_SET_RETVAL(call->data, EFORWARD);
		ipc_answer(&TASK->answerbox, call);
		return ENOENT;
	});

	if (!is_forwardable(IPC_GET_METHOD(call->data))) {
		IPC_SET_RETVAL(call->data, EFORWARD);
		ipc_answer(&TASK->answerbox, call);
		return EPERM;
	}

	/* Userspace is not allowed to change method of system methods
	 * on forward, allow changing ARG1 and ARG2 by means of method and arg1
	 */
	if (is_system_method(IPC_GET_METHOD(call->data))) {
		/* The phone slot allocated by process_request() for this
		 * CONNECT_TO_ME would otherwise leak. */
		if (IPC_GET_METHOD(call->data) == IPC_M_CONNECT_TO_ME)
			phone_dealloc(IPC_GET_ARG3(call->data));

		IPC_SET_ARG1(call->data, method);
		IPC_SET_ARG2(call->data, arg1);
	} else {
		IPC_SET_METHOD(call->data, method);
		IPC_SET_ARG1(call->data, arg1);
	}

	return ipc_forward(call, phone, &TASK->answerbox);
}
/** Send IPC answer */ |
__native sys_ipc_answer_fast(__native callid, __native retval, |
__native arg1, __native arg2) |
{ |
call_t *call; |
ipc_data_t saved_data; |
int saveddata = 0; |
int rc; |
/* Do not answer notification callids */ |
if (callid & IPC_CALLID_NOTIFICATION) |
return 0; |
call = get_call(callid); |
if (!call) |
return ENOENT; |
if (answer_need_old(call)) { |
memcpy(&saved_data, &call->data, sizeof(call->data)); |
saveddata = 1; |
} |
IPC_SET_RETVAL(call->data, retval); |
IPC_SET_ARG1(call->data, arg1); |
IPC_SET_ARG2(call->data, arg2); |
rc = answer_preprocess(call, saveddata ? &saved_data : NULL); |
ipc_answer(&TASK->answerbox, call); |
return rc; |
} |
/** Answer a call, taking the whole answer message from userspace.
 *
 * @param callid Id of the call to answer.
 * @param data   Userspace buffer with the answer arguments.
 *
 * @return Result of answer_preprocess() (0 on success), ENOENT on an
 *         unknown callid, or the error of the userspace copy-in.
 */
__native sys_ipc_answer(__native callid, ipc_data_t *data)
{
	call_t *call;
	ipc_data_t saved_data;
	int saveddata = 0;
	int rc;

	/* Do not answer notification callids */
	if (callid & IPC_CALLID_NOTIFICATION)
		return 0;

	call = get_call(callid);
	if (!call)
		return ENOENT;

	if (answer_need_old(call)) {
		/* Snapshot the request data before it is overwritten;
		 * answer_preprocess() needs it for protocol bookkeeping. */
		memcpy(&saved_data, &call->data, sizeof(call->data));
		saveddata = 1;
	}
	rc = copy_from_uspace(&call->data.args, &data->args,
			      sizeof(call->data.args));
	/* NOTE(review): on a copy-in failure the call is returned to this
	 * task unanswered, so the original sender may block indefinitely —
	 * compare with sys_ipc_answer_fast(), which always answers.
	 * Verify whether answering with an error here was intended. */
	if (rc != 0)
		return rc;

	rc = answer_preprocess(call, saveddata ? &saved_data : NULL);

	ipc_answer(&TASK->answerbox, call);
	return rc;
}
/** Hang up the phone |
* |
*/ |
__native sys_ipc_hangup(int phoneid) |
{ |
phone_t *phone; |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
if (ipc_phone_hangup(phone)) |
return -1; |
return 0; |
} |
/** Wait for incoming ipc call or answer
 *
 * @param calldata Pointer to buffer where the call/answer data is stored
 * @param usec Timeout. See waitq_sleep_timeout() for explanation.
 * @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation.
 *
 * @return Callid, if callid & 1, then the call is answer
 */
__native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int flags)
{
	call_t *call;

restart:
	call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
	if (!call)
		return 0;

	if (call->flags & IPC_CALL_NOTIF) {
		/* Notifications are always dynamically allocated. */
		ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));
		/* Set in_phone_hash to the interrupt counter */
		call->data.phone = (void *)call->private;

		STRUCT_TO_USPACE(calldata, &call->data);
		/* The call is freed here; its address is returned only as an
		 * opaque id, never dereferenced again. */
		ipc_call_free(call);

		return ((__native)call) | IPC_CALLID_NOTIFICATION;
	}

	if (call->flags & IPC_CALL_ANSWERED) {
		process_answer(call);

		ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC));

		/* The answer closes an async call slot opened by
		 * check_call_limit(). */
		atomic_dec(&TASK->active_calls);

		if (call->flags & IPC_CALL_DISCARD_ANSWER) {
			/* Caller asked for fire-and-forget: drop the answer
			 * and wait for the next event. */
			ipc_call_free(call);
			goto restart;
		}

		STRUCT_TO_USPACE(&calldata->args, &call->data.args);
		ipc_call_free(call);

		return ((__native)call) | IPC_CALLID_ANSWERED;
	}

	/* process_request() answers bad connection attempts itself. */
	if (process_request(&TASK->answerbox, call))
		goto restart;

	/* Include phone address('id') of the caller in the request,
	 * copy whole call->data, not only call->data.args */
	if (STRUCT_TO_USPACE(calldata, &call->data)) {
		/* NOTE(review): on copy-out failure the request stays
		 * dispatched but userspace gets 0 — confirm the call is
		 * reclaimed elsewhere (e.g. on cleanup). */
		return 0;
	}
	return (__native)call;
}
/** Connect irq handler to task */ |
__native sys_ipc_register_irq(int irq, irq_code_t *ucode) |
{ |
if (!(cap_get(TASK) & CAP_IRQ_REG)) |
return EPERM; |
if (irq >= IRQ_COUNT || irq <= -IPC_IRQ_RESERVED_VIRTUAL) |
return (__native) ELIMIT; |
irq_ipc_bind_arch(irq); |
return ipc_irq_register(&TASK->answerbox, irq, ucode); |
} |
/* Disconnect irq handler from task */ |
__native sys_ipc_unregister_irq(int irq) |
{ |
if (!(cap_get(TASK) & CAP_IRQ_REG)) |
return EPERM; |
if (irq >= IRQ_COUNT || irq <= -IPC_IRQ_RESERVED_VIRTUAL) |
return (__native) ELIMIT; |
ipc_irq_unregister(&TASK->answerbox, irq); |
return 0; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/ipc/irq.c |
---|
0,0 → 1,339 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericipc |
* @{ |
*/ |
/** @file |
*/ |
/** IRQ notification framework |
* |
* This framework allows applications to register to receive a notification |
* when interrupt is detected. The application may provide a simple 'top-half' |
* handler as part of its registration, which can perform simple operations |
* (read/write port/memory, add information to notification ipc message). |
* |
* The structure of a notification message is as follows: |
* - METHOD: interrupt number |
* - ARG1: payload modified by a 'top-half' handler |
* - ARG2: payload |
* - ARG3: payload |
* - in_phone_hash: interrupt counter (may be needed to assure correct order |
* in multithreaded drivers) |
*/ |
#include <arch.h> |
#include <mm/slab.h> |
#include <errno.h> |
#include <ipc/ipc.h> |
#include <ipc/irq.h> |
#include <atomic.h> |
#include <syscall/copy.h> |
#include <console/console.h> |
/** Per-IRQ notification connection slot. */
typedef struct {
	SPINLOCK_DECLARE(lock);	/**< Guards box, code and counter of this slot. */
	answerbox_t *box;	/**< Receiving answerbox; NULL when the slot is free. */
	irq_code_t *code;	/**< Optional 'top-half' program run on each IRQ. */
	atomic_t counter;	/**< Interrupt counter, stored into call->private. */
} ipc_irq_t;

/* Connection table indexed by irq + IPC_IRQ_RESERVED_VIRTUAL,
 * allocated by ipc_irq_make_table(). */
static ipc_irq_t *irq_conns = NULL;
static int irq_conns_size;
#include <print.h> |
/** Execute the 'top-half' program associated with an IRQ notification.
 *
 * Interprets the simple command list in @a code: memory/port reads and
 * writes plus a few architecture-specific input commands. The value
 * produced by the last read-type command before each store is written
 * into the call argument selected by cmds[i].dstarg (valid range 1-3).
 *
 * @param call Notification call whose arguments may be filled in.
 * @param code Program to run; NULL means nothing to do.
 */
static void code_execute(call_t *call, irq_code_t *code)
{
	int i;
	__native dstval = 0;

	if (!code)
		return;

	for (i=0; i < code->cmdcount;i++) {
		switch (code->cmds[i].cmd) {
		case CMD_MEM_READ_1:
			dstval = *((__u8 *)code->cmds[i].addr);
			break;
		case CMD_MEM_READ_2:
			dstval = *((__u16 *)code->cmds[i].addr);
			break;
		case CMD_MEM_READ_4:
			dstval = *((__u32 *)code->cmds[i].addr);
			break;
		case CMD_MEM_READ_8:
			dstval = *((__u64 *)code->cmds[i].addr);
			break;
		case CMD_MEM_WRITE_1:
			*((__u8 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
		case CMD_MEM_WRITE_2:
			*((__u16 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
		case CMD_MEM_WRITE_4:
			*((__u32 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
		case CMD_MEM_WRITE_8:
			*((__u64 *)code->cmds[i].addr) = code->cmds[i].value;
			break;
#if defined(ia32) || defined(amd64)
		case CMD_PORT_READ_1:
			dstval = inb((long)code->cmds[i].addr);
			break;
		case CMD_PORT_WRITE_1:
			outb((long)code->cmds[i].addr, code->cmds[i].value);
			break;
#endif
#if defined(ia64)
		case CMD_IA64_GETCHAR:
			dstval = _getc(&ski_uconsole);
			break;
#endif
#if defined(ppc32)
		case CMD_PPC32_GETCHAR:
			dstval = cuda_get_scancode();
			break;
#endif
		default:
			/* Unknown commands are silently ignored. */
			break;
		}
		/* dstarg 0 means "discard"; args 1-3 are the payload slots
		 * of the notification message. */
		if (code->cmds[i].dstarg && code->cmds[i].dstarg < 4) {
			call->data.args[code->cmds[i].dstarg] = dstval;
		}
	}
}
/** Free an irq_code_t together with its command array.
 *
 * @param code Program to free; NULL is accepted and ignored.
 */
static void code_free(irq_code_t *code)
{
	if (!code)
		return;
	free(code->cmds);
	free(code);
}
/** Copy a 'top-half' program from userspace into kernel memory.
 *
 * Rejects programs longer than IRQ_MAX_PROG_SIZE commands.
 *
 * @param ucode Userspace pointer to the program header.
 *
 * @return Freshly allocated kernel copy, or NULL on copy failure or
 *         oversized program. Ownership passes to the caller
 *         (release with code_free()).
 */
static irq_code_t * code_from_uspace(irq_code_t *ucode)
{
	irq_code_t *kcode;
	irq_cmd_t *ucmds;
	int rc;

	kcode = malloc(sizeof(*kcode), 0);
	rc = copy_from_uspace(kcode, ucode, sizeof(*kcode));
	if (rc != 0 || kcode->cmdcount > IRQ_MAX_PROG_SIZE) {
		free(kcode);
		return NULL;
	}

	/* The header's cmds pointer still refers to userspace memory;
	 * replace it with a kernel copy of the command array. */
	ucmds = kcode->cmds;
	kcode->cmds = malloc(sizeof(kcode->cmds[0]) * (kcode->cmdcount), 0);
	rc = copy_from_uspace(kcode->cmds, ucmds,
			      sizeof(kcode->cmds[0]) * (kcode->cmdcount));
	if (rc != 0) {
		free(kcode->cmds);
		free(kcode);
		return NULL;
	}

	return kcode;
}
/** Unregister task from irq */ |
void ipc_irq_unregister(answerbox_t *box, int irq) |
{ |
ipl_t ipl; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
ipl = interrupts_disable(); |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[mq].box == box) { |
irq_conns[mq].box = NULL; |
code_free(irq_conns[mq].code); |
irq_conns[mq].code = NULL; |
} |
spinlock_unlock(&irq_conns[mq].lock); |
interrupts_restore(ipl); |
} |
/** Register an answerbox as a receiving end of interrupts notifications */ |
int ipc_irq_register(answerbox_t *box, int irq, irq_code_t *ucode) |
{ |
ipl_t ipl; |
irq_code_t *code; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
ASSERT(irq_conns); |
if (ucode) { |
code = code_from_uspace(ucode); |
if (!code) |
return EBADMEM; |
} else |
code = NULL; |
ipl = interrupts_disable(); |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[mq].box) { |
spinlock_unlock(&irq_conns[mq].lock); |
interrupts_restore(ipl); |
code_free(code); |
return EEXISTS; |
} |
irq_conns[mq].box = box; |
irq_conns[mq].code = code; |
atomic_set(&irq_conns[mq].counter, 0); |
spinlock_unlock(&irq_conns[mq].lock); |
interrupts_restore(ipl); |
return 0; |
} |
/** Add call to proper answerbox queue |
* |
* Assume irq_conns[mq].lock is locked */ |
static void send_call(int mq, call_t *call) |
{ |
spinlock_lock(&irq_conns[mq].box->irq_lock); |
list_append(&call->link, &irq_conns[mq].box->irq_notifs); |
spinlock_unlock(&irq_conns[mq].box->irq_lock); |
waitq_wakeup(&irq_conns[mq].box->wq, 0); |
} |
/** Send notification message |
* |
*/ |
void ipc_irq_send_msg(int irq, __native a1, __native a2, __native a3) |
{ |
call_t *call; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[mq].box) { |
call = ipc_call_alloc(FRAME_ATOMIC); |
if (!call) { |
spinlock_unlock(&irq_conns[mq].lock); |
return; |
} |
call->flags |= IPC_CALL_NOTIF; |
IPC_SET_METHOD(call->data, irq); |
IPC_SET_ARG1(call->data, a1); |
IPC_SET_ARG2(call->data, a2); |
IPC_SET_ARG3(call->data, a3); |
/* Put a counter to the message */ |
call->private = atomic_preinc(&irq_conns[mq].counter); |
send_call(mq, call); |
} |
spinlock_unlock(&irq_conns[mq].lock); |
} |
/** Notify task that an irq had occurred. |
* |
* We expect interrupts to be disabled |
*/ |
void ipc_irq_send_notif(int irq) |
{ |
call_t *call; |
int mq = irq + IPC_IRQ_RESERVED_VIRTUAL; |
ASSERT(irq_conns); |
spinlock_lock(&irq_conns[mq].lock); |
if (irq_conns[mq].box) { |
call = ipc_call_alloc(FRAME_ATOMIC); |
if (!call) { |
spinlock_unlock(&irq_conns[mq].lock); |
return; |
} |
call->flags |= IPC_CALL_NOTIF; |
/* Put a counter to the message */ |
call->private = atomic_preinc(&irq_conns[mq].counter); |
/* Set up args */ |
IPC_SET_METHOD(call->data, irq); |
/* Execute code to handle irq */ |
code_execute(call, irq_conns[mq].code); |
send_call(mq, call); |
} |
spinlock_unlock(&irq_conns[mq].lock); |
} |
/** Initialize table of interrupt handlers |
* |
* @param irqcount Count of required hardware IRQs to be supported |
*/ |
void ipc_irq_make_table(int irqcount) |
{ |
int i; |
irqcount += IPC_IRQ_RESERVED_VIRTUAL; |
irq_conns_size = irqcount; |
irq_conns = malloc(irqcount * (sizeof(*irq_conns)), 0); |
for (i=0; i < irqcount; i++) { |
spinlock_initialize(&irq_conns[i].lock, "irq_ipc_lock"); |
irq_conns[i].box = NULL; |
irq_conns[i].code = NULL; |
} |
} |
/** Disconnect all irq's notifications |
* |
* TODO: It may be better to do some linked list, so that |
* we wouldn't need to go through whole array every cleanup |
*/ |
void ipc_irq_cleanup(answerbox_t *box) |
{ |
int i; |
ipl_t ipl; |
for (i=0; i < irq_conns_size; i++) { |
ipl = interrupts_disable(); |
spinlock_lock(&irq_conns[i].lock); |
if (irq_conns[i].box == box) |
irq_conns[i].box = NULL; |
spinlock_unlock(&irq_conns[i].lock); |
interrupts_restore(ipl); |
} |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/cpu/cpu.c |
---|
0,0 → 1,114 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief CPU subsystem initialization and listing. |
*/ |
#include <cpu.h> |
#include <arch.h> |
#include <arch/cpu.h> |
#include <mm/slab.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <config.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <memstr.h> |
#include <adt/list.h> |
#include <print.h> |
cpu_t *cpus; |
/** Initialize CPUs
 *
 * Initialize kernel CPUs support.
 *
 * On SMP configurations, the cpus[] array and per-CPU stacks are
 * allocated only by the first CPU (config.cpu_active == 1); later CPUs
 * only set up their own CPU pointer and identify themselves.
 */
void cpu_init(void) {
	int i, j;

#ifdef CONFIG_SMP
	if (config.cpu_active == 1) {
#endif /* CONFIG_SMP */
		cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
					FRAME_ATOMIC);
		/* FRAME_ATOMIC allocation may fail; there is no way to
		 * continue without the array. */
		if (!cpus)
			panic("malloc/cpus");

		/* initialize everything */
		memsetb((__address) cpus, sizeof(cpu_t) * config.cpu_count, 0);

		for (i=0; i < config.cpu_count; i++) {
			/* Each CPU gets its own kernel stack. */
			cpus[i].stack = (__u8 *) PA2KA(PFN2ADDR(frame_alloc(STACK_FRAMES, FRAME_KA | FRAME_PANIC)));

			cpus[i].id = i;

			spinlock_initialize(&cpus[i].lock, "cpu_t.lock");

			/* Per-CPU scheduler run queues. */
			for (j = 0; j < RQ_COUNT; j++) {
				spinlock_initialize(&cpus[i].rq[j].lock, "rq_t.lock");
				list_initialize(&cpus[i].rq[j].rq_head);
			}
		}

#ifdef CONFIG_SMP
	}
#endif /* CONFIG_SMP */

	/* Point the per-CPU pointer at this CPU's slot and bring it up. */
	CPU = &cpus[config.cpu_active-1];

	CPU->active = 1;
	CPU->tlb_active = 1;

	cpu_identify();
	cpu_arch_init();
}
/** List all processors. */ |
void cpu_list(void) |
{ |
int i; |
for (i = 0; i < config.cpu_count; i++) { |
if (cpus[i].active) |
cpu_print_report(&cpus[i]); |
else |
printf("cpu%d: not active\n", i); |
} |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/time/delay.c |
---|
0,0 → 1,69 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup time |
* @{ |
*/ |
/** |
* @file |
* @brief Active delay function. |
*/ |
#include <time/delay.h> |
#include <arch/types.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Active delay
 *
 * Delay the execution for the given number
 * of microseconds (or slightly more). The delay
 * is implemented as CPU calibrated active loop.
 *
 * Note that this busy-waits with interrupts disabled for the whole
 * duration, so it is only suitable for short delays.
 *
 * @param usec Number of microseconds to sleep.
 */
void delay(__u32 usec)
{
	ipl_t ipl;

	/*
	 * The delay loop is calibrated for each and every
	 * CPU in the system. Therefore it is necessary to
	 * call interrupts_disable() before calling the
	 * asm_delay_loop().
	 */
	ipl = interrupts_disable();
	asm_delay_loop(usec * CPU->delay_loop_const);
	interrupts_restore(ipl);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/time/timeout.c |
---|
0,0 → 1,219 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup time |
* @{ |
*/ |
/** |
* @file |
* @brief Timeout management functions. |
*/ |
#include <time/timeout.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <config.h> |
#include <panic.h> |
#include <synch/spinlock.h> |
#include <func.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize timeouts
 *
 * Initialize kernel timeouts.
 *
 * Sets up the current CPU's timeout lock and (empty) active-timeout
 * list; the structures are per-CPU.
 */
void timeout_init(void)
{
	spinlock_initialize(&CPU->timeoutlock, "timeout_lock");
	list_initialize(&CPU->timeout_active_head);
}
/** Reinitialize timeout |
* |
* Initialize all members except the lock. |
* |
* @param t Timeout to be initialized. |
* |
*/ |
void timeout_reinitialize(timeout_t *t) |
{ |
t->cpu = NULL; |
t->ticks = 0; |
t->handler = NULL; |
t->arg = NULL; |
link_initialize(&t->link); |
} |
/** Initialize timeout
 *
 * Initialize all members including the lock.
 * Use this once per timeout_t; for re-arming an already initialized
 * structure, timeout_reinitialize() suffices.
 *
 * @param t Timeout to be initialized.
 *
 */
void timeout_initialize(timeout_t *t)
{
	spinlock_initialize(&t->lock, "timeout_t_lock");
	timeout_reinitialize(t);
}
/** Register timeout
 *
 * Insert timeout handler f (with argument arg)
 * to timeout list and make it execute in
 * time microseconds (or slightly more).
 *
 * The active list is a delta list: each entry's ticks field holds the
 * delay relative to its predecessor, so only the head entry has to be
 * decremented on every clock tick.
 *
 * @param t Timeout structure.
 * @param time Number of usec in the future to execute
 *        the handler.
 * @param f Timeout handler function.
 * @param arg Timeout handler argument.
 *
 */
void timeout_register(timeout_t *t, __u64 time, timeout_handler_t f, void *arg)
{
	timeout_t *hlp = NULL;
	link_t *l, *m;
	ipl_t ipl;
	__u64 sum;

	ipl = interrupts_disable();
	spinlock_lock(&CPU->timeoutlock);
	spinlock_lock(&t->lock);

	/* A timeout must not be registered twice. */
	if (t->cpu)
		panic("t->cpu != 0");

	t->cpu = CPU;
	t->ticks = us2ticks(time);

	t->handler = f;
	t->arg = arg;

	/*
	 * Insert t into the active timeouts list according to t->ticks.
	 */
	sum = 0;
	l = CPU->timeout_active_head.next;
	while (l != &CPU->timeout_active_head) {
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		/* sum accumulates the absolute expiry of l's predecessors;
		 * stop at the first entry expiring after t. */
		if (t->ticks < sum + hlp->ticks) {
			spinlock_unlock(&hlp->lock);
			break;
		}
		sum += hlp->ticks;
		spinlock_unlock(&hlp->lock);
		l = l->next;
	}

	m = l->prev;
	list_prepend(&t->link, m); /* avoid using l->prev */

	/*
	 * Adjust t->ticks according to ticks accumulated in h's predecessors.
	 */
	t->ticks -= sum;

	/*
	 * Decrease ticks of t's immediate succesor by t->ticks.
	 */
	if (l != &CPU->timeout_active_head) {
		spinlock_lock(&hlp->lock);
		hlp->ticks -= t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	spinlock_unlock(&t->lock);
	spinlock_unlock(&CPU->timeoutlock);
	interrupts_restore(ipl);
}
/** Unregister timeout
 *
 * Remove timeout from timeout list.
 *
 * @param t Timeout to unregister.
 *
 * @return true on success, false on failure.
 */
bool timeout_unregister(timeout_t *t)
{
	timeout_t *hlp;
	link_t *l;
	ipl_t ipl;

grab_locks:
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	if (!t->cpu) {
		/* Not registered (or already fired) — nothing to remove. */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		return false;
	}

	/* Lock order elsewhere is timeoutlock before t->lock; taking them
	 * in the opposite order here would deadlock, so trylock and retry
	 * from scratch on contention. */
	if (!spinlock_trylock(&t->cpu->timeoutlock)) {
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		goto grab_locks;
	}

	/*
	 * Now we know for sure that t hasn't been activated yet
	 * and is lurking in t->cpu->timeout_active_head queue.
	 */
	l = t->link.next;
	if (l != &t->cpu->timeout_active_head) {
		/* Give t's remaining delta ticks to its successor so the
		 * delta list stays consistent. */
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		hlp->ticks += t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	list_remove(&t->link);
	spinlock_unlock(&t->cpu->timeoutlock);

	timeout_reinitialize(t);
	spinlock_unlock(&t->lock);

	interrupts_restore(ipl);
	return true;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/time/clock.c |
---|
0,0 → 1,192 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup time |
* @{ |
*/ |
/** |
* @file |
* @brief High-level clock interrupt handler. |
* |
* This file contains the clock() function which is the source |
* of preemption. It is also responsible for executing expired |
* timeouts. |
*/ |
#include <time/clock.h> |
#include <time/timeout.h> |
#include <arch/types.h> |
#include <config.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <func.h> |
#include <proc/scheduler.h> |
#include <cpu.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <atomic.h> |
#include <proc/thread.h> |
#include <sysinfo/sysinfo.h> |
#include <arch/barrier.h> |
/* Pointers to public variables with time */
struct ptime {
	__native seconds1;	/* written before useconds on rollover */
	__native useconds;	/* microseconds within the current second */
	__native seconds2;	/* written after useconds; mirrors seconds1 */
};
/* Kernel-side mapping of the page shared with userspace (see clock_counter_init()). */
struct ptime *public_time;
/* Variable holding fragment of second, so that we would update
 * seconds correctly
 */
static __native secfrag = 0;
/** Initialize realtime clock counter |
* |
* The applications (and sometimes kernel) need to access accurate |
* information about realtime data. We allocate 1 page with these |
* data and update it periodically. |
* |
* |
*/ |
void clock_counter_init(void) |
{ |
void *faddr; |
faddr = (void *)PFN2ADDR(frame_alloc(0, FRAME_ATOMIC)); |
if (!faddr) |
panic("Cannot allocate page for clock"); |
public_time = (struct ptime *)PA2KA(faddr); |
/* TODO: We would need some arch dependent settings here */ |
public_time->seconds1 = 0; |
public_time->seconds2 = 0; |
public_time->useconds = 0; |
sysinfo_set_item_val("clock.faddr", NULL, (__native)faddr); |
} |
/** Update public counters
 *
 * Update it only on first processor
 * TODO: Do we really need so many write barriers?
 */
static void clock_update_counters(void)
{
	if (CPU->id == 0) {
		/* secfrag accumulates microseconds until a full second elapses. */
		secfrag += 1000000/HZ;
		if (secfrag >= 1000000) {
			secfrag -= 1000000;
			/*
			 * NOTE(review): seconds1 is bumped before and seconds2
			 * after the useconds store, separated by write barriers.
			 * Presumably readers compare seconds1 with seconds2 to
			 * detect a torn read (seqlock-like) -- confirm against
			 * the userspace consumer of this page.
			 */
			public_time->seconds1++;
			write_barrier();
			public_time->useconds = secfrag;
			write_barrier();
			public_time->seconds2 = public_time->seconds1;
		} else
			public_time->useconds += 1000000/HZ;
	}
}
/** Clock routine
 *
 * Clock routine executed from clock interrupt handler
 * (assuming interrupts_disable()'d). Runs expired timeouts
 * and preemptive scheduling.
 *
 */
void clock(void)
{
	link_t *l;
	timeout_t *h;
	timeout_handler_t f;
	void *arg;
	count_t missed_clock_ticks = CPU->missed_clock_ticks;
	int i;
	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	/* Account for this tick plus any ticks missed earlier (<=, not <). */
	for (i = 0; i <= missed_clock_ticks; i++) {
		clock_update_counters();
		spinlock_lock(&CPU->timeoutlock);
		while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
			h = list_get_instance(l, timeout_t, link);
			spinlock_lock(&h->lock);
			/*
			 * The list stores delta ticks, so only the head needs
			 * decrementing; a non-zero head means nothing else
			 * expired this tick either.
			 */
			if (h->ticks-- != 0) {
				spinlock_unlock(&h->lock);
				break;
			}
			/* Expired: detach, snapshot handler+arg, then run it
			 * with no locks held (handlers may take other locks). */
			list_remove(l);
			f = h->handler;
			arg = h->arg;
			timeout_reinitialize(h);
			spinlock_unlock(&h->lock);
			spinlock_unlock(&CPU->timeoutlock);
			f(arg);
			spinlock_lock(&CPU->timeoutlock);
		}
		spinlock_unlock(&CPU->timeoutlock);
	}
	CPU->missed_clock_ticks = 0;
	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		__u64 ticks;
		spinlock_lock(&CPU->lock);
		CPU->needs_relink += 1 + missed_clock_ticks;
		spinlock_unlock(&CPU->lock);
		/* Charge the elapsed ticks against the running thread's quantum. */
		spinlock_lock(&THREAD->lock);
		if ((ticks = THREAD->ticks)) {
			if (ticks >= 1 + missed_clock_ticks)
				THREAD->ticks -= 1 + missed_clock_ticks;
			else
				THREAD->ticks = 0;
		}
		spinlock_unlock(&THREAD->lock);
		/* Quantum already exhausted on entry: preempt unless disabled. */
		if (!ticks && !PREEMPTION_DISABLED) {
			scheduler();
		}
	}
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/main/kinit.c |
---|
0,0 → 1,197 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup main |
* @{ |
*/ |
/** |
* @file |
* @brief Kernel initialization thread. |
* |
* This file contains kinit kernel thread which carries out |
* high level system initialization. |
* |
* This file is responsible for finishing SMP configuration |
* and creation of userspace init tasks. |
*/ |
#include <main/kinit.h> |
#include <config.h> |
#include <arch.h> |
#include <proc/scheduler.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <panic.h> |
#include <func.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <print.h> |
#include <memstr.h> |
#include <console/console.h> |
#include <interrupt.h> |
#include <console/kconsole.h> |
#include <security/cap.h> |
#ifdef CONFIG_SMP |
#include <arch/smp/mps.h> |
#endif /* CONFIG_SMP */ |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#ifdef CONFIG_TEST |
#include <test.h> |
#endif /* CONFIG_TEST */ |
/** Kernel initialization thread.
 *
 * kinit takes care of higher level kernel
 * initialization (i.e. thread creation,
 * userspace initialization etc.).
 *
 * Finishes SMP bring-up, starts the kernel console and spawns
 * the initial userspace tasks.
 *
 * @param arg Not used.
 */
void kinit(void *arg)
{
	thread_t *t;
	/*
	 * Detach kinit as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);
	interrupts_disable();
#ifdef CONFIG_SMP
	if (config.cpu_count > 1) {
		/*
		 * Create the kmp thread and wait for its completion.
		 * cpu1 through cpuN-1 will come up consecutively and
		 * not mess together with kcpulb threads.
		 * Just a beautification.
		 */
		if ((t = thread_create(kmp, NULL, TASK, 0, "kmp"))) {
			/* Wire kmp to the bootstrap CPU. */
			spinlock_lock(&t->lock);
			t->flags |= X_WIRED;
			t->cpu = &cpus[0];
			spinlock_unlock(&t->lock);
			thread_ready(t);
		} else
			panic("thread_create/kmp\n");
		thread_join(t);
		thread_detach(t);
	}
#endif /* CONFIG_SMP */
	/*
	 * Now that all CPUs are up, we can report what we've found.
	 */
	cpu_list();
#ifdef CONFIG_SMP
	if (config.cpu_count > 1) {
		int i;
		/*
		 * For each CPU, create its load balancing thread.
		 */
		for (i = 0; i < config.cpu_count; i++) {
			if ((t = thread_create(kcpulb, NULL, TASK, 0, "kcpulb"))) {
				/* Pin each balancer to its own CPU. */
				spinlock_lock(&t->lock);
				t->flags |= X_WIRED;
				t->cpu = &cpus[i];
				spinlock_unlock(&t->lock);
				thread_ready(t);
			} else
				panic("thread_create/kcpulb\n");
		}
	}
#endif /* CONFIG_SMP */
	/*
	 * At this point SMP, if present, is configured.
	 */
	arch_post_smp_init();
	/*
	 * Create kernel console.
	 */
	if ((t = thread_create(kconsole, "kconsole", TASK, 0, "kconsole")))
		thread_ready(t);
	else
		panic("thread_create/kconsole\n");
	interrupts_enable();
#ifdef CONFIG_TEST
	test();
	printf("\nTest finished, please reboot\n");
#else /* CONFIG_TEST */
	task_t *utask;
	count_t i;
	for (i = 0; i < init.cnt; i++) {
		/*
		 * Run user tasks.
		 */
		/* Init images are expected to be frame-aligned. */
		if (init.tasks[i].addr % FRAME_SIZE)
			panic("init[%d].addr is not frame aligned", i);
		utask = task_run_program((void *) init.tasks[i].addr, "USPACE");
		if (utask) {
			/*
			 * Set capabilities to init userspace tasks.
			 */
			cap_set(utask, CAP_CAP | CAP_MEM_MANAGER | CAP_IO_MANAGER | CAP_PREEMPT_CONTROL | CAP_IRQ_REG);
			/* The first started task's answerbox becomes IPC phone 0. */
			if (!ipc_phone_0)
				ipc_phone_0 = &utask->answerbox;
		} else
			printf("Init task %zd not started.\n", i);
	}
	if (!stdin) {
		/* No console input: loop forever as a visible heartbeat. */
		while (1) {
			thread_sleep(1);
			printf("kinit... ");
		}
	}
#endif /* CONFIG_TEST */
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/main/main.c |
---|
0,0 → 1,328 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup main |
* @{ |
*/ |
/** |
* @file |
* @brief Main initialization kernel function for all processors. |
* |
* During kernel boot, all processors, after architecture dependent |
* initialization, start executing code found in this file. After |
* bringing up all subsystems, control is passed to scheduler(). |
* |
* The bootstrap processor starts executing main_bsp() while |
* the application processors start executing main_ap(). |
* |
* @see scheduler() |
* @see main_bsp() |
* @see main_ap() |
*/ |
#include <arch/asm.h> |
#include <context.h> |
#include <print.h> |
#include <panic.h> |
#include <debug.h> |
#include <config.h> |
#include <time/clock.h> |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <main/kinit.h> |
#include <main/version.h> |
#include <console/kconsole.h> |
#include <cpu.h> |
#include <align.h> |
#include <interrupt.h> |
#include <arch/mm/memory_init.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <mm/tlb.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <synch/waitq.h> |
#include <synch/futex.h> |
#include <arch/arch.h> |
#include <arch.h> |
#include <arch/faddr.h> |
#include <typedefs.h> |
#include <ipc/ipc.h> |
#include <macros.h> |
#include <adt/btree.h> |
#include <console/klog.h> |
#ifdef CONFIG_SMP |
#include <arch/smp/apic.h> |
#include <arch/smp/mps.h> |
#endif /* CONFIG_SMP */ |
#include <smp/smp.h> |
/** Global configuration structure. */
config_t config = {
	.mm_initialized = false
};
/** Initial user-space tasks (filled in by boot-time code). */
init_t init = {
	0
};
/* Bootstrap context used to switch onto the reserved kernel stack. */
context_t ctx;
/**
 * These 'hardcoded' variables will be initialized by
 * the linker or the low level assembler code with
 * appropriate sizes and addresses.
 */
__address hardcoded_load_address = 0;
size_t hardcoded_ktext_size = 0;
size_t hardcoded_kdata_size = 0;
void main_bsp(void);
void main_ap(void);
/*
 * These two functions prevent stack from underflowing during the
 * kernel boot phase when SP is set to the very top of the reserved
 * space. The stack could get corrupted by a fooled compiler-generated
 * pop sequence otherwise.
 */
static void main_bsp_separated_stack(void);
#ifdef CONFIG_SMP
static void main_ap_separated_stack(void);
#endif
/* Size of the per-CPU bootstrap stack reservation. */
#define CONFIG_STACK_SIZE ((1<<STACK_FRAMES)*STACK_SIZE)
/** Main kernel routine for bootstrap CPU.
 *
 * Initializes the kernel by bootstrap CPU.
 * This function passes control directly to
 * main_bsp_separated_stack().
 *
 * Assuming interrupts_disable().
 *
 */
void main_bsp(void)
{
	__address stackaddr;
	config.cpu_count = 1;
	config.cpu_active = 1;
	config.base = hardcoded_load_address;
	config.memory_size = get_memory_size();
	config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE);
	/* Place the bootstrap stack right after the kernel image... */
	stackaddr = config.base + config.kernel_size;
	/* Avoid placing kernel on top of init */
	count_t i;
	bool overlap = false;
	for (i = 0; i < init.cnt; i++)
		if (PA_overlaps(stackaddr, CONFIG_STACK_SIZE, init.tasks[i].addr, init.tasks[i].size)) {
			/* ...unless an init image lives there: move past it
			 * and grow that image's reservation to cover the stack. */
			stackaddr = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, CONFIG_STACK_SIZE);
			init.tasks[i].size = ALIGN_UP(init.tasks[i].size, CONFIG_STACK_SIZE) + CONFIG_STACK_SIZE;
			overlap = true;
		}
	if (!overlap)
		config.kernel_size += CONFIG_STACK_SIZE;
	/* Switch to the freshly reserved stack and continue there. */
	context_save(&ctx);
	context_set(&ctx, FADDR(main_bsp_separated_stack), stackaddr, THREAD_STACK_SIZE);
	context_restore(&ctx);
	/* not reached */
}
/** Main kernel routine for bootstrap CPU using new stack.
 *
 * Second part of main_bsp(). Brings up all kernel subsystems in
 * dependency order and hands control to the scheduler. The order
 * of the init calls below is significant; do not reorder.
 *
 */
void main_bsp_separated_stack(void)
{
	task_t *k;
	thread_t *t;
	count_t i;
	the_initialize(THE);
	/*
	 * kconsole data structures must be initialized very early
	 * because other subsystems will register their respective
	 * commands.
	 */
	kconsole_init();
	/*
	 * Exception handler initialization, before architecture
	 * starts adding its own handlers
	 */
	exc_init();
	/*
	 * Memory management subsystems initialization.
	 */
	arch_pre_mm_init();
	frame_init(); /* Initialize at least 1 memory segment big enough for slab to work */
	slab_cache_init();
	btree_init();
	as_init();
	page_init();
	tlb_init();
	config.mm_initialized = true;
	arch_post_mm_init();
	version_print();
	printf("%.*p: hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(__address) * 2, config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 10);
	arch_pre_smp_init();
	smp_init();
	slab_enable_cpucache(); /* Slab must be initialized AFTER we know the number of processors */
	printf("config.memory_size=%zdM\n", config.memory_size >> 20);
	printf("config.cpu_count=%zd\n", config.cpu_count);
	cpu_init();
	calibrate_delay_loop();
	clock_counter_init();
	timeout_init();
	scheduler_init();
	task_init();
	thread_init();
	futex_init();
	klog_init();
	for (i = 0; i < init.cnt; i++)
		printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(__address) * 2, init.tasks[i].addr, i, init.tasks[i].size);
	ipc_init();
	/*
	 * Create kernel task.
	 */
	k = task_create(AS_KERNEL, "KERNEL");
	if (!k)
		panic("can't create kernel task\n");
	/*
	 * Create the first thread.
	 */
	t = thread_create(kinit, NULL, k, 0, "kinit");
	if (!t)
		panic("can't create kinit thread\n");
	thread_ready(t);
	/*
	 * This call to scheduler() will return to kinit,
	 * starting the thread of kernel threads.
	 */
	scheduler();
	/* not reached */
}
#ifdef CONFIG_SMP |
/** Main kernel routine for application CPUs.
 *
 * Executed by application processors, temporary stack
 * is at ctx.sp which was set during BP boot.
 * This function passes control directly to
 * main_ap_separated_stack().
 *
 * Assuming interrupts_disable()'d.
 *
 */
void main_ap(void)
{
	/*
	 * Incrementing the active CPU counter will guarantee that the
	 * pm_init() will not attempt to build GDT and IDT tables again.
	 * Neither frame_init() will do the complete thing. Neither cpu_init()
	 * will do.
	 */
	config.cpu_active++;
	/*
	 * The THE structure is well defined because ctx.sp is used as stack.
	 */
	the_initialize(THE);
	/* Per-CPU subset of the subsystem init done on the BSP. */
	arch_pre_mm_init();
	frame_init();
	page_init();
	tlb_init();
	arch_post_mm_init();
	cpu_init();
	calibrate_delay_loop();
	l_apic_init();
	l_apic_debug();
	/* Copy THE onto this CPU's private stack before switching to it. */
	the_copy(THE, (the_t *) CPU->stack);
	/*
	 * If we woke kmp up before we left the kernel stack, we could
	 * collide with another CPU coming up. To prevent this, we
	 * switch to this cpu's private stack prior to waking kmp up.
	 */
	context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
	context_restore(&CPU->saved_context);
	/* not reached */
}
/** Main kernel routine for application CPUs using new stack.
 *
 * Second part of main_ap(). Runs on the CPU's private stack.
 *
 */
void main_ap_separated_stack(void)
{
	/*
	 * Configure timeouts for this cpu.
	 */
	timeout_init();
	/* Signal kmp that this AP has finished coming up. */
	waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
	scheduler();
	/* not reached */
}
#endif /* CONFIG_SMP */ |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/main/uinit.c |
---|
0,0 → 1,68 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup main |
* @{ |
*/ |
/** |
* @file |
* @brief Userspace bootstrap thread. |
* |
* This file contains uinit kernel thread wich is used to start every |
* userspace thread including threads created by SYS_THREAD_CREATE syscall. |
* |
* @see SYS_THREAD_CREATE |
*/ |
#include <main/uinit.h> |
#include <arch/types.h> |
#include <proc/thread.h> |
#include <userspace.h> |
#include <mm/slab.h> |
/** Thread used to bring up userspace thread. |
* |
* @param arg Pointer to structure containing userspace entry and stack addresses. |
*/ |
void uinit(void *arg) |
{ |
uspace_arg_t uarg; |
uarg.uspace_entry = ((uspace_arg_t *) arg)->uspace_entry; |
uarg.uspace_stack = ((uspace_arg_t *) arg)->uspace_stack; |
uarg.uspace_uarg = ((uspace_arg_t *) arg)->uspace_uarg; |
uarg.uspace_thread_function = NULL; |
uarg.uspace_thread_arg = NULL; |
free((uspace_arg_t *) arg); |
userspace(&uarg); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/main/version.c |
---|
0,0 → 1,64 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup main |
* @{ |
*/ |
/** @file |
*/ |
#include <main/version.h> |
#include <print.h> |
/* Version strings; RELEASE, NAME and ARCH are presumably supplied by
 * the build system as preprocessor defines -- verify against the
 * kernel Makefile. */
char *project = "SPARTAN kernel";
char *copyright = "Copyright (C) 2001-2006 HelenOS project";
char *release = RELEASE;
char *name = NAME;
char *arch = ARCH;
#ifdef REVISION
char *revision = ", revision " REVISION;
#else
char *revision = "";
#endif
#ifdef TIMESTAMP
char *timestamp = " on " TIMESTAMP;
#else
char *timestamp = "";
#endif
/** Print version information.
 *
 * Prints project, release, name, revision, build timestamp,
 * architecture and copyright on the kernel console.
 */
void version_print(void)
{
	printf("%s, release %s (%s)%s\nBuilt%s for %s\n%s\n", project, release, name, revision, timestamp, arch, copyright);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/ddi/ddi.c |
---|
0,0 → 1,208 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericddi |
* @{ |
*/ |
/** |
* @file |
* @brief Device Driver Interface functions. |
* |
* This file contains functions that comprise the Device Driver Interface. |
* These are the functions for mapping physical memory and enabling I/O |
* space to tasks. |
*/ |
#include <ddi/ddi.h> |
#include <ddi/ddi_arg.h> |
#include <proc/task.h> |
#include <security/cap.h> |
#include <mm/frame.h> |
#include <mm/as.h> |
#include <synch/spinlock.h> |
#include <syscall/copy.h> |
#include <arch.h> |
#include <align.h> |
#include <errno.h> |
/** Map piece of physical memory into virtual address space of current task.
 *
 * Requires the CAP_MEM_MANAGER capability. The actual page table
 * entries are established lazily on page fault by the phys backend.
 *
 * @param pf Physical frame address of the starting frame.
 * @param vp Virtual page address of the starting page.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
 * ENOENT if there is no task matching the specified ID and ENOMEM if
 * there was a problem in creating address space area.
 */
static int ddi_physmem_map(__address pf, __address vp, count_t pages, int flags)
{
	ipl_t ipl;
	cap_t caps;
	mem_backend_data_t backend_data;
	backend_data.base = pf;
	backend_data.frames = pages;
	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	caps = cap_get(TASK);
	if (!(caps & CAP_MEM_MANAGER))
		return EPERM;
	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);
	if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
		&phys_backend, &backend_data)) {
		/*
		 * The address space area could not have been created.
		 * We report it using ENOMEM.
		 */
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);
		return ENOMEM;
	}
	/*
	 * Mapping is created on-demand during page fault.
	 */
	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);
	return 0;
}
/** Enable range of I/O space for task.
 *
 * Requires the CAP_IO_MANAGER capability. The architecture-specific
 * part is delegated to ddi_iospace_enable_arch().
 *
 * @param id Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size Size of the enabled I/O space..
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,
 * ENOENT if there is no task matching the specified ID.
 */
static int ddi_iospace_enable(task_id_t id, __address ioaddr, size_t size)
{
	ipl_t ipl;
	cap_t caps;
	task_t *t;
	int rc;
	/*
	 * Make sure the caller is authorised to make this syscall.
	 */
	caps = cap_get(TASK);
	if (!(caps & CAP_IO_MANAGER))
		return EPERM;
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);
	t = task_find_by_id(id);
	if (!t) {
		/*
		 * There is no task with the specified ID.
		 */
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	/* Lock the task and release the lock protecting tasks_btree.
	 * (Hand-over-hand: t cannot disappear while tasks_lock is held.) */
	spinlock_lock(&t->lock);
	spinlock_unlock(&tasks_lock);
	rc = ddi_iospace_enable_arch(t, ioaddr, size);
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
	return rc;
}
/** Wrapper for SYS_MAP_PHYSMEM syscall. |
* |
* @param phys_base Physical base address to map |
* @param virt_base Destination virtual address |
* @param pages Number of pages |
* @param flags Flags of newly mapped pages |
* |
* @return 0 on success, otherwise it returns error code found in errno.h |
*/ |
__native sys_physmem_map(__native phys_base, __native virt_base, __native pages, |
__native flags) |
{ |
return (__native) ddi_physmem_map(ALIGN_DOWN((__address) phys_base, FRAME_SIZE), |
ALIGN_DOWN((__address) virt_base, PAGE_SIZE), (count_t) pages, |
(int) flags); |
} |
/** Wrapper for SYS_ENABLE_IOSPACE syscall. |
* |
* @param uspace_io_arg User space address of DDI argument structure. |
* |
* @return 0 on success, otherwise it returns error code found in errno.h |
*/ |
__native sys_iospace_enable(ddi_ioarg_t *uspace_io_arg) |
{ |
ddi_ioarg_t arg; |
int rc; |
rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t)); |
if (rc != 0) |
return (__native) rc; |
return (__native) ddi_iospace_enable((task_id_t) arg.task_id, (__address) arg.ioaddr, (size_t) arg.size); |
} |
/** Disable or enable preemption. |
* |
* @param enable If non-zero, the preemption counter will be decremented, leading to potential |
* enabling of preemption. Otherwise the preemption counter will be incremented, |
* preventing preemption from occurring. |
* |
* @return Zero on success or EPERM if callers capabilities are not sufficient. |
*/ |
__native sys_preempt_control(int enable) |
{ |
if (! cap_get(TASK) & CAP_PREEMPT_CONTROL) |
return EPERM; |
if (enable) |
preemption_enable(); |
else |
preemption_disable(); |
return 0; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/lib/func.c |
---|
0,0 → 1,197 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Miscellaneous functions. |
*/ |
#include <func.h> |
#include <print.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <typedefs.h> |
#include <console/kconsole.h> |
atomic_t haltstate = {0}; /**< Nonzero once halt() has begun; halt() checks it so only the first caller enters the kernel console. */
/** Halt wrapper
 *
 * Set halt flag and halt the cpu.
 *
 * In debug builds, the first CPU to call halt() drops into the kernel
 * console ("panic" prompt) before halting; all callers end in cpu_halt().
 * This function does not return.
 */
void halt()
{
#ifdef CONFIG_DEBUG
	bool rundebugger = false;

	// TODO test_and_set not defined on all arches
	// if (!test_and_set(&haltstate))
	/* Get-then-set is not atomic (see TODO above); a second CPU racing here
	 * could also set rundebugger. Tolerated as this is a last-resort path. */
	if (!atomic_get(&haltstate)) {
		atomic_set(&haltstate, 1);
		rundebugger = true;
	}
#else
	atomic_set(&haltstate, 1);
#endif
	/* Disable interrupts so nothing preempts the console or the final halt. */
	interrupts_disable();
#ifdef CONFIG_DEBUG
	if (rundebugger) {
		printf("\n");
		kconsole("panic"); /* Run kconsole as a last resort to user */
	}
#endif
	/* CPU may be NULL early during boot, before per-CPU structures exist. */
	if (CPU)
		printf("cpu%d: halted\n", CPU->id);
	else
		printf("cpu: halted\n");
	cpu_halt();
}
/** Return number of characters in a string.
 *
 * @param str NULL terminated string.
 *
 * @return Number of characters in str, excluding the terminator.
 */
size_t strlen(const char *str)
{
	const char *p = str;

	/* Advance to the terminating NUL and measure the distance walked. */
	while (*p)
		p++;

	return (size_t) (p - str);
}
/** Compare two NULL terminated strings
 *
 * Do a char-by-char comparison of two NULL terminated strings.
 * The strings are considered equal iff they consist of the same
 * characters on the minimum of their lengths and specified maximal
 * length.
 *
 * @param src First string to compare.
 * @param dst Second string to compare.
 * @param len Maximal length for comparison.
 *
 * @return 0 if the strings are equal, -1 if first is smaller, 1 if second smaller.
 *
 */
int strncmp(const char *src, const char *dst, size_t len)
{
	/*
	 * size_t instead of int: the original compared a signed counter
	 * against the unsigned len, which is UB-prone for len > INT_MAX.
	 */
	size_t i = 0;

	for (; *src && *dst && i < len; src++, dst++, i++) {
		if (*src < *dst)
			return -1;
		if (*src > *dst)
			return 1;
	}

	/* Either the limit was reached, or both strings ended together. */
	if (i == len || *src == *dst)
		return 0;

	/* One string is a proper prefix of the other. */
	if (!*src)
		return -1;
	return 1;
}
/** Copy NULL terminated string.
 *
 * Copy at most 'len' characters from string 'src' to 'dest'.
 * If 'src' is shorter than 'len', '\0' is inserted behind the
 * last copied character. Unlike the standard strncpy(), the result
 * is always NULL-terminated (at dest[len - 1]) on truncation.
 *
 * @param src Source string.
 * @param dest Destination buffer.
 * @param len Size of destination buffer.
 */
void strncpy(char *dest, const char *src, size_t len)
{
	size_t i;

	/*
	 * Bug fix: with len == 0 the original fell through to
	 * 'dest[i - 1]' and wrote one byte before the buffer.
	 */
	if (len == 0)
		return;

	for (i = 0; i < len; i++) {
		if (!(dest[i] = src[i]))
			return;
	}

	/* Source was truncated; force termination in the last slot. */
	dest[len - 1] = '\0';
}
/** Convert ascii representation to __native |
* |
* Supports 0x for hexa & 0 for octal notation. |
* Does not check for overflows, does not support negative numbers |
* |
* @param text Textual representation of number |
* @return Converted number or 0 if no valid number ofund |
*/ |
__native atoi(const char *text) |
{ |
int base = 10; |
__native result = 0; |
if (text[0] == '0' && text[1] == 'x') { |
base = 16; |
text += 2; |
} else if (text[0] == '0') |
base = 8; |
while (*text) { |
if (base != 16 && \ |
((*text >= 'A' && *text <= 'F' ) |
|| (*text >='a' && *text <='f'))) |
break; |
if (base == 8 && *text >='8') |
break; |
if (*text >= '0' && *text <= '9') { |
result *= base; |
result += *text - '0'; |
} else if (*text >= 'A' && *text <= 'F') { |
result *= base; |
result += *text - 'A' + 10; |
} else if (*text >= 'a' && *text <= 'f') { |
result *= base; |
result += *text - 'a' + 10; |
} else |
break; |
text++; |
} |
return result; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/lib/elf.c |
---|
0,0 → 1,226 |
/* |
* Copyright (C) 2006 Sergey Bondari |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Kernel ELF loader. |
*/ |
#include <elf.h> |
#include <debug.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <mm/slab.h> |
#include <align.h> |
#include <memstr.h> |
#include <macros.h> |
#include <arch.h> |
/* Human-readable messages indexed by the elf_load() return codes; used by elf_error(). */
static char *error_codes[] = {
	"no error",
	"invalid image",
	"address space error",
	"incompatible image",
	"unsupported image type",
	"irrecoverable error"
};
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as); |
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as); |
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as); |
/** ELF loader |
* |
* @param header Pointer to ELF header in memory |
* @param as Created and properly mapped address space |
* @return EE_OK on success |
*/ |
int elf_load(elf_header_t *header, as_t * as) |
{ |
int i, rc; |
/* Identify ELF */ |
if (header->e_ident[EI_MAG0] != ELFMAG0 || header->e_ident[EI_MAG1] != ELFMAG1 || |
header->e_ident[EI_MAG2] != ELFMAG2 || header->e_ident[EI_MAG3] != ELFMAG3) { |
return EE_INVALID; |
} |
/* Identify ELF compatibility */ |
if (header->e_ident[EI_DATA] != ELF_DATA_ENCODING || header->e_machine != ELF_MACHINE || |
header->e_ident[EI_VERSION] != EV_CURRENT || header->e_version != EV_CURRENT || |
header->e_ident[EI_CLASS] != ELF_CLASS) { |
return EE_INCOMPATIBLE; |
} |
if (header->e_phentsize != sizeof(elf_segment_header_t)) |
return EE_INCOMPATIBLE; |
if (header->e_shentsize != sizeof(elf_section_header_t)) |
return EE_INCOMPATIBLE; |
/* Check if the object type is supported. */ |
if (header->e_type != ET_EXEC) |
return EE_UNSUPPORTED; |
/* Walk through all segment headers and process them. */ |
for (i = 0; i < header->e_phnum; i++) { |
rc = segment_header(&((elf_segment_header_t *)(((__u8 *) header) + header->e_phoff))[i], header, as); |
if (rc != EE_OK) |
return rc; |
} |
/* Inspect all section headers and proccess them. */ |
for (i = 0; i < header->e_shnum; i++) { |
rc = section_header(&((elf_section_header_t *)(((__u8 *) header) + header->e_shoff))[i], header, as); |
if (rc != EE_OK) |
return rc; |
} |
return EE_OK; |
} |
/** Print error message according to error code. |
* |
* @param rc Return code returned by elf_load(). |
* |
* @return NULL terminated description of error. |
*/ |
char *elf_error(int rc) |
{ |
ASSERT(rc < sizeof(error_codes)/sizeof(char *)); |
return error_codes[rc]; |
} |
/** Process segment header. |
* |
* @param entry Segment header. |
* @param elf ELF header. |
* @param as Address space into wich the ELF is being loaded. |
* |
* @return EE_OK on success, error code otherwise. |
*/ |
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, as_t *as) |
{ |
switch (entry->p_type) { |
case PT_NULL: |
case PT_PHDR: |
break; |
case PT_LOAD: |
return load_segment(entry, elf, as); |
break; |
case PT_DYNAMIC: |
case PT_INTERP: |
case PT_SHLIB: |
case PT_NOTE: |
case PT_LOPROC: |
case PT_HIPROC: |
default: |
return EE_UNSUPPORTED; |
break; |
} |
return EE_OK; |
} |
/** Load segment described by program header entry. |
* |
* @param entry Program header entry describing segment to be loaded. |
* @param elf ELF header. |
* @param as Address space into wich the ELF is being loaded. |
* |
* @return EE_OK on success, error code otherwise. |
*/ |
int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as) |
{ |
as_area_t *a; |
int flags = 0; |
mem_backend_data_t backend_data; |
backend_data.elf = elf; |
backend_data.segment = entry; |
if (entry->p_align > 1) { |
if ((entry->p_offset % entry->p_align) != (entry->p_vaddr % entry->p_align)) { |
return EE_INVALID; |
} |
} |
if (entry->p_flags & PF_X) |
flags |= AS_AREA_EXEC; |
if (entry->p_flags & PF_W) |
flags |= AS_AREA_WRITE; |
if (entry->p_flags & PF_R) |
flags |= AS_AREA_READ; |
flags |= AS_AREA_CACHEABLE; |
/* |
* Check if the virtual address starts on page boundary. |
*/ |
if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr) |
return EE_UNSUPPORTED; |
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, |
&elf_backend, &backend_data); |
if (!a) |
return EE_MEMORY; |
/* |
* The segment will be mapped on demand by elf_page_fault(). |
*/ |
return EE_OK; |
} |
/** Process section header.
 *
 * Currently no section type requires special handling; every section
 * is accepted as-is. The switch is a placeholder for future types.
 *
 * @param entry Segment header.
 * @param elf ELF header.
 * @param as Address space into which the ELF is being loaded.
 *
 * @return EE_OK on success, error code otherwise.
 */
static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as)
{
	switch (entry->sh_type) {
	default:
		break;
	}

	return EE_OK;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/lib/memstr.c |
---|
0,0 → 1,120 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Memory string operations. |
* |
* This file provides architecture independent functions |
* to manipulate blocks of memory. These functions |
* are optimized as much as generic functions of |
* this type can be. However, architectures are |
* free to provide even more optimized versions of these |
* functions. |
*/ |
#include <memstr.h> |
#include <arch/types.h> |
#include <align.h> |
/** Copy block of memory |
* |
* Copy cnt bytes from src address to dst address. |
* The copying is done word-by-word and then byte-by-byte. |
* The source and destination memory areas cannot overlap. |
* |
* @param src Origin address to copy from. |
* @param dst Origin address to copy to. |
* @param cnt Number of bytes to copy. |
* |
*/ |
void *_memcpy(void * dst, const void *src, size_t cnt) |
{ |
int i, j; |
if (ALIGN_UP((__address) src, sizeof(__native)) != (__address) src || |
ALIGN_UP((__address) dst, sizeof(__native)) != (__address) dst) { |
for (i = 0; i < cnt; i++) |
((__u8 *) dst)[i] = ((__u8 *) src)[i]; |
} else { |
for (i = 0; i < cnt/sizeof(__native); i++) |
((__native *) dst)[i] = ((__native *) src)[i]; |
for (j = 0; j < cnt%sizeof(__native); j++) |
((__u8 *)(((__native *) dst) + i))[j] = ((__u8 *)(((__native *) src) + i))[j]; |
} |
return (char *)src; |
} |
/** Fill block of memory |
* |
* Fill cnt bytes at dst address with the value x. |
* The filling is done byte-by-byte. |
* |
* @param dst Origin address to fill. |
* @param cnt Number of bytes to fill. |
* @param x Value to fill. |
* |
*/ |
void _memsetb(__address dst, size_t cnt, __u8 x) |
{ |
int i; |
__u8 *p = (__u8 *) dst; |
for(i=0; i<cnt; i++) |
p[i] = x; |
} |
/** Fill block of memory |
* |
* Fill cnt words at dst address with the value x. |
* The filling is done word-by-word. |
* |
* @param dst Origin address to fill. |
* @param cnt Number of words to fill. |
* @param x Value to fill. |
* |
*/ |
void _memsetw(__address dst, size_t cnt, __u16 x) |
{ |
int i; |
__u16 *p = (__u16 *) dst; |
for(i=0; i<cnt; i++) |
p[i] = x; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/lib/sort.c |
---|
0,0 → 1,204 |
/* |
* Copyright (C) 2005 Sergey Bondari |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Sorting functions. |
* |
* This files contains functions implementing several sorting |
* algorithms (e.g. quick sort and bubble sort). |
*/ |
#include <mm/slab.h> |
#include <memstr.h> |
#include <sort.h> |
#include <panic.h> |
#define EBUFSIZE 32 |
void _qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot); |
void _bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot); |
/** Quicksort wrapper |
* |
* This is only a wrapper that takes care of memory allocations for storing |
* the pivot and temporary elements for generic quicksort algorithm. |
* |
* This function _can_ sleep |
* |
* @param data Pointer to data to be sorted. |
* @param n Number of elements to be sorted. |
* @param e_size Size of one element. |
* @param cmp Comparator function. |
* |
*/ |
void qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b)) |
{ |
__u8 buf_tmp[EBUFSIZE]; |
__u8 buf_pivot[EBUFSIZE]; |
void * tmp = buf_tmp; |
void * pivot = buf_pivot; |
if (e_size > EBUFSIZE) { |
pivot = (void *) malloc(e_size, 0); |
tmp = (void *) malloc(e_size, 0); |
} |
_qsort(data, n, e_size, cmp, tmp, pivot); |
if (e_size > EBUFSIZE) { |
free(tmp); |
free(pivot); |
} |
} |
/** Quicksort |
* |
* Apply generic quicksort algorithm on supplied data, using pre-allocated buffers. |
* |
* @param data Pointer to data to be sorted. |
* @param n Number of elements to be sorted. |
* @param e_size Size of one element. |
* @param cmp Comparator function. |
* @param tmp Pointer to scratch memory buffer e_size bytes long. |
* @param pivot Pointer to scratch memory buffer e_size bytes long. |
* |
*/ |
void _qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot) |
{ |
if (n > 4) { |
int i = 0, j = n - 1; |
memcpy(pivot, data, e_size); |
while (1) { |
while ((cmp(data + i * e_size, pivot) < 0) && i < n) i++; |
while ((cmp(data + j * e_size, pivot) >=0) && j > 0) j--; |
if (i<j) { |
memcpy(tmp, data + i * e_size, e_size); |
memcpy(data + i * e_size, data + j * e_size, e_size); |
memcpy(data + j * e_size, tmp, e_size); |
} else { |
break; |
} |
} |
_qsort(data, j + 1, e_size, cmp, tmp, pivot); |
_qsort(data + (j + 1) * e_size, n - j - 1, e_size, cmp, tmp, pivot); |
} else { |
_bubblesort(data, n, e_size, cmp, tmp); |
} |
} |
/** Bubblesort wrapper |
* |
* This is only a wrapper that takes care of memory allocation for storing |
* the slot element for generic bubblesort algorithm. |
* |
* @param data Pointer to data to be sorted. |
* @param n Number of elements to be sorted. |
* @param e_size Size of one element. |
* @param cmp Comparator function. |
* |
*/ |
void bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b)) |
{ |
__u8 buf_slot[EBUFSIZE]; |
void * slot = buf_slot; |
if (e_size > EBUFSIZE) { |
slot = (void *) malloc(e_size, 0); |
} |
_bubblesort(data, n, e_size, cmp, slot); |
if (e_size > EBUFSIZE) { |
free(slot); |
} |
} |
/** Bubblesort |
* |
* Apply generic bubblesort algorithm on supplied data, using pre-allocated buffer. |
* |
* @param data Pointer to data to be sorted. |
* @param n Number of elements to be sorted. |
* @param e_size Size of one element. |
* @param cmp Comparator function. |
* @param slot Pointer to scratch memory buffer e_size bytes long. |
* |
*/ |
void _bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot) |
{ |
bool done = false; |
void * p; |
while (!done) { |
done = true; |
for (p = data; p < data + e_size * (n - 1); p = p + e_size) { |
if (cmp(p, p + e_size) == 1) { |
memcpy(slot, p, e_size); |
memcpy(p, p + e_size, e_size); |
memcpy(p + e_size, slot, e_size); |
done = false; |
} |
} |
} |
} |
/*
 * Comparator returns 1 if a > b, 0 if a == b, -1 if a < b
 */
int int_cmp(void * a, void * b)
{
	int x = *(int *) a;
	int y = *(int *) b;

	if (x > y)
		return 1;
	if (x < y)
		return -1;
	return 0;
}
int __u8_cmp(void * a, void * b) |
{ |
return (* (__u8 *) a > * (__u8 *)b) ? 1 : (*(__u8 *)a < * (__u8 *)b) ? -1 : 0; |
} |
int __u16_cmp(void * a, void * b) |
{ |
return (* (__u16 *) a > * (__u16 *)b) ? 1 : (*(__u16 *)a < * (__u16 *)b) ? -1 : 0; |
} |
int __u32_cmp(void * a, void * b) |
{ |
return (* (__u32 *) a > * (__u32 *)b) ? 1 : (*(__u32 *)a < * (__u32 *)b) ? -1 : 0; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/adt/btree.c |
---|
0,0 → 1,1003 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericadt |
* @{ |
*/ |
/** |
* @file |
* @brief B+tree implementation. |
* |
* This file implements B+tree type and operations. |
* |
* The B+tree has the following properties: |
* @li it is a ballanced 3-4-5 tree (i.e. BTREE_M = 5) |
* @li values (i.e. pointers to values) are stored only in leaves |
* @li leaves are linked in a list |
* |
* Be carefull when using these trees. They need to allocate |
* and deallocate memory for their index nodes and as such |
* can sleep. |
*/ |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <mm/slab.h> |
#include <debug.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <print.h> |
static void btree_destroy_subtree(btree_node_t *root); |
static void _btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *rsubtree, btree_node_t *node); |
static void _btree_remove(btree_t *t, btree_key_t key, btree_node_t *node); |
static void node_initialize(btree_node_t *node); |
static void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *lsubtree); |
static void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree); |
static void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key); |
static void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key); |
static btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median); |
static btree_node_t *node_combine(btree_node_t *node); |
static index_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right); |
static void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, index_t idx); |
static void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, index_t idx); |
static bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree); |
static bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree); |
static bool try_rotation_from_left(btree_node_t *rnode); |
static bool try_rotation_from_right(btree_node_t *lnode); |
#define ROOT_NODE(n)		(!(n)->parent)
#define INDEX_NODE(n)		((n)->subtree[0] != NULL)
#define LEAF_NODE(n)		((n)->subtree[0] == NULL)
#define FILL_FACTOR		((BTREE_M-1)/2)
#define MEDIAN_LOW_INDEX(n)	(((n)->keys-1)/2)
#define MEDIAN_HIGH_INDEX(n)	((n)->keys/2)
/*
 * Bug fix: the two macros below had trailing semicolons inside the
 * definition, which would break any use inside a larger expression.
 */
#define MEDIAN_LOW(n)		((n)->key[MEDIAN_LOW_INDEX((n))])
#define MEDIAN_HIGH(n)		((n)->key[MEDIAN_HIGH_INDEX((n))])
static slab_cache_t *btree_node_slab; |
/** Initialize B-trees.
 *
 * Creates the single slab cache from which all B-tree index nodes
 * are allocated (see btree_create() and node splitting).
 */
void btree_init(void)
{
	btree_node_slab = slab_cache_create("btree_node_slab", sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
/** Create empty B-tree. |
* |
* @param t B-tree. |
*/ |
void btree_create(btree_t *t) |
{ |
list_initialize(&t->leaf_head); |
t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0); |
node_initialize(t->root); |
list_append(&t->root->leaf_link, &t->leaf_head); |
} |
/** Destroy empty B-tree.
 *
 * Recursively frees every node of the tree, including the root.
 */
void btree_destroy(btree_t *t)
{
	btree_destroy_subtree(t->root);
}
/** Insert key-value pair into B-tree. |
* |
* @param t B-tree. |
* @param key Key to be inserted. |
* @param value Value to be inserted. |
* @param leaf_node Leaf node where the insertion should begin. |
*/ |
void btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *leaf_node) |
{ |
btree_node_t *lnode; |
ASSERT(value); |
lnode = leaf_node; |
if (!lnode) { |
if (btree_search(t, key, &lnode)) { |
panic("B-tree %p already contains key %d\n", t, key); |
} |
} |
_btree_insert(t, key, value, NULL, lnode); |
} |
/** Destroy subtree rooted in a node. |
* |
* @param root Root of the subtree. |
*/ |
void btree_destroy_subtree(btree_node_t *root) |
{ |
int i; |
if (root->keys) { |
for (i = 0; i < root->keys + 1; i++) { |
if (root->subtree[i]) |
btree_destroy_subtree(root->subtree[i]); |
} |
} |
slab_free(btree_node_slab, root); |
} |
/** Recursively insert into B-tree.
 *
 * Tries, in order: direct insertion into a non-full node, rotation of
 * keys to the left sibling, rotation to the right sibling, and finally
 * splitting the node and recursing into the parent with the median key.
 *
 * @param t B-tree.
 * @param key Key to be inserted.
 * @param value Value to be inserted.
 * @param rsubtree Right subtree of the inserted key.
 * @param node Start inserting into this node.
 */
void _btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *rsubtree, btree_node_t *node)
{
	if (node->keys < BTREE_MAX_KEYS) {
		/*
		 * Node contains enough space, the key can be stored immediately.
		 */
		node_insert_key_and_rsubtree(node, key, value, rsubtree);
	} else if (try_insert_by_rotation_to_left(node, key, value, rsubtree)) {
		/*
		 * The key-value-rsubtree triplet has been inserted because
		 * some keys could have been moved to the left sibling.
		 */
	} else if (try_insert_by_rotation_to_right(node, key, value, rsubtree)) {
		/*
		 * The key-value-rsubtree triplet has been inserted because
		 * some keys could have been moved to the right sibling.
		 */
	} else {
		btree_node_t *rnode;
		btree_key_t median;

		/*
		 * Node is full and both siblings (if both exist) are full too.
		 * Split the node and insert the smallest key from the node containing
		 * bigger keys (i.e. the new node) into its parent.
		 */
		rnode = node_split(node, key, value, rsubtree, &median);

		if (LEAF_NODE(node)) {
			/* Keep the leaf list ordered: rnode follows node. */
			list_prepend(&rnode->leaf_link, &node->leaf_link);
		}

		if (ROOT_NODE(node)) {
			/*
			 * We split the root node. Create new root.
			 */
			t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
			node->parent = t->root;
			rnode->parent = t->root;
			node_initialize(t->root);

			/*
			 * Left-hand side subtree will be the old root (i.e. node).
			 * Right-hand side subtree will be rnode.
			 */
			t->root->subtree[0] = node;

			t->root->depth = node->depth + 1;
		}
		/* Push the median up; for the new root this inserts its first key. */
		_btree_insert(t, median, NULL, rnode, node->parent);
	}
}
/** Remove B-tree node. |
* |
* @param t B-tree. |
* @param key Key to be removed from the B-tree along with its associated value. |
* @param leaf_node If not NULL, pointer to the leaf node where the key is found. |
*/ |
void btree_remove(btree_t *t, btree_key_t key, btree_node_t *leaf_node) |
{ |
btree_node_t *lnode; |
lnode = leaf_node; |
if (!lnode) { |
if (!btree_search(t, key, &lnode)) { |
panic("B-tree %p does not contain key %d\n", t, key); |
} |
} |
_btree_remove(t, key, lnode); |
} |
/** Recursively remove B-tree node.
 *
 * Removes the key from the node, rebalancing by borrowing from a
 * sibling (rotation) when possible, or by combining with a sibling
 * and recursing into the parent otherwise.
 *
 * @param t B-tree.
 * @param key Key to be removed from the B-tree along with its associated value.
 * @param node Node where the key being removed resides.
 */
void _btree_remove(btree_t *t, btree_key_t key, btree_node_t *node)
{
	if (ROOT_NODE(node)) {
		if (node->keys == 1 && node->subtree[0]) {
			/*
			 * Free the current root and set new root.
			 */
			t->root = node->subtree[0];
			t->root->parent = NULL;
			slab_free(btree_node_slab, node);
		} else {
			/*
			 * Remove the key from the root node.
			 * Note that the right subtree is removed because when
			 * combining two nodes, the left-side sibling is preserved
			 * and the right-side sibling is freed.
			 */
			node_remove_key_and_rsubtree(node, key);
		}
		return;
	}

	if (node->keys <= FILL_FACTOR) {
		/*
		 * If the node is below the fill factor,
		 * try to borrow keys from left or right sibling.
		 */
		if (!try_rotation_from_left(node))
			try_rotation_from_right(node);
	}

	if (node->keys > FILL_FACTOR) {
		int i;

		/*
		 * The key can be immediately removed.
		 *
		 * Note that the right subtree is removed because when
		 * combining two nodes, the left-side sibling is preserved
		 * and the right-side sibling is freed.
		 */
		node_remove_key_and_rsubtree(node, key);
		/* Update any parent separator that equaled the removed key. */
		for (i = 0; i < node->parent->keys; i++) {
			if (node->parent->key[i] == key)
				node->parent->key[i] = node->key[0];
		}
	} else {
		index_t idx;
		btree_node_t *rnode, *parent;

		/*
		 * The node is below the fill factor as well as its left and right sibling.
		 * Resort to combining the node with one of its siblings.
		 * The node which is on the left is preserved and the node on the right is
		 * freed.
		 */
		parent = node->parent;
		node_remove_key_and_rsubtree(node, key);
		rnode = node_combine(node);
		if (LEAF_NODE(rnode))
			list_remove(&rnode->leaf_link);
		idx = find_key_by_subtree(parent, rnode, true);
		ASSERT((int) idx != -1);
		slab_free(btree_node_slab, rnode);
		/* Remove the separator of the freed right sibling from the parent. */
		_btree_remove(t, parent->key[idx], parent);
	}
}
/** Search key in a B-tree.
 *
 * @param t B-tree.
 * @param key Key to be searched.
 * @param leaf_node Address where to put pointer to visited leaf node.
 *
 * @return Pointer to value or NULL if there is no such key.
 */
void *btree_search(btree_t *t, btree_key_t key, btree_node_t **leaf_node)
{
	btree_node_t *cur, *next;

	/*
	 * Iteratively descend to the leaf that can contain the searched key.
	 */
	for (cur = t->root; cur; cur = next) {

		/* Last iteration will set this with proper leaf node address. */
		*leaf_node = cur;

		/*
		 * The key can be in the leftmost subtree.
		 * Test it separately.
		 */
		if (key < cur->key[0]) {
			next = cur->subtree[0];
			continue;
		} else {
			void *val;
			int i;

			/*
			 * Now if the key is smaller than cur->key[i]
			 * it can only mean that the value is in cur->subtree[i]
			 * or it is not in the tree at all.
			 */
			for (i = 1; i < cur->keys; i++) {
				if (key < cur->key[i]) {
					next = cur->subtree[i];
					val = cur->value[i - 1];

					if (LEAF_NODE(cur))
						return key == cur->key[i - 1] ? val : NULL;

					goto descend;
				}
			}

			/*
			 * Last possibility is that the key is in the rightmost subtree.
			 */
			next = cur->subtree[i];
			val = cur->value[i - 1];

			if (LEAF_NODE(cur))
				return key == cur->key[i - 1] ? val : NULL;
		}
		descend:
			;	/* empty statement: target for resuming the descent from the inner loop */
	}

	/*
	 * The key was not found in the *leaf_node and is smaller than any of its keys.
	 */
	return NULL;
}
/** Return pointer to B-tree leaf node's left neighbour. |
* |
* @param t B-tree. |
* @param node Node whose left neighbour will be returned. |
* |
* @return Left neighbour of the node or NULL if the node does not have the left neighbour. |
*/ |
btree_node_t *btree_leaf_node_left_neighbour(btree_t *t, btree_node_t *node) |
{ |
ASSERT(LEAF_NODE(node)); |
if (node->leaf_link.prev != &t->leaf_head) |
return list_get_instance(node->leaf_link.prev, btree_node_t, leaf_link); |
else |
return NULL; |
} |
/** Return pointer to B-tree leaf node's right neighbour. |
* |
* @param t B-tree. |
* @param node Node whose right neighbour will be returned. |
* |
* @return Right neighbour of the node or NULL if the node does not have the right neighbour. |
*/ |
btree_node_t *btree_leaf_node_right_neighbour(btree_t *t, btree_node_t *node) |
{ |
ASSERT(LEAF_NODE(node)); |
if (node->leaf_link.next != &t->leaf_head) |
return list_get_instance(node->leaf_link.next, btree_node_t, leaf_link); |
else |
return NULL; |
} |
/** Initialize B-tree node.
 *
 * Zeroes the key/value/subtree arrays, including the extra overflow
 * slot used transiently during splits and rotations, and resets the
 * list links and depth.
 *
 * @param node B-tree node.
 */
void node_initialize(btree_node_t *node)
{
	int i;

	node->keys = 0;

	/* Clean also space for the extra key. */
	for (i = 0; i < BTREE_MAX_KEYS + 1; i++) {
		node->key[i] = 0;
		node->value[i] = NULL;
		node->subtree[i] = NULL;
	}
	/* After the loop i == BTREE_MAX_KEYS + 1: clear the last subtree pointer too. */
	node->subtree[i] = NULL;

	node->parent = NULL;

	link_initialize(&node->leaf_link);
	link_initialize(&node->bfs_link);
	node->depth = 0;
}
/** Insert key-value-lsubtree triplet into B-tree node. |
* |
* It is actually possible to have more keys than BTREE_MAX_KEYS. |
* This feature is used during insert by right rotation. |
* |
* @param node B-tree node into wich the new key is to be inserted. |
* @param key The key to be inserted. |
* @param value Pointer to value to be inserted. |
* @param lsubtree Pointer to the left subtree. |
*/ |
void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *lsubtree)
{
	int i;

	/* Find the position of the first key bigger than the inserted key. */
	for (i = 0; i < node->keys; i++) {
		if (key < node->key[i]) {
			int j;

			/* Shift keys, values and subtree pointers one slot right to make room. */
			for (j = node->keys; j > i; j--) {
				node->key[j] = node->key[j - 1];
				node->value[j] = node->value[j - 1];
				node->subtree[j + 1] = node->subtree[j];
			}
			/* Also shift the subtree at the insertion point itself (j == i here). */
			node->subtree[j + 1] = node->subtree[j];
			break;
		}
	}

	/* Write the new triplet; i == node->keys when the key is the biggest. */
	node->key[i] = key;
	node->value[i] = value;
	node->subtree[i] = lsubtree;

	node->keys++;
}
/** Insert key-value-rsubtree triplet into B-tree node. |
* |
* It is actually possible to have more keys than BTREE_MAX_KEYS. |
* This feature is used during splitting the node when the |
* number of keys is BTREE_MAX_KEYS + 1. Insert by left rotation |
* also makes use of this feature. |
* |
* @param node B-tree node into wich the new key is to be inserted. |
* @param key The key to be inserted. |
* @param value Pointer to value to be inserted. |
* @param rsubtree Pointer to the right subtree. |
*/ |
void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree)
{
	int i;

	/* Find the position of the first key bigger than the inserted key. */
	for (i = 0; i < node->keys; i++) {
		if (key < node->key[i]) {
			int j;

			/* Shift keys, values and right subtree pointers one slot right to make room. */
			for (j = node->keys; j > i; j--) {
				node->key[j] = node->key[j - 1];
				node->value[j] = node->value[j - 1];
				node->subtree[j + 1] = node->subtree[j];
			}
			break;
		}
	}

	/* Write the new triplet; i == node->keys when the key is the biggest. */
	node->key[i] = key;
	node->value[i] = value;
	node->subtree[i + 1] = rsubtree;

	node->keys++;
}
/** Remove key and its left subtree pointer from B-tree node. |
* |
* Remove the key and eliminate gaps in node->key array. |
* Note that the value pointer and the left subtree pointer |
* is removed from the node as well. |
* |
* @param node B-tree node. |
* @param key Key to be removed. |
*/ |
void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key)
{
	int i, j;

	for (i = 0; i < node->keys; i++) {
		if (key == node->key[i]) {
			/* Close the gap left by the removed key, value and left subtree. */
			for (j = i + 1; j < node->keys; j++) {
				node->key[j - 1] = node->key[j];
				node->value[j - 1] = node->value[j];
				node->subtree[j - 1] = node->subtree[j];
			}
			/* Also shift the rightmost subtree pointer (j == node->keys here). */
			node->subtree[j - 1] = node->subtree[j];
			node->keys--;
			return;
		}
	}
	panic("node %p does not contain key %d\n", node, key);
}
/** Remove key and its right subtree pointer from B-tree node. |
* |
* Remove the key and eliminate gaps in node->key array. |
* Note that the value pointer and the right subtree pointer |
* is removed from the node as well. |
* |
* @param node B-tree node. |
* @param key Key to be removed. |
*/ |
void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key)
{
	int i, j;

	for (i = 0; i < node->keys; i++) {
		if (key == node->key[i]) {
			/*
			 * Close the gap left by the removed key and value;
			 * subtrees shift from one position further right,
			 * which drops the key's right subtree.
			 */
			for (j = i + 1; j < node->keys; j++) {
				node->key[j - 1] = node->key[j];
				node->value[j - 1] = node->value[j];
				node->subtree[j] = node->subtree[j + 1];
			}
			node->keys--;
			return;
		}
	}
	panic("node %p does not contain key %d\n", node, key);
}
/** Split full B-tree node and insert new key-value-right-subtree triplet. |
* |
* This function will split a node and return pointer to a newly created |
* node containing keys greater than or equal to the greater of medians |
* (or median) of the old keys and the newly added key. It will also write |
* the median key to a memory address supplied by the caller. |
* |
* If the node being split is an index node, the median will not be |
* included in the new node. If the node is a leaf node, |
* the median will be copied there. |
* |
* @param node B-tree node wich is going to be split. |
* @param key The key to be inserted. |
* @param value Pointer to the value to be inserted. |
* @param rsubtree Pointer to the right subtree of the key being added. |
* @param median Address in memory, where the median key will be stored. |
* |
* @return Newly created right sibling of node. |
*/ |
btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median)
{
	btree_node_t *rnode;
	int i, j;

	ASSERT(median);
	ASSERT(node->keys == BTREE_MAX_KEYS);

	/*
	 * Use the extra space to store the extra node.
	 * The node temporarily holds BTREE_MAX_KEYS + 1 keys.
	 */
	node_insert_key_and_rsubtree(node, key, value, rsubtree);

	/*
	 * Compute median of keys.
	 */
	*median = MEDIAN_HIGH(node);

	/*
	 * Allocate and initialize new right sibling.
	 */
	rnode = (btree_node_t *) slab_alloc(btree_node_slab, 0);
	node_initialize(rnode);
	rnode->parent = node->parent;
	rnode->depth = node->depth;

	/*
	 * Copy big keys, values and subtree pointers to the new right sibling.
	 * If this is an index node, do not copy the median: INDEX_NODE(node)
	 * evaluates to 1 and offsets the copy start by one slot.
	 */
	i = (int) INDEX_NODE(node);
	for (i += MEDIAN_HIGH_INDEX(node), j = 0; i < node->keys; i++, j++) {
		rnode->key[j] = node->key[i];
		rnode->value[j] = node->value[i];
		rnode->subtree[j] = node->subtree[i];

		/*
		 * Fix parent links in subtrees.
		 */
		if (rnode->subtree[j])
			rnode->subtree[j]->parent = rnode;
	}
	/* Move the rightmost subtree pointer as well. */
	rnode->subtree[j] = node->subtree[i];
	if (rnode->subtree[j])
		rnode->subtree[j]->parent = rnode;

	rnode->keys = j;	/* Set number of keys of the new node. */
	node->keys /= 2;	/* Shrink the old node. */

	return rnode;
}
/** Combine node with any of its siblings. |
* |
* The siblings are required to be below the fill factor. |
* |
* @param node Node to combine with one of its siblings. |
* |
* @return Pointer to the rightmost of the two nodes. |
*/ |
btree_node_t *node_combine(btree_node_t *node)
{
	index_t idx;
	btree_node_t *rnode;
	int i;

	ASSERT(!ROOT_NODE(node));

	idx = find_key_by_subtree(node->parent, node, false);
	if (idx == node->parent->keys) {
		/*
		 * Rightmost subtree of its parent, combine with the left sibling.
		 * Swap roles so that 'node' always denotes the left of the two.
		 */
		idx--;
		rnode = node;
		node = node->parent->subtree[idx];
	} else {
		rnode = node->parent->subtree[idx + 1];
	}

	/* Index nodes need to insert parent node key in between left and right node. */
	if (INDEX_NODE(node))
		node->key[node->keys++] = node->parent->key[idx];

	/* Copy the key-value-subtree triplets from the right node. */
	for (i = 0; i < rnode->keys; i++) {
		node->key[node->keys + i] = rnode->key[i];
		node->value[node->keys + i] = rnode->value[i];
		if (INDEX_NODE(node)) {
			node->subtree[node->keys + i] = rnode->subtree[i];
			rnode->subtree[i]->parent = node;
		}
	}
	/* Move the rightmost subtree pointer as well (i == rnode->keys here). */
	if (INDEX_NODE(node)) {
		node->subtree[node->keys + i] = rnode->subtree[i];
		rnode->subtree[i]->parent = node;
	}

	node->keys += rnode->keys;

	/* The emptied right node is handed back to the caller. */
	return rnode;
}
/** Find key by its left or right subtree. |
* |
* @param node B-tree node. |
* @param subtree Left or right subtree of a key found in node. |
* @param right If true, subtree is a right subtree. If false, subtree is a left subtree. |
* |
* @return Index of the key associated with the subtree. |
*/ |
index_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right)
{
	int i;

	/* A node with n keys owns n + 1 subtree pointers. */
	for (i = 0; i <= node->keys; i++) {
		if (node->subtree[i] == subtree) {
			/* For a right subtree, the owning key sits one slot to the left. */
			return right ? i - 1 : i;
		}
	}
	panic("node %p does not contain subtree %p\n", node, subtree);
}
/** Rotate one key-value-rsubtree triplet from the left sibling to the right sibling. |
* |
* The biggest key and its value and right subtree is rotated from the left node |
* to the right. If the node is an index node, than the parent node key belonging to |
* the left node takes part in the rotation. |
* |
* @param lnode Left sibling. |
* @param rnode Right sibling. |
* @param idx Index of the parent node key that is taking part in the rotation. |
*/ |
void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, index_t idx)
{
	btree_key_t key;

	/* The biggest key of the left sibling takes part in the rotation. */
	key = lnode->key[lnode->keys - 1];
	if (LEAF_NODE(lnode)) {
		void *value;

		value = lnode->value[lnode->keys - 1];
		node_remove_key_and_rsubtree(lnode, key);
		node_insert_key_and_lsubtree(rnode, key, value, NULL);
		/* In a leaf, the parent separator becomes a copy of the moved key. */
		lnode->parent->key[idx] = key;
	} else {
		btree_node_t *rsubtree;

		rsubtree = lnode->subtree[lnode->keys];
		node_remove_key_and_rsubtree(lnode, key);
		/* In an index node, the old parent separator moves down into rnode and the moved key moves up. */
		node_insert_key_and_lsubtree(rnode, lnode->parent->key[idx], NULL, rsubtree);
		lnode->parent->key[idx] = key;

		/* Fix parent link of the reconnected right subtree. */
		rsubtree->parent = rnode;
	}
}
/** Rotate one key-value-lsubtree triplet from the right sibling to the left sibling. |
* |
* The smallest key and its value and left subtree is rotated from the right node |
* to the left. If the node is an index node, than the parent node key belonging to |
* the right node takes part in the rotation. |
* |
* @param lnode Left sibling. |
* @param rnode Right sibling. |
* @param idx Index of the parent node key that is taking part in the rotation. |
*/ |
void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, index_t idx)
{
	btree_key_t key;

	/* The smallest key of the right sibling takes part in the rotation. */
	key = rnode->key[0];
	if (LEAF_NODE(rnode)) {
		void *value;

		value = rnode->value[0];
		node_remove_key_and_lsubtree(rnode, key);
		node_insert_key_and_rsubtree(lnode, key, value, NULL);
		/* In a leaf, the parent separator becomes the right node's new smallest key. */
		rnode->parent->key[idx] = rnode->key[0];
	} else {
		btree_node_t *lsubtree;

		lsubtree = rnode->subtree[0];
		node_remove_key_and_lsubtree(rnode, key);
		/* In an index node, the old parent separator moves down into lnode and the moved key moves up. */
		node_insert_key_and_rsubtree(lnode, rnode->parent->key[idx], NULL, lsubtree);
		rnode->parent->key[idx] = key;

		/* Fix parent link of the reconnected left subtree. */
		lsubtree->parent = lnode;
	}
}
/** Insert key-value-rsubtree triplet and rotate the node to the left, if this operation can be done. |
* |
* Left sibling of the node (if it exists) is checked for free space. |
* If there is free space, the key is inserted and the smallest key of |
* the node is moved there. The index node which is the parent of both |
* nodes is fixed. |
* |
* @param node B-tree node. |
* @param inskey Key to be inserted. |
* @param insvalue Value to be inserted. |
* @param rsubtree Right subtree of inskey. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree)
{
	index_t idx;
	btree_node_t *lnode;

	/* The root has no siblings; the rotation can not be done. */
	if (ROOT_NODE(node))
		return false;

	idx = find_key_by_subtree(node->parent, node, true);
	/* The leftmost subtree of the parent has no left sibling. */
	if ((int) idx == -1)
		return false;

	lnode = node->parent->subtree[idx];
	/* The rotation requires free space in the left sibling. */
	if (lnode->keys >= BTREE_MAX_KEYS)
		return false;

	/* Insert into this node first, then shift the smallest key left. */
	node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree);
	rotate_from_right(lnode, node, idx);
	return true;
}
/** Insert key-value-rsubtree triplet and rotate the node to the right, if this operation can be done. |
* |
* Right sibling of the node (if it exists) is checked for free space. |
* If there is free space, the key is inserted and the biggest key of |
* the node is moved there. The index node which is the parent of both |
* nodes is fixed. |
* |
* @param node B-tree node. |
* @param inskey Key to be inserted. |
* @param insvalue Value to be inserted. |
* @param rsubtree Right subtree of inskey. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree)
{
	index_t idx;
	btree_node_t *rnode;

	/* The root has no siblings; the rotation can not be done. */
	if (ROOT_NODE(node))
		return false;

	idx = find_key_by_subtree(node->parent, node, false);
	/* The rightmost subtree of the parent has no right sibling. */
	if (idx == node->parent->keys)
		return false;

	rnode = node->parent->subtree[idx + 1];
	/* The rotation requires free space in the right sibling. */
	if (rnode->keys >= BTREE_MAX_KEYS)
		return false;

	/* Insert into this node first, then shift the biggest key right. */
	node_insert_key_and_rsubtree(node, inskey, insvalue, rsubtree);
	rotate_from_left(node, rnode, idx);
	return true;
}
/** Rotate in a key from the left sibling or from the index node, if this operation can be done. |
* |
* @param rnode Node into which to add key from its left sibling or from the index node. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_rotation_from_left(btree_node_t *rnode)
{
	index_t idx;
	btree_node_t *lnode;

	/* The root has no siblings; the rotation can not be done. */
	if (ROOT_NODE(rnode))
		return false;

	idx = find_key_by_subtree(rnode->parent, rnode, true);
	/* The leftmost subtree of the parent has no left sibling. */
	if ((int) idx == -1)
		return false;

	lnode = rnode->parent->subtree[idx];
	/* Only rotate when the left sibling can spare a key. */
	if (lnode->keys <= FILL_FACTOR)
		return false;

	rotate_from_left(lnode, rnode, idx);
	return true;
}
/** Rotate in a key from the right sibling or from the index node, if this operation can be done. |
* |
* @param lnode Node into which to add key from its right sibling or from the index node. |
* |
* @return True if the rotation was performed, false otherwise. |
*/ |
bool try_rotation_from_right(btree_node_t *lnode)
{
	index_t idx;
	btree_node_t *rnode;

	/* The root has no siblings; the rotation can not be done. */
	if (ROOT_NODE(lnode))
		return false;

	idx = find_key_by_subtree(lnode->parent, lnode, false);
	/* The rightmost subtree of the parent has no right sibling. */
	if (idx == lnode->parent->keys)
		return false;

	rnode = lnode->parent->subtree[idx + 1];
	/* Only rotate when the right sibling can spare a key. */
	if (rnode->keys <= FILL_FACTOR)
		return false;

	rotate_from_right(lnode, rnode, idx);
	return true;
}
/** Print B-tree. |
* |
* @param t Print out B-tree. |
*/ |
void btree_print(btree_t *t)
{
	int i, depth = t->root->depth;
	link_t head, *cur;

	printf("Printing B-tree:\n");
	list_initialize(&head);
	list_append(&t->root->bfs_link, &head);

	/*
	 * Use BFS search to print out the tree.
	 * Levels are distinguished from one another by node->depth.
	 */
	while (!list_empty(&head)) {
		link_t *hlp;
		btree_node_t *node;

		hlp = head.next;
		ASSERT(hlp != &head);
		node = list_get_instance(hlp, btree_node_t, bfs_link);
		list_remove(hlp);

		ASSERT(node);

		/* A change in depth means we crossed into the next tree level. */
		if (node->depth != depth) {
			printf("\n");
			depth = node->depth;
		}

		printf("(");
		for (i = 0; i < node->keys; i++) {
			printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : "");
			/* Only non-leaf nodes (depth > 0) enqueue their children. */
			if (node->depth && node->subtree[i]) {
				list_append(&node->subtree[i]->bfs_link, &head);
			}
		}
		/* Enqueue the rightmost subtree as well (i == node->keys here). */
		if (node->depth && node->subtree[i]) {
			list_append(&node->subtree[i]->bfs_link, &head);
		}
		printf(")");
	}
	printf("\n");

	/* Leaves are additionally linked in a list; print them left to right. */
	printf("Printing list of leaves:\n");
	for (cur = t->leaf_head.next; cur != &t->leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		ASSERT(node);

		printf("(");
		for (i = 0; i < node->keys; i++)
			printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : "");
		printf(")");
	}
	printf("\n");
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/adt/hash_table.c |
---|
0,0 → 1,178 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericadt |
* @{ |
*/ |
/** |
* @file |
* @brief Implementation of generic chained hash table. |
* |
* This file contains implementation of generic chained hash table. |
*/ |
#include <adt/hash_table.h> |
#include <adt/list.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <debug.h> |
#include <mm/slab.h> |
#include <memstr.h> |
/** Create chained hash table. |
* |
* @param h Hash table structure. Will be initialized by this call. |
* @param m Number of slots in the hash table. |
* @param max_keys Maximal number of keys needed to identify an item. |
* @param op Hash table operations structure. |
*/ |
void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op)
{
	/*
	 * Use count_t for the index: the original 'int i' was compared
	 * against the unsigned count_t 'm', a signed/unsigned mismatch.
	 */
	count_t i;

	ASSERT(h);
	ASSERT(op && op->hash && op->compare);
	ASSERT(max_keys > 0);

	h->entry = malloc(m * sizeof(link_t), 0);
	if (!h->entry) {
		panic("cannot allocate memory for hash table\n");
	}
	memsetb((__address) h->entry, m * sizeof(link_t), 0);

	/* Turn every slot into an empty circular chain. */
	for (i = 0; i < m; i++)
		list_initialize(&h->entry[i]);

	h->entries = m;
	h->max_keys = max_keys;
	h->op = op;
}
/** Insert item into hash table. |
* |
* @param h Hash table. |
* @param key Array of all keys necessary to compute hash index. |
* @param item Item to be inserted into the hash table. |
*/ |
void hash_table_insert(hash_table_t *h, __native key[], link_t *item)
{
	index_t idx;

	ASSERT(item);
	ASSERT(h && h->op && h->op->hash && h->op->compare);

	/* Hash the keys to pick the destination chain and append the item there. */
	idx = h->op->hash(key);
	ASSERT(idx < h->entries);
	list_append(item, &h->entry[idx]);
}
/** Search hash table for an item matching keys. |
* |
* @param h Hash table. |
* @param key Array of all keys needed to compute hash index. |
* |
* @return Matching item on success, NULL if there is no such item. |
*/ |
link_t *hash_table_find(hash_table_t *h, __native key[])
{
	index_t idx;
	link_t *cur;

	ASSERT(h && h->op && h->op->hash && h->op->compare);

	idx = h->op->hash(key);
	ASSERT(idx < h->entries);

	/* Walk the chain and return the first item matching all keys. */
	cur = h->entry[idx].next;
	while (cur != &h->entry[idx]) {
		if (h->op->compare(key, h->max_keys, cur))
			return cur;
		cur = cur->next;
	}

	return NULL;
}
/** Remove all matching items from hash table. |
* |
* For each removed item, h->remove_callback() is called. |
* |
* @param h Hash table. |
* @param key Array of keys that will be compared against items of the hash table. |
* @param keys Number of keys in the key array. |
*/ |
void hash_table_remove(hash_table_t *h, __native key[], count_t keys)
{
	index_t chain;
	link_t *cur;

	ASSERT(h && h->op && h->op->hash && h->op->compare && h->op->remove_callback);
	ASSERT(keys <= h->max_keys);

	if (keys == h->max_keys) {
		/*
		 * All keys are known, hash_table_find() can be used to find the entry.
		 */
		cur = hash_table_find(h, key);
		if (cur) {
			list_remove(cur);
			h->op->remove_callback(cur);
		}
		return;
	}

	/*
	 * Fewer keys were passed, so the chain index cannot be computed;
	 * every chain is scanned and all partially matching entries removed.
	 */
	for (chain = 0; chain < h->entries; chain++) {
		for (cur = h->entry[chain].next; cur != &h->entry[chain]; cur = cur->next) {
			if (h->op->compare(key, keys, cur)) {
				link_t *hlp;

				hlp = cur;
				/*
				 * Step back one link so the loop's cur->next lands on
				 * the successor of the item about to be removed.
				 */
				cur = cur->prev;
				list_remove(hlp);
				h->op->remove_callback(hlp);

				continue;
			}
		}
	}
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/adt/bitmap.c |
---|
0,0 → 1,190 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericadt |
* @{ |
*/ |
/** |
* @file |
* @brief Implementation of bitmap ADT. |
* |
* This file implements bitmap ADT and provides functions for |
* setting and clearing ranges of bits. |
*/ |
#include <adt/bitmap.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <align.h> |
#include <debug.h> |
#include <macros.h> |
#define ALL_ONES 0xff |
#define ALL_ZEROES 0x00 |
/** Initialize bitmap. |
* |
* No portion of the bitmap is set or cleared by this function. |
* |
* @param bitmap Bitmap structure. |
* @param map Address of the memory used to hold the map. |
* @param bits Number of bits stored in bitmap. |
*/ |
void bitmap_initialize(bitmap_t *bitmap, __u8 *map, count_t bits)
{
	/* Record the backing storage and its capacity; no bits are touched. */
	bitmap->bits = bits;
	bitmap->map = map;
}
/** Set range of bits. |
* |
* @param bitmap Bitmap structure. |
* @param start Starting bit. |
* @param bits Number of bits to set. |
*/ |
void bitmap_set_range(bitmap_t *bitmap, index_t start, count_t bits)
{
	index_t i = 0;
	index_t aligned_start;
	count_t lub;	/* leading unaligned bits */
	count_t amb;	/* aligned middle bits */
	count_t tab;	/* trailing aligned bits */

	ASSERT(start + bits <= bitmap->bits);

	aligned_start = ALIGN_UP(start, 8);
	lub = min(aligned_start - start, bits);
	amb = bits > lub ? bits - lub : 0;
	tab = amb % 8;

	if (start + bits < aligned_start) {
		/*
		 * The whole range fits strictly inside a single byte;
		 * set bits in the middle of that byte and finish.
		 */
		bitmap->map[start / 8] |= ((1 << lub) - 1) << (start & 7);
		return;
	}

	if (lub) {
		/*
		 * Make sure to set any leading unaligned bits
		 * (the topmost lub bits of the first byte).
		 */
		bitmap->map[start / 8] |= ~((1 << (8 - lub)) - 1);
	}
	for (i = 0; i < amb / 8; i++) {
		/*
		 * The middle bits can be set byte by byte.
		 */
		bitmap->map[aligned_start / 8 + i] = ALL_ONES;
	}
	if (tab) {
		/*
		 * Make sure to set any trailing aligned bits
		 * (the lowest tab bits of the last byte).
		 */
		bitmap->map[aligned_start / 8 + i] |= (1 << tab) - 1;
	}
}
/** Clear range of bits. |
* |
* @param bitmap Bitmap structure. |
* @param start Starting bit. |
* @param bits Number of bits to clear. |
*/ |
void bitmap_clear_range(bitmap_t *bitmap, index_t start, count_t bits)
{
	index_t i = 0;
	index_t aligned_start;
	count_t lub;	/* leading unaligned bits */
	count_t amb;	/* aligned middle bits */
	count_t tab;	/* trailing aligned bits */

	ASSERT(start + bits <= bitmap->bits);

	aligned_start = ALIGN_UP(start, 8);
	lub = min(aligned_start - start, bits);
	amb = bits > lub ? bits - lub : 0;
	tab = amb % 8;

	if (start + bits < aligned_start) {
		/*
		 * The whole range fits strictly inside a single byte;
		 * clear bits in the middle of that byte and finish.
		 */
		bitmap->map[start / 8] &= ~(((1 << lub) - 1) << (start & 7));
		return;
	}

	if (lub) {
		/*
		 * Make sure to clear any leading unaligned bits
		 * (the topmost lub bits of the first byte).
		 */
		bitmap->map[start / 8] &= (1 << (8 - lub)) - 1;
	}
	for (i = 0; i < amb / 8; i++) {
		/*
		 * The middle bits can be cleared byte by byte.
		 */
		bitmap->map[aligned_start / 8 + i] = ALL_ZEROES;
	}
	if (tab) {
		/*
		 * Make sure to clear any trailing aligned bits
		 * (the lowest tab bits of the last byte).
		 */
		bitmap->map[aligned_start / 8 + i] &= ~((1 << tab) - 1);
	}
}
/** Copy portion of one bitmap into another bitmap. |
* |
* @param dst Destination bitmap. |
* @param src Source bitmap. |
* @param bits Number of bits to copy. |
*/ |
void bitmap_copy(bitmap_t *dst, bitmap_t *src, count_t bits)
{
	index_t i;
	count_t rem = bits % 8;

	ASSERT(bits <= dst->bits);
	ASSERT(bits <= src->bits);

	/* Copy the whole bytes first. */
	for (i = 0; i < bits / 8; i++)
		dst->map[i] = src->map[i];

	/* Merge the trailing partial byte, leaving its upper bits untouched. */
	if (rem) {
		bitmap_clear_range(dst, i * 8, rem);
		dst->map[i] |= src->map[i] & ((1 << rem) - 1);
	}
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/adt/list.c |
---|
0,0 → 1,95 |
/* |
* Copyright (C) 2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericadt |
* @{ |
*/ |
/** |
* @file |
* @brief Functions completing doubly linked circular list implementaion. |
* |
* This file contains some of the functions implementing doubly linked circular lists. |
* However, this ADT is mostly implemented in @ref list.h. |
*/ |
#include <adt/list.h> |
/** Check for membership |
* |
* Check whether link is contained in the list head. |
* The membership is defined as pointer equivalence. |
* |
* @param link Item to look for. |
* @param head List to look in. |
* |
* @return true if link is contained in head, false otherwise. |
* |
*/ |
bool list_member(const link_t *link, const link_t *head) |
{ |
bool found = false; |
link_t *hlp = head->next; |
while (hlp != head) { |
if (hlp == link) { |
found = true; |
break; |
} |
hlp = hlp->next; |
} |
return found; |
} |
/** Concatenate two lists |
* |
* Concatenate lists head1 and head2, producing a single |
* list head1 containing items from both (in head1, head2 |
* order) and empty list head2. |
* |
* @param head1 First list and concatenated output |
* @param head2 Second list and empty output. |
* |
*/ |
void list_concat(link_t *head1, link_t *head2)
{
	if (list_empty(head2))
		return;

	/* Splice head2's chain between head1's last item and head1 itself. */
	head2->next->prev = head1->prev;
	head2->prev->next = head1;
	head1->prev->next = head2->next;
	head1->prev = head2->prev;
	/* head2 must not keep pointers into the moved chain; reset it to empty. */
	list_initialize(head2);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/syscall/copy.c |
---|
0,0 → 1,133 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Copying between kernel and userspace. |
* |
* This file contains sanitized functions for copying data |
* between kernel and userspace. |
*/ |
#include <syscall/copy.h> |
#include <proc/thread.h> |
#include <mm/as.h> |
#include <macros.h> |
#include <arch.h> |
#include <errno.h> |
#include <typedefs.h> |
/** Copy data from userspace to kernel. |
* |
* Provisions are made to return value even after page fault. |
* |
* This function can be called only from syscall. |
* |
* @param dst Destination kernel address. |
* @param uspace_src Source userspace address. |
* @param size Size of the data to be copied. |
* |
* @return 0 on success or error code from @ref errno.h. |
*/ |
int copy_from_uspace(void *dst, const void *uspace_src, size_t size)
{
	ipl_t ipl;
	int rc;

	ASSERT(THREAD);
	ASSERT(!THREAD->in_copy_from_uspace);

	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		/* Refuse source blocks that overlap the kernel address space. */
		if (overlaps((__address) uspace_src, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START)) {
			/*
			 * The userspace source block conflicts with kernel address space.
			 */
			return EPERM;
		}
	}

	/*
	 * Flag the thread for the duration of the copy so that a page fault
	 * during the copy can be recognized and recovered from (see the
	 * function documentation above). Interrupts are disabled meanwhile.
	 */
	ipl = interrupts_disable();
	THREAD->in_copy_from_uspace = true;
	rc = memcpy_from_uspace(dst, uspace_src, size);
	THREAD->in_copy_from_uspace = false;
	interrupts_restore(ipl);

	/* A zero return from memcpy_from_uspace() is treated as failure. */
	return !rc ? EPERM : 0;
}
/** Copy data from kernel to userspace. |
* |
* Provisions are made to return value even after page fault. |
* |
* This function can be called only from syscall. |
* |
* @param uspace_dst Destination userspace address. |
* @param src Source kernel address. |
* @param size Size of the data to be copied. |
* |
* @return 0 on success or error code from @ref errno.h. |
*/ |
int copy_to_uspace(void *uspace_dst, const void *src, size_t size)
{
	ipl_t ipl;
	int rc;

	ASSERT(THREAD);
	/*
	 * NOTE(review): this function tests and sets in_copy_from_uspace even
	 * though it copies *to* userspace; verify against thread.h whether a
	 * separate in_copy_to_uspace flag exists and should be used here.
	 */
	ASSERT(!THREAD->in_copy_from_uspace);

	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		/* Refuse destination blocks that overlap the kernel address space. */
		if (overlaps((__address) uspace_dst, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START)) {
			/*
			 * The userspace destination block conflicts with kernel address space.
			 */
			return EPERM;
		}
	}

	/*
	 * Flag the thread for the duration of the copy so that a page fault
	 * during the copy can be recognized and recovered from (see the
	 * function documentation above). Interrupts are disabled meanwhile.
	 */
	ipl = interrupts_disable();
	THREAD->in_copy_from_uspace = true;
	rc = memcpy_to_uspace(uspace_dst, src, size);
	THREAD->in_copy_from_uspace = false;
	interrupts_restore(ipl);

	/* A zero return from memcpy_to_uspace() is treated as failure. */
	return !rc ? EPERM : 0;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/syscall/syscall.c |
---|
0,0 → 1,164 |
/* |
* Copyright (C) 2005 Martin Decky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Syscall table and syscall wrappers. |
*/ |
#include <syscall/syscall.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/as.h> |
#include <print.h> |
#include <putchar.h> |
#include <errno.h> |
#include <arch.h> |
#include <debug.h> |
#include <ipc/sysipc.h> |
#include <synch/futex.h> |
#include <ddi/ddi.h> |
#include <security/cap.h> |
#include <syscall/copy.h> |
#include <sysinfo/sysinfo.h> |
#include <console/console.h> |
#include <console/klog.h> |
/** Print using kernel facility |
* |
* Some simulators can print only through kernel. Userspace can use |
* this syscall to facilitate it. |
*/ |
static __native sys_io(int fd, const void * buf, size_t count) |
{ |
size_t i; |
char *data; |
int rc; |
if (count > PAGE_SIZE) |
return ELIMIT; |
data = malloc(count, 0); |
if (!data) |
return ENOMEM; |
rc = copy_from_uspace(data, buf, count); |
if (rc) { |
free(data); |
return rc; |
} |
for (i = 0; i < count; i++) |
putchar(data[i]); |
free(data); |
return count; |
} |
/** Tell kernel to get keyboard/console access again
 *
 * Delegates to the architecture-specific routine that re-acquires
 * the hardware console for the kernel.
 *
 * @return Always 0.
 */
static __native sys_debug_enable_console(void)
{
	arch_grab_console();
	return 0;
}
/** Dispatch system call */ |
__native syscall_handler(__native a1, __native a2, __native a3, |
__native a4, __native id) |
{ |
__native rc; |
if (id < SYSCALL_END) |
rc = syscall_table[id](a1,a2,a3,a4); |
else { |
klog_printf("TASK %lld: Unknown syscall id %d",TASK->taskid,id); |
task_kill(TASK->taskid); |
thread_exit(); |
} |
if (THREAD->interrupted) |
thread_exit(); |
return rc; |
} |
/** Syscall dispatch table.
 *
 * Indexed directly by the syscall number in syscall_handler(), so the
 * order of entries here defines the syscall ABI and must match the
 * numbering userspace uses. Add new entries only at the end.
 */
syshandler_t syscall_table[SYSCALL_END] = {
	sys_io,
	sys_tls_set,

	/* Thread and task related syscalls. */
	sys_thread_create,
	sys_thread_exit,
	sys_task_get_id,

	/* Synchronization related syscalls. */
	sys_futex_sleep_timeout,
	sys_futex_wakeup,

	/* Address space related syscalls. */
	sys_as_area_create,
	sys_as_area_resize,
	sys_as_area_destroy,

	/* IPC related syscalls. */
	sys_ipc_call_sync_fast,
	sys_ipc_call_sync,
	sys_ipc_call_async_fast,
	sys_ipc_call_async,
	sys_ipc_answer_fast,
	sys_ipc_answer,
	sys_ipc_forward_fast,
	sys_ipc_wait_for_call,
	sys_ipc_hangup,
	sys_ipc_register_irq,
	sys_ipc_unregister_irq,

	/* Capabilities related syscalls. */
	sys_cap_grant,
	sys_cap_revoke,

	/* DDI related syscalls. */
	sys_physmem_map,
	sys_iospace_enable,
	sys_preempt_control,

	/* Sysinfo syscalls */
	sys_sysinfo_valid,
	sys_sysinfo_value,

	/* Debug calls */
	sys_debug_enable_console
};
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/synch/condvar.c |
---|
0,0 → 1,109 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Condition variables. |
*/ |
#include <synch/condvar.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <arch.h> |
#include <typedefs.h> |
/** Initialize condition variable.
 *
 * A condition variable is a thin wrapper around a wait queue;
 * initialization just resets the underlying queue.
 *
 * @param cv Condition variable.
 */
void condvar_initialize(condvar_t *cv)
{
	waitq_initialize(&cv->wq);
}
/**
 * Signal the condition has become true
 * to the first waiting thread by waking it up.
 *
 * Wakes at most one thread sleeping on the condition variable.
 *
 * @param cv Condition variable.
 */
void condvar_signal(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_FIRST);
}
/**
 * Signal the condition has become true
 * to all waiting threads by waking them up.
 *
 * Wakes every thread currently sleeping on the condition variable.
 *
 * @param cv Condition variable.
 */
void condvar_broadcast(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_ALL);
}
/** Wait for the condition becoming true.
 *
 * Atomically releases the mutex and puts the caller to sleep on the
 * condition variable's wait queue; the mutex is reacquired before
 * returning, regardless of how the sleep ended.
 *
 * @param cv Condition variable.
 * @param mtx Mutex.
 * @param usec Timeout value in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of meaning of possible combinations
 * of usec and flags, see comment for waitq_sleep_timeout().
 * Note that when SYNCH_FLAGS_NON_BLOCKING is specified here,
 * ESYNCH_WOULD_BLOCK is always returned.
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags)
{
	int rc;
	ipl_t ipl;

	/*
	 * Prepare the sleep before dropping the mutex so that a signal
	 * arriving between mutex_unlock() and the actual sleep is not lost.
	 */
	ipl = waitq_sleep_prepare(&cv->wq);
	mutex_unlock(mtx);

	cv->wq.missed_wakeups = 0;	/* Enforce blocking. */
	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);

	/* Reacquire the mutex before finishing, per condvar semantics. */
	mutex_lock(mtx);
	waitq_sleep_finish(&cv->wq, rc, ipl);
	return rc;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/synch/rwlock.c |
---|
0,0 → 1,389 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Reader/Writer locks. |
* |
* A reader/writer lock can be held by multiple readers at a time. |
* Or it can be exclusively held by a sole writer at a time. |
* |
* These locks are not recursive. |
* Because technique called direct hand-off is used, neither readers |
* nor writers will suffer starvation. |
* |
* If there is a writer followed by a reader waiting for the rwlock |
* and the writer times out, all leading readers are automatically woken up |
* and allowed in. |
*/ |
/* |
* NOTE ON rwlock_holder_type |
* This field is set on an attempt to acquire the exclusive mutex |
* to the respective value depending whether the caller is a reader |
* or a writer. The field is examined only if the thread had been |
* previously blocked on the exclusive mutex. Thus it is save |
* to store the rwlock type in the thread structure, because |
* each thread can block on only one rwlock at a time. |
*/ |
#include <synch/rwlock.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <adt/list.h> |
#include <typedefs.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <proc/thread.h> |
#include <panic.h> |
#define ALLOW_ALL 0 |
#define ALLOW_READERS_ONLY 1 |
static void let_others_in(rwlock_t *rwl, int readers_only); |
static void release_spinlock(void *arg); |
/** Initialize reader/writer lock |
* |
* Initialize reader/writer lock. |
* |
* @param rwl Reader/Writer lock. |
*/ |
void rwlock_initialize(rwlock_t *rwl) { |
spinlock_initialize(&rwl->lock, "rwlock_t"); |
mutex_initialize(&rwl->exclusive); |
rwl->readers_in = 0; |
} |
/** Acquire reader/writer lock for writing
 *
 * Acquire reader/writer lock for writing.
 * Timeout and willingness to block may be specified.
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
	ipl_t ipl;
	int rc;

	/*
	 * Record that this thread blocks on rwlocks as a writer; the
	 * handoff code in let_others_in() inspects this field.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_WRITER;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);

	/*
	 * Writers take the easy part.
	 * They just need to acquire the exclusive mutex.
	 */
	rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
	if (SYNCH_FAILED(rc)) {

		/*
		 * Lock operation timed out or was interrupted.
		 * The state of rwl is UNKNOWN at this point.
		 * No claims about its holder can be made.
		 */

		ipl = interrupts_disable();
		spinlock_lock(&rwl->lock);
		/*
		 * Now when rwl is locked, we can inspect it again.
		 * If it is held by some readers already, we can let
		 * readers from the head of the wait queue in.
		 */
		if (rwl->readers_in)
			let_others_in(rwl, ALLOW_READERS_ONLY);
		spinlock_unlock(&rwl->lock);
		interrupts_restore(ipl);
	}

	return rc;
}
/** Acquire reader/writer lock for reading
 *
 * Acquire reader/writer lock for reading.
 * Timeout and willingness to block may be specified.
 *
 * @param rwl Reader/Writer lock.
 * @param usec Timeout in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags)
{
	int rc;
	ipl_t ipl;

	/* Record the blocking type for the handoff code, see let_others_in(). */
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_READER;
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&rwl->lock);

	/*
	 * Find out whether we can get what we want without blocking.
	 */
	rc = mutex_trylock(&rwl->exclusive);
	if (SYNCH_FAILED(rc)) {

		/*
		 * 'exclusive' mutex is being held by someone else.
		 * If the holder is a reader and there is no one
		 * else waiting for it, we can enter the critical
		 * section.
		 */

		if (rwl->readers_in) {
			spinlock_lock(&rwl->exclusive.sem.wq.lock);
			if (list_empty(&rwl->exclusive.sem.wq.head)) {
				/*
				 * We can enter.
				 */
				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
				goto shortcut;
			}
			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
		}

		/*
		 * In order to prevent a race condition when a reader
		 * could block another reader at the head of the waitq,
		 * we register a function to unlock rwl->lock
		 * after this thread is put asleep.
		 */
		#ifdef CONFIG_SMP
		thread_register_call_me(release_spinlock, &rwl->lock);
		#else
		thread_register_call_me(release_spinlock, NULL);
		#endif

		rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
		switch (rc) {
		case ESYNCH_WOULD_BLOCK:
			/*
			 * release_spinlock() wasn't called
			 */
			thread_register_call_me(NULL, NULL);
			spinlock_unlock(&rwl->lock);
			/* fallthrough */
		case ESYNCH_TIMEOUT:
		case ESYNCH_INTERRUPTED:
			/*
			 * The sleep timed out.
			 * We just restore interrupt priority level.
			 */
			/* fallthrough */
		case ESYNCH_OK_BLOCKED:
			/*
			 * We were woken with rwl->readers_in already incremented.
			 * Note that this arrangement avoids race condition between
			 * two concurrent readers. (Race is avoided if 'exclusive' is
			 * locked at the same time as 'readers_in' is incremented.
			 * Same time means both events happen atomically when
			 * rwl->lock is held.)
			 */
			interrupts_restore(ipl);
			break;
		case ESYNCH_OK_ATOMIC:
			panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
			break;
		default:
			panic("invalid ESYNCH\n");
			break;
		}
		return rc;
	}

shortcut:

	/*
	 * We can increment readers_in only if we didn't go to sleep.
	 * For sleepers, rwlock_let_others_in() will do the job.
	 */
	rwl->readers_in++;

	spinlock_unlock(&rwl->lock);
	interrupts_restore(ipl);

	return ESYNCH_OK_ATOMIC;
}
/** Release reader/writer lock held by writer
 *
 * Release reader/writer lock held by writer.
 * Handoff reader/writer lock ownership directly
 * to waiting readers or a writer.
 *
 * @param rwl Reader/Writer lock.
 */
void rwlock_write_unlock(rwlock_t *rwl)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&rwl->lock);
	/* Hand the lock to the next waiter(s), reader or writer alike. */
	let_others_in(rwl, ALLOW_ALL);
	spinlock_unlock(&rwl->lock);
	interrupts_restore(ipl);
}
/** Release reader/writer lock held by reader
 *
 * Release reader/writer lock held by reader.
 * Handoff reader/writer lock ownership directly
 * to a waiting writer, or don't do anything if more
 * readers still possess the lock.
 *
 * @param rwl Reader/Writer lock.
 */
void rwlock_read_unlock(rwlock_t *rwl)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&rwl->lock);
	/* Only the last reader out performs the handoff. */
	if (!--rwl->readers_in)
		let_others_in(rwl, ALLOW_ALL);
	spinlock_unlock(&rwl->lock);
	interrupts_restore(ipl);
}
/** Direct handoff of reader/writer lock ownership.
 *
 * Direct handoff of reader/writer lock ownership
 * to waiting readers or a writer.
 *
 * Must be called with rwl->lock locked.
 * Must be called with interrupts_disable()'d.
 *
 * @param rwl Reader/Writer lock.
 * @param readers_only See the description below.
 *
 * If readers_only is false: (unlock scenario)
 * Let the first sleeper on 'exclusive' mutex in, no matter
 * whether it is a reader or a writer. If there are more leading
 * readers in line, let each of them in.
 *
 * Otherwise: (timeout scenario)
 * Let all leading readers in.
 */
void let_others_in(rwlock_t *rwl, int readers_only)
{
	rwlock_type_t type = RWLOCK_NONE;
	thread_t *t = NULL;
	bool one_more = true;

	spinlock_lock(&rwl->exclusive.sem.wq.lock);

	/* Peek at the first sleeper on the exclusive mutex, if any. */
	if (!list_empty(&rwl->exclusive.sem.wq.head))
		t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
	do {
		if (t) {
			spinlock_lock(&t->lock);
			type = t->rwlock_holder_type;
			spinlock_unlock(&t->lock);
		}

		/*
		 * If readers_only is true, we wake all leading readers
		 * if and only if rwl is locked by another reader.
		 * Assumption: readers_only ==> rwl->readers_in
		 */
		if (readers_only && (type != RWLOCK_READER))
			break;


		if (type == RWLOCK_READER) {
			/*
			 * Waking up a reader.
			 * We are responsible for incrementing rwl->readers_in for it.
			 */
			 rwl->readers_in++;
		}

		/*
		 * Only the last iteration through this loop can increment
		 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
		 * iterations will wake up a thread.
		 */
		/* We call the internal version of waitq_wakeup, which
		 * relies on the fact that the waitq is already locked.
		 */
		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);

		/* Re-examine the queue head to decide whether to continue. */
		t = NULL;
		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
			t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
			if (t) {
				spinlock_lock(&t->lock);
				if (t->rwlock_holder_type != RWLOCK_READER)
					one_more = false;
				spinlock_unlock(&t->lock);
			}
		}
	} while ((type == RWLOCK_READER) && t && one_more);

	spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}
/** Release spinlock callback
 *
 * This is a callback function invoked from the scheduler.
 * The callback is registered in _rwlock_read_lock_timeout().
 *
 * @param arg Spinlock to release; may be NULL on UP configurations,
 *            in which case spinlock_unlock() is expected to be a no-op.
 */
void release_spinlock(void *arg)
{
	spinlock_unlock((spinlock_t *) arg);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/synch/mutex.c |
---|
0,0 → 1,86 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Mutexes. |
*/ |
#include <synch/mutex.h> |
#include <synch/semaphore.h> |
#include <synch/synch.h> |
/** Initialize mutex
 *
 * A mutex is implemented as a binary semaphore:
 * initialize the underlying semaphore with a count of 1.
 *
 * @param mtx Mutex.
 */
void mutex_initialize(mutex_t *mtx)
{
	semaphore_initialize(&mtx->sem, 1);
}
/** Acquire mutex
 *
 * Acquire mutex.
 * Timeout mode and non-blocking mode can be requested.
 * Delegates to the semaphore down operation.
 *
 * @param mtx Mutex.
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags)
{
	return _semaphore_down_timeout(&mtx->sem, usec, flags);
}
/** Release mutex
 *
 * Release mutex by raising the underlying semaphore,
 * which wakes the first waiter, if any.
 *
 * @param mtx Mutex.
 */
void mutex_unlock(mutex_t *mtx)
{
	semaphore_up(&mtx->sem);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/synch/spinlock.c |
---|
0,0 → 1,163 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Spinlocks. |
*/ |
#include <synch/spinlock.h> |
#include <atomic.h> |
#include <arch/barrier.h> |
#include <arch.h> |
#include <preemption.h> |
#include <print.h> |
#include <debug.h> |
#include <symtab.h> |
#ifdef CONFIG_FB |
#include <genarch/fb/fb.h> |
#endif |
#ifdef CONFIG_SMP |
/** Initialize spinlock
 *
 * Initialize spinlock to the unlocked state.
 *
 * @param sl Pointer to spinlock_t structure.
 * @param name Human-readable lock name; recorded only when
 *             CONFIG_DEBUG_SPINLOCK is enabled (used in deadlock reports).
 */
void spinlock_initialize(spinlock_t *sl, char *name)
{
	atomic_set(&sl->val, 0);
#ifdef CONFIG_DEBUG_SPINLOCK
	sl->name = name;
#endif
}
/** Lock spinlock
 *
 * Lock spinlock.
 * This version has limitted ability to report
 * possible occurence of deadlock.
 *
 * Spins on test_and_set() with preemption disabled; after
 * DEADLOCK_THRESHOLD unsuccessful iterations it prints a suspected
 * deadlock report with the caller's address and keeps spinning.
 *
 * @param sl Pointer to spinlock_t structure.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEADLOCK_THRESHOLD		100000000
void spinlock_lock_debug(spinlock_t *sl)
{
	count_t i = 0;
	char *symbol;
	bool deadlock_reported = false;

	preemption_disable();
	while (test_and_set(&sl->val)) {

		/*
		 * We need to be careful about printflock and fb_lock.
		 * Both of them are used to report deadlocks via
		 * printf() and fb_putchar().
		 *
		 * We trust our code that there is no possible deadlock
		 * caused by these two locks (except when an exception
		 * is triggered for instance by printf() or fb_putchar()).
		 * However, we encountered false positives caused by very
		 * slow VESA framebuffer interaction (especially when
		 * run in a simulator) that caused problems with both
		 * printflock and fb_lock.
		 *
		 * Possible deadlocks on both printflock and fb_lock
		 * are therefore not reported as they would cause an
		 * infinite recursion.
		 */
		if (sl == &printflock)
			continue;
#ifdef CONFIG_FB
		if (sl == &fb_lock)
			continue;
#endif
		if (i++ > DEADLOCK_THRESHOLD) {
			/* Suspected deadlock: report cpu, lock and caller. */
			printf("cpu%d: looping on spinlock %.*p:%s, caller=%.*p",
				CPU->id, sizeof(__address) * 2, sl, sl->name, sizeof(__address) * 2, CALLER);
			symbol = get_symtab_entry(CALLER);
			if (symbol)
				printf("(%s)", symbol);
			printf("\n");
			i = 0;
			deadlock_reported = true;
		}
	}

	/* The lock was eventually acquired; note the false alarm. */
	if (deadlock_reported)
		printf("cpu%d: not deadlocked\n", CPU->id);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
#endif
/** Lock spinlock conditionally
 *
 * Lock spinlock conditionally.
 * If the spinlock is not available at the moment,
 * signal failure.
 *
 * @param sl Pointer to spinlock_t structure.
 *
 * @return Zero on failure, non-zero otherwise.
 */
int spinlock_trylock(spinlock_t *sl)
{
	int rc;

	preemption_disable();
	rc = !test_and_set(&sl->val);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();

	/* On failure the caller does not hold the lock: re-enable preemption. */
	if (!rc)
		preemption_enable();

	return rc;
}
#endif |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/synch/semaphore.c |
---|
0,0 → 1,99 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Semaphores. |
*/ |
#include <synch/semaphore.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#include <synch/synch.h> |
#include <arch/asm.h> |
#include <arch.h> |
/** Initialize semaphore
 *
 * Initialize semaphore.
 * The semaphore value is stored in the wait queue's missed_wakeups
 * counter: the first 'val' down operations succeed without blocking.
 *
 * @param s Semaphore.
 * @param val Maximal number of threads allowed to enter critical section.
 */
void semaphore_initialize(semaphore_t *s, int val)
{
	ipl_t ipl;

	waitq_initialize(&s->wq);

	ipl = interrupts_disable();

	spinlock_lock(&s->wq.lock);
	s->wq.missed_wakeups = val;
	spinlock_unlock(&s->wq.lock);

	interrupts_restore(ipl);
}
/** Semaphore down
 *
 * Semaphore down.
 * Conditional mode and mode with timeout can be requested.
 * Implemented directly as a sleep on the semaphore's wait queue.
 *
 * @param s Semaphore.
 * @param usec Timeout in microseconds.
 * @param flags Select mode of operation.
 *
 * For exact description of possible combinations of
 * usec and flags, see comment for waitq_sleep_timeout().
 *
 * @return See comment for waitq_sleep_timeout().
 */
int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags)
{
	return waitq_sleep_timeout(&s->wq, usec, flags);
}
/** Semaphore up
 *
 * Semaphore up.
 * Wakes the first sleeper on the semaphore's wait queue,
 * or records a missed wakeup (i.e. increments the count).
 *
 * @param s Semaphore.
 */
void semaphore_up(semaphore_t *s)
{
	waitq_wakeup(&s->wq, WAKEUP_FIRST);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/synch/waitq.c |
---|
0,0 → 1,455 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Wait queue. |
* |
* Wait queue is the basic synchronization primitive upon which all |
* other synchronization primitives build. |
* |
* It allows threads to wait for an event in first-come, first-served |
* fashion. Conditional operation as well as timeouts and interruptions |
* are supported. |
*/ |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <proc/thread.h> |
#include <proc/scheduler.h> |
#include <arch/asm.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <time/timeout.h> |
#include <arch.h> |
#include <context.h> |
#include <adt/list.h> |
static void waitq_timeouted_sleep(void *data); |
/** Initialize wait queue
 *
 * Initialize wait queue: empty sleeper list, no missed wakeups.
 *
 * @param wq Pointer to wait queue to be initialized.
 */
void waitq_initialize(waitq_t *wq)
{
	spinlock_initialize(&wq->lock, "waitq_lock");
	list_initialize(&wq->head);
	wq->missed_wakeups = 0;
}
/** Handle timeout during waitq_sleep_timeout() call
 *
 * This routine is called when waitq_sleep_timeout() timeouts.
 * Interrupts are disabled.
 *
 * It is supposed to try to remove 'its' thread from the wait queue;
 * it can eventually fail to achieve this goal when these two events
 * overlap. In that case it behaves just as though there was no
 * timeout at all.
 *
 * @param data Pointer to the thread that called waitq_sleep_timeout().
 */
void waitq_timeouted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	spinlock_lock(&threads_lock);
	/* The thread may have been destroyed in the meantime. */
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		/*
		 * Lock ordering is wq->lock before t->lock elsewhere;
		 * use trylock and retry to avoid an ABBA deadlock.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}

		list_remove(&t->wq_link);
		/* Resume the thread in its timeout fixup context. */
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}

	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}
/** Interrupt sleeping thread.
 *
 * This routine attempts to interrupt a thread from its sleep in a waitqueue.
 * If the thread is not found sleeping, no action is taken.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	/* The thread might have ceased to exist in the meantime. */
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment */
		if (!(t->sleep_interruptible)) {
			/*
			 * The sleep cannot be interrupted.
			 * (The sleeper did not pass SYNCH_FLAGS_INTERRUPTIBLE.)
			 */
			spinlock_unlock(&t->lock);
			goto out;
		}
		/*
		 * Lock order here (t->lock, then wq->lock) is the reverse of
		 * the wakeup path, hence the trylock-and-retry dance.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}
		/* Cancel a pending timeout, if it has not fired yet. */
		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
			t->timeout_pending = false;
		/*
		 * Remove the sleeper and make it resume in the context saved
		 * for the interruption case, so that its
		 * waitq_sleep_timeout() reports ESYNCH_INTERRUPTED.
		 */
		list_remove(&t->wq_link);
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;
		t->sleep_queue = NULL;	/* t no longer sleeps in a wait queue */
		spinlock_unlock(&wq->lock);
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
/** Sleep until either wakeup, timeout or interruption occurs |
* |
* This is a sleep implementation which allows itself to time out or to be |
* interrupted from the sleep, restoring a failover context. |
* |
* Sleepers are organised in a FIFO fashion in a structure called wait queue. |
* |
* This function is really basic in that other functions as waitq_sleep() |
* and all the *_timeout() functions use it. |
* |
* @param wq Pointer to wait queue. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of the sleep. |
* |
* The sleep can be interrupted only if the |
* SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. |
* If usec is greater than zero, regardless of the value of the |
* SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout, |
* interruption or wakeup comes. |
* |
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call |
* will not return until wakeup or interruption comes. |
* |
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will |
* immediately return, reporting either success or failure. |
* |
* @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED, |
* ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED. |
* |
* @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time |
* of the call there was no pending wakeup. |
* |
* @li ESYNCH_TIMEOUT means that the sleep timed out. |
* |
* @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread. |
* |
* @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was |
* a pending wakeup at the time of the call. The caller was not put |
* asleep at all. |
* |
* @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was |
* attempted. |
*/ |
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags) |
{ |
ipl_t ipl; |
int rc; |
ipl = waitq_sleep_prepare(wq); |
rc = waitq_sleep_timeout_unsafe(wq, usec, flags); |
waitq_sleep_finish(wq, rc, ipl); |
return rc; |
} |
/** Prepare to sleep in a waitq. |
* |
* This function will return holding the lock of the wait queue |
* and interrupts disabled. |
* |
* @param wq Wait queue. |
* |
* @return Interrupt level as it existed on entry to this function. |
*/ |
ipl_t waitq_sleep_prepare(waitq_t *wq) |
{ |
ipl_t ipl; |
restart: |
ipl = interrupts_disable(); |
if (THREAD) { /* needed during system initiailzation */ |
/* |
* Busy waiting for a delayed timeout. |
* This is an important fix for the race condition between |
* a delayed timeout and a next call to waitq_sleep_timeout(). |
* Simply, the thread is not allowed to go to sleep if |
* there are timeouts in progress. |
*/ |
spinlock_lock(&THREAD->lock); |
if (THREAD->timeout_pending) { |
spinlock_unlock(&THREAD->lock); |
interrupts_restore(ipl); |
goto restart; |
} |
spinlock_unlock(&THREAD->lock); |
} |
spinlock_lock(&wq->lock); |
return ipl; |
} |
/** Finish waiting in a wait queue. |
* |
* This function restores interrupts to the state that existed prior |
* to the call to waitq_sleep_prepare(). If necessary, the wait queue |
* lock is released. |
* |
* @param wq Wait queue. |
* @param rc Return code of waitq_sleep_timeout_unsafe(). |
* @param ipl Interrupt level returned by waitq_sleep_prepare(). |
*/ |
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) |
{ |
switch (rc) { |
case ESYNCH_WOULD_BLOCK: |
case ESYNCH_OK_ATOMIC: |
spinlock_unlock(&wq->lock); |
break; |
default: |
break; |
} |
interrupts_restore(ipl); |
} |
/** Internal implementation of waitq_sleep_timeout().
 *
 * This function implements logic of sleeping in a wait queue.
 * This call must be preceeded by a call to waitq_sleep_prepare()
 * and followed by a call to waitq_sleep_finish().
 *
 * @param wq See waitq_sleep_timeout().
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
 * @return See waitq_sleep_timeout().
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
{
	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		/* Consume one missed wakeup and succeed atomically. */
		wq->missed_wakeups--;
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
			/* return immediately instead of going to sleep */
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);

	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
		/*
		 * If the thread was already interrupted,
		 * don't go to sleep at all.
		 */
		if (THREAD->interrupted) {
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&wq->lock);
			return ESYNCH_INTERRUPTED;
		}

		/*
		 * Set context that will be restored if the sleep
		 * of this thread is ever interrupted.
		 * NOTE(review): context_save() behaves setjmp-like here --
		 * the false branch is taken when the saved context is later
		 * restored by waitq_interrupt_sleep().
		 */
		THREAD->sleep_interruptible = true;
		if (!context_save(&THREAD->sleep_interruption_context)) {
			/* Short emulation of scheduler() return code. */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_INTERRUPTED;
		}
	} else {
		THREAD->sleep_interruptible = false;
	}

	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/*
			 * Short emulation of scheduler() return code.
			 * Reached when waitq_timeouted_sleep() restores
			 * this context after the timeout fired.
			 */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
	}

	/* Enqueue at the tail: sleepers are served first-come, first-served. */
	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler(); 	/* wq->lock is released in scheduler_separated_stack() */

	return ESYNCH_OK_BLOCKED;
}
/** Wake up first thread sleeping in a wait queue |
* |
* Wake up first thread sleeping in a wait queue. |
* This is the SMP- and IRQ-safe wrapper meant for |
* general use. |
* |
* Besides its 'normal' wakeup operation, it attempts |
* to unregister possible timeout. |
* |
* @param wq Pointer to wait queue. |
* @param all If this is non-zero, all sleeping threads |
* will be woken up and missed count will be zeroed. |
*/ |
void waitq_wakeup(waitq_t *wq, bool all) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&wq->lock); |
_waitq_wakeup_unsafe(wq, all); |
spinlock_unlock(&wq->lock); |
interrupts_restore(ipl); |
} |
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
 *
 * This is the internal SMP- and IRQ-unsafe version
 * of waitq_wakeup(). It assumes wq->lock is already
 * locked and interrupts are already disabled.
 *
 * @param wq Pointer to wait queue.
 * @param all If this is non-zero, all sleeping threads
 * 	will be woken up and missed count will be zeroed.
 */
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
{
	thread_t *t;

loop:
	if (list_empty(&wq->head)) {
		/*
		 * Nobody is sleeping; record the wakeup so a future
		 * sleeper can consume it.  A wake-all resets the count
		 * instead, as there is no single sleeper to credit.
		 */
		wq->missed_wakeups++;
		if (all)
			wq->missed_wakeups = 0;
		return;
	}

	/* FIFO policy: wake the thread at the head of the queue. */
	t = list_get_instance(wq->head.next, thread_t, wq_link);

	/*
	 * Lock the thread prior to removing it from the wq.
	 * This is not necessary because of mutual exclusion
	 * (the link belongs to the wait queue), but because
	 * of synchronization with waitq_timeouted_sleep()
	 * and waitq_interrupt_sleep().
	 *
	 * In order for these two functions to work, the following
	 * invariant must hold:
	 *
	 * t->sleep_queue != NULL <=> t sleeps in a wait queue
	 *
	 * For an observer who locks the thread, the invariant
	 * holds only when the lock is held prior to removing
	 * it from the wait queue.
	 */
	spinlock_lock(&t->lock);
	list_remove(&t->wq_link);
	/* Cancel a pending sleep timeout, if it has not fired yet. */
	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
		t->timeout_pending = false;
	t->sleep_queue = NULL;
	spinlock_unlock(&t->lock);

	thread_ready(t);

	if (all)
		goto loop;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/synch/futex.c |
---|
0,0 → 1,340 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Kernel backend for futexes. |
*/ |
#include <synch/futex.h> |
#include <synch/rwlock.h> |
#include <synch/spinlock.h> |
#include <synch/synch.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/slab.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <genarch/mm/page_pt.h> |
#include <genarch/mm/page_ht.h> |
#include <adt/hash_table.h> |
#include <adt/list.h> |
#include <arch.h> |
#include <align.h> |
#include <panic.h> |
#include <errno.h> |
#include <print.h> |
#define FUTEX_HT_SIZE	1024	/* keep it a power of 2 */

/* Local helpers and hash table operation callbacks. */
static void futex_initialize(futex_t *futex);
static futex_t *futex_find(__address paddr);
static index_t futex_ht_hash(__native *key);
static bool futex_ht_compare(__native *key, count_t keys, link_t *item);
static void futex_ht_remove_callback(link_t *item);

/**
 * Read-write lock protecting global futex hash table.
 * It is also used to serialize access to all futex_t structures.
 * Must be acquired before the task futex B+tree lock.
 */
static rwlock_t futex_ht_lock;

/** Futex hash table, keyed by physical address of the futex counter. */
static hash_table_t futex_ht;

/** Futex hash table operations. */
static hash_table_operations_t futex_ht_ops = {
	.hash = futex_ht_hash,
	.compare = futex_ht_compare,
	.remove_callback = futex_ht_remove_callback
};
/** Initialize futex subsystem. */ |
void futex_init(void) |
{ |
rwlock_initialize(&futex_ht_lock); |
hash_table_create(&futex_ht, FUTEX_HT_SIZE, 1, &futex_ht_ops); |
} |
/** Initialize kernel futex structure. |
* |
* @param futex Kernel futex structure. |
*/ |
void futex_initialize(futex_t *futex) |
{ |
waitq_initialize(&futex->wq); |
link_initialize(&futex->ht_link); |
futex->paddr = 0; |
futex->refcount = 1; |
} |
/** Sleep in futex wait queue. |
* |
* @param uaddr Userspace address of the futex counter. |
* @param usec If non-zero, number of microseconds this thread is willing to sleep. |
* @param flags Select mode of operation. |
* |
* @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h. |
* If there is no physical mapping for uaddr ENOENT is returned. |
*/ |
__native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags) |
{ |
futex_t *futex; |
__address paddr; |
pte_t *t; |
ipl_t ipl; |
ipl = interrupts_disable(); |
/* |
* Find physical address of futex counter. |
*/ |
page_table_lock(AS, true); |
t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE)); |
if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
return (__native) ENOENT; |
} |
paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
futex = futex_find(paddr); |
return (__native) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE); |
} |
/** Wakeup one thread waiting in futex wait queue. |
* |
* @param uaddr Userspace address of the futex counter. |
* |
* @return ENOENT if there is no physical mapping for uaddr. |
*/ |
__native sys_futex_wakeup(__address uaddr) |
{ |
futex_t *futex; |
__address paddr; |
pte_t *t; |
ipl_t ipl; |
ipl = interrupts_disable(); |
/* |
* Find physical address of futex counter. |
*/ |
page_table_lock(AS, true); |
t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE)); |
if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) { |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
return (__native) ENOENT; |
} |
paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE)); |
page_table_unlock(AS, true); |
interrupts_restore(ipl); |
futex = futex_find(paddr); |
waitq_wakeup(&futex->wq, WAKEUP_FIRST); |
return 0; |
} |
/** Find kernel address of the futex structure corresponding to paddr.
 *
 * If the structure does not exist already, a new one is created.
 *
 * Lock ordering: futex_ht_lock is taken before TASK->futexes_lock,
 * as required by the declaration of futex_ht_lock.
 *
 * @param paddr Physical address of the userspace futex counter.
 *
 * @return Address of the kernel futex structure.
 */
futex_t *futex_find(__address paddr)
{
	link_t *item;
	futex_t *futex;
	btree_node_t *leaf;

	/*
	 * Find the respective futex structure
	 * or allocate new one if it does not exist already.
	 */
	rwlock_read_lock(&futex_ht_lock);
	item = hash_table_find(&futex_ht, &paddr);
	if (item) {
		futex = hash_table_get_instance(item, futex_t, ht_link);

		/*
		 * See if the current task knows this futex.
		 */
		mutex_lock(&TASK->futexes_lock);
		if (!btree_search(&TASK->futexes, paddr, &leaf)) {
			/*
			 * The futex is new to the current task.
			 * However, we only have read access.
			 * Gain write access and try again.
			 */
			mutex_unlock(&TASK->futexes_lock);
			goto gain_write_access;
		}
		mutex_unlock(&TASK->futexes_lock);

		rwlock_read_unlock(&futex_ht_lock);
	} else {
		/*
		 * NOTE: this label sits inside the else branch on purpose;
		 * it is also entered from the if branch above when the futex
		 * exists globally but is not yet known to the current task.
		 */
gain_write_access:
		/*
		 * Upgrade to writer is not currently supported,
		 * therefore, it is necessary to release the read lock
		 * and reacquire it as a writer.
		 */
		rwlock_read_unlock(&futex_ht_lock);
		rwlock_write_lock(&futex_ht_lock);

		/*
		 * Avoid possible race condition by searching
		 * the hash table once again with write access.
		 * (Another thread may have inserted the futex while
		 * the lock was dropped.)
		 */
		item = hash_table_find(&futex_ht, &paddr);
		if (item) {
			futex = hash_table_get_instance(item, futex_t, ht_link);

			/*
			 * See if this futex is known to the current task.
			 */
			mutex_lock(&TASK->futexes_lock);
			if (!btree_search(&TASK->futexes, paddr, &leaf)) {
				/*
				 * The futex is new to the current task.
				 * Upgrade its reference count and put it to the
				 * current task's B+tree of known futexes.
				 */
				futex->refcount++;
				btree_insert(&TASK->futexes, paddr, futex, leaf);
			}
			mutex_unlock(&TASK->futexes_lock);

			rwlock_write_unlock(&futex_ht_lock);
		} else {
			futex = (futex_t *) malloc(sizeof(futex_t), 0);
			futex_initialize(futex);
			futex->paddr = paddr;
			hash_table_insert(&futex_ht, &paddr, &futex->ht_link);

			/*
			 * This is the first task referencing the futex.
			 * It can be directly inserted into its
			 * B+tree of known futexes.
			 */
			mutex_lock(&TASK->futexes_lock);
			btree_insert(&TASK->futexes, paddr, futex, NULL);
			mutex_unlock(&TASK->futexes_lock);

			rwlock_write_unlock(&futex_ht_lock);
		}
	}

	return futex;
}
/** Compute hash index into futex hash table. |
* |
* @param key Address where the key (i.e. physical address of futex counter) is stored. |
* |
* @return Index into futex hash table. |
*/ |
index_t futex_ht_hash(__native *key) |
{ |
return *key & (FUTEX_HT_SIZE-1); |
} |
/** Compare futex hash table item with a key. |
* |
* @param key Address where the key (i.e. physical address of futex counter) is stored. |
* |
* @return True if the item matches the key. False otherwise. |
*/ |
bool futex_ht_compare(__native *key, count_t keys, link_t *item) |
{ |
futex_t *futex; |
ASSERT(keys == 1); |
futex = hash_table_get_instance(item, futex_t, ht_link); |
return *key == futex->paddr; |
} |
/** Callback for removal items from futex hash table. |
* |
* @param item Item removed from the hash table. |
*/ |
void futex_ht_remove_callback(link_t *item) |
{ |
futex_t *futex; |
futex = hash_table_get_instance(item, futex_t, ht_link); |
free(futex); |
} |
/** Remove references from futexes known to the current task.
 *
 * Walks the leaf nodes of the task's B+tree of known futexes,
 * drops one reference per futex and destroys futexes whose
 * reference count reaches zero. Called on task destruction.
 */
void futex_cleanup(void)
{
	link_t *cur;

	/* Lock order: futex_ht_lock first, then the task's futex lock. */
	rwlock_write_lock(&futex_ht_lock);
	mutex_lock(&TASK->futexes_lock);

	/* Iterate over all leaf nodes of the B+tree of known futexes. */
	for (cur = TASK->futexes.leaf_head.next; cur != &TASK->futexes.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			futex_t *ftx;
			__address paddr = node->key[i];

			ftx = (futex_t *) node->value[i];
			/*
			 * Removal from the hash table triggers
			 * futex_ht_remove_callback(), which frees the futex.
			 */
			if (--ftx->refcount == 0)
				hash_table_remove(&futex_ht, &paddr, 1);
		}
	}

	mutex_unlock(&TASK->futexes_lock);
	rwlock_write_unlock(&futex_ht_lock);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/debug/symtab.c |
---|
0,0 → 1,202 |
/* |
* Copyright (C) 2005 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericdebug |
* @{ |
*/ |
/** |
* @file |
* @brief Kernel symbol resolver. |
*/ |
#include <symtab.h> |
#include <typedefs.h> |
#include <arch/byteorder.h> |
#include <func.h> |
#include <print.h> |
/** Return entry that seems most likely to correspond to argument. |
* |
* Return entry that seems most likely to correspond |
* to address passed in the argument. |
* |
* @param addr Address. |
* |
* @return Pointer to respective symbol string on success, NULL otherwise. |
*/ |
char * get_symtab_entry(__native addr) |
{ |
count_t i; |
for (i=1;symbol_table[i].address_le;++i) { |
if (addr < __u64_le2host(symbol_table[i].address_le)) |
break; |
} |
if (addr >= __u64_le2host(symbol_table[i-1].address_le)) |
return symbol_table[i-1].symbol_name; |
return NULL; |
} |
/** Find symbols that match the parameter forward and print them.
 *
 * Scans symbol_table starting at *startpos for the first symbol whose
 * name matches @name. Symbol names appear to contain a ':' separator
 * (symbols without one are skipped); the match is aligned on the ':'
 * so the search string may be given with or without the part before
 * the colon -- NOTE(review): confirm the exact naming scheme against
 * the symbol table generator.
 *
 * @param name - search string
 * @param startpos - starting position, changes to found position
 * @return Pointer to the part of string that should be completed or NULL
 */
static char * symtab_search_one(const char *name, int *startpos)
{
	int namelen = strlen(name);
	char *curname;
	int i,j;
	int colonoffset = -1;

	/* Offset of the first ':' in the search string, or -1 if none. */
	for (i=0;name[i];i++)
		if (name[i] == ':') {
			colonoffset = i;
			break;
		}

	for (i=*startpos;symbol_table[i].address_le;++i) {
		/* Find a ':' in name */
		curname = symbol_table[i].symbol_name;
		for (j=0; curname[j] && curname[j] != ':'; j++)
			;
		/* Skip symbols that contain no ':' at all. */
		if (!curname[j])
			continue;
		/*
		 * Shift curname so that its ':' lines up with the ':' in the
		 * search string; when the search string has no colon,
		 * colonoffset is -1 and curname starts just past its ':'.
		 */
		j -= colonoffset;
		curname += j;
		/* Cannot match if the remaining symbol text is shorter. */
		if (strlen(curname) < namelen)
			continue;
		if (strncmp(curname, name, namelen) == 0) {
			*startpos = i;
			/* Return the uncompleted tail of the symbol name. */
			return curname+namelen;
		}
	}
	return NULL;
}
/** Return address that corresponds to the entry |
* |
* Search symbol table, and if there is one match, return it |
* |
* @param name Name of the symbol |
* @return 0 - Not found, -1 - Duplicate symbol, other - address of symbol |
*/ |
__address get_symbol_addr(const char *name) |
{ |
count_t found = 0; |
__address addr = NULL; |
char *hint; |
int i; |
i = 0; |
while ((hint=symtab_search_one(name, &i))) { |
if (!strlen(hint)) { |
addr = __u64_le2host(symbol_table[i].address_le); |
found++; |
} |
i++; |
} |
if (found > 1) |
return ((__address) -1); |
return addr; |
} |
/** Find symbols that match parameter and prints them */ |
void symtab_print_search(const char *name) |
{ |
int i; |
__address addr; |
char *realname; |
i = 0; |
while (symtab_search_one(name, &i)) { |
addr = __u64_le2host(symbol_table[i].address_le); |
realname = symbol_table[i].symbol_name; |
printf("%.*p: %s\n", sizeof(__address) * 2, addr, realname); |
i++; |
} |
} |
/** Symtab completion
 *
 * Completes @input in place to the longest common prefix of all
 * matching symbol names; when the matches diverge immediately,
 * the candidates are printed instead.
 *
 * @param input - Search string, completes to symbol name
 * @returns - 0 - nothing found, 1 - success, >1 print duplicates
 */
int symtab_compl(char *input)
{
	char output[MAX_SYMBOL_NAME+1];
	int startpos = 0;
	char *foundtxt;
	int found = 0;
	int i;
	char *name = input;

	/* Allow completion of pointers */
	if (name[0] == '*' || name[0] == '&')
		name++;

	/* Do not print everything */
	if (!strlen(name))
		return 0;

	output[0] = '\0';

	/*
	 * Accumulate in output the longest common prefix of all
	 * completion tails returned by symtab_search_one().
	 */
	while ((foundtxt = symtab_search_one(name, &startpos))) {
		startpos++;
		if (!found)
			/* First match: take the whole tail (incl. '\0'). */
			strncpy(output, foundtxt, strlen(foundtxt)+1);
		else {
			/* Truncate output at the first differing character. */
			for (i=0; output[i] && foundtxt[i] && output[i]==foundtxt[i]; i++)
				;
			output[i] = '\0';
		}
		found++;
	}
	if (!found)
		return 0;

	/* Ambiguous with no common prefix: list all candidates. */
	if (found > 1 && !strlen(output)) {
		printf("\n");
		startpos = 0;
		while ((foundtxt = symtab_search_one(name, &startpos))) {
			printf("%s\n", symbol_table[startpos].symbol_name);
			startpos++;
		}
	}
	/* Append the completed part to the caller's buffer. */
	strncpy(input, output, MAX_SYMBOL_NAME);
	return found;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/interrupt/interrupt.c |
---|
0,0 → 1,155 |
/* |
* Copyright (C) 2005 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericinterrupt |
* @{ |
*/ |
/** |
* @file |
* @brief Interrupt redirector. |
* |
* This file provides means of registering interrupt handlers |
* by kernel functions and calling the handlers when interrupts |
* occur. |
*/ |
#include <interrupt.h> |
#include <debug.h> |
#include <console/kconsole.h> |
#include <console/console.h> |
#include <console/chardev.h> |
#include <console/cmd.h> |
#include <panic.h> |
#include <print.h> |
#include <symtab.h> |
/** Table of registered exception handlers, indexed by exception number. */
static struct {
	const char *name;	/* human-readable description of the handler */
	iroutine f;		/* the handler routine itself */
} exc_table[IVT_ITEMS];

/** Lock protecting exc_table. */
SPINLOCK_INITIALIZE(exctbl_lock);
/** Register exception handler |
* |
* @param n Exception number |
* @param name Description |
* @param f Exception handler |
*/ |
iroutine exc_register(int n, const char *name, iroutine f) |
{ |
ASSERT(n < IVT_ITEMS); |
iroutine old; |
spinlock_lock(&exctbl_lock); |
old = exc_table[n].f; |
exc_table[n].f = f; |
exc_table[n].name = name; |
spinlock_unlock(&exctbl_lock); |
return old; |
} |
/** Dispatch exception according to exception table
 *
 * Called directly from the assembler code.
 * CPU is interrupts_disable()'d.
 *
 * @param n Exception number (index into exc_table; IVT_FIRST is
 * 	added back before the handler is invoked).
 * @param istate Saved state of the interrupted context.
 */
void exc_dispatch(int n, istate_t *istate)
{
	ASSERT(n < IVT_ITEMS);

	exc_table[n].f(n + IVT_FIRST, istate);

	/* This is a safe place to exit exiting thread */
	if (THREAD && THREAD->interrupted && istate_from_uspace(istate))
		thread_exit();
}
/** Default 'null' exception handler
 *
 * Installed for every vector by exc_init(). Reports the fault for
 * exceptions originating in userspace (fault_if_from_uspace() --
 * presumably it does not return in that case); otherwise the
 * unhandled exception panics the kernel.
 */
static void exc_undef(int n, istate_t *istate)
{
	fault_if_from_uspace(istate, "Unhandled exception %d.", n);
	panic("Unhandled exception %d.", n);
}
/** kconsole cmd - print all exceptions
 *
 * Lists every entry of exc_table with its number, description and
 * the resolved symbol name of the handler, paginating every 20 lines.
 *
 * @param argv Command arguments (unused; the command takes none).
 *
 * @return Always 1 (command success).
 */
static int exc_print_cmd(cmd_arg_t *argv)
{
	int i;
	char *symbol;

	spinlock_lock(&exctbl_lock);

	printf("Exc Description Handler\n");
	for (i=0; i < IVT_ITEMS; i++) {
		symbol = get_symtab_entry((__native)exc_table[i].f);
		if (!symbol)
			symbol = "not found";

		printf("%d %s %.*p(%s)\n", i + IVT_FIRST, exc_table[i].name,
			sizeof(__address) * 2, exc_table[i].f,symbol);

		if (!((i+1) % 20)) {
			printf("Press any key to continue.");
			/*
			 * Drop the lock while blocking on console input;
			 * the table may change in the meantime, which is
			 * acceptable for a diagnostic listing.
			 */
			spinlock_unlock(&exctbl_lock);
			getc(stdin);
			spinlock_lock(&exctbl_lock);
			printf("\n");
		}
	}

	spinlock_unlock(&exctbl_lock);

	return 1;
}
/** Descriptor of the kconsole 'exc' command (see exc_print_cmd()). */
static cmd_info_t exc_info = {
	.name = "exc",
	.description = "Print exception table.",
	.func = exc_print_cmd,
	.help = NULL,
	.argc = 0,
	.argv = NULL
};
/** Initialize generic exception handling support */ |
void exc_init(void) |
{ |
int i; |
for (i=0;i < IVT_ITEMS; i++) |
exc_register(i, "undef", (iroutine) exc_undef); |
cmd_initialize(&exc_info); |
if (!cmd_register(&exc_info)) |
panic("could not register command %s\n", exc_info.name); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/smp/ipi.c |
---|
0,0 → 1,71 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Generic IPI interface. |
*/ |
#ifdef CONFIG_SMP |
#include <smp/ipi.h> |
#include <config.h> |
/** Broadcast IPI message |
* |
* Broadcast IPI message to all CPUs. |
* |
* @param ipi Message to broadcast. |
* |
* @bug The decision whether to actually send the IPI must be based |
* on a different criterion. The current version has |
* problems when some of the detected CPUs are marked |
* disabled in machine configuration. |
*/ |
void ipi_broadcast(int ipi) |
{ |
/* |
* Provisions must be made to avoid sending IPI: |
* - before all CPU's were configured to accept the IPI |
* - if there is only one CPU but the kernel was compiled with CONFIG_SMP |
*/ |
if ((config.cpu_active > 1) && (config.cpu_active == config.cpu_count)) |
ipi_broadcast_arch(ipi); |
} |
#endif /* CONFIG_SMP */ |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/printf/printf.c |
---|
0,0 → 1,54 |
/* |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#include <print.h> |
/** Print formatted text to the standard kernel output.
 *
 * Thin variadic wrapper around vprintf().
 *
 * @param fmt Format string.
 * @return Number of printed characters, negative value on failure.
 */
int printf(const char *fmt, ...)
{
	va_list args;
	int chars;

	va_start(args, fmt);
	chars = vprintf(fmt, args);
	va_end(args);

	return chars;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/printf/snprintf.c |
---|
0,0 → 1,53 |
/* |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#include <print.h> |
#include <printf/printf_core.h> |
/** Print formatted text into a bounded buffer.
 *
 * Thin variadic wrapper around vsnprintf(); at most @a size bytes
 * (including the terminating zero) are written to @a str.
 *
 * @param str Destination buffer.
 * @param size Capacity of @a str in bytes.
 * @param fmt Format string.
 * @return Number of characters that the full output would need
 *         (C99 semantics), negative value on failure.
 */
int snprintf(char *str, size_t size, const char *fmt, ...)
{
	va_list args;
	int chars;

	va_start(args, fmt);
	chars = vsnprintf(str, size, fmt, args);
	va_end(args);

	return chars;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/printf/sprintf.c |
---|
0,0 → 1,52 |
/* |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#include <print.h> |
/** Print formatted text into an unbounded buffer.
 *
 * Thin variadic wrapper around vsprintf(); the caller must guarantee
 * that @a str is large enough for the whole output.
 *
 * @param str Destination buffer.
 * @param fmt Format string.
 * @return Number of printed characters, negative value on failure.
 */
int sprintf(char *str, const char *fmt, ...)
{
	va_list args;
	int chars;

	va_start(args, fmt);
	chars = vsprintf(str, fmt, args);
	va_end(args);

	return chars;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/printf/vprintf.c |
---|
0,0 → 1,60 |
/* |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#include <print.h> |
#include <printf/printf_core.h> |
#include <putchar.h> |
int vprintf_write(const char *str, size_t count, void *unused);

/** Emit @a count characters from @a str via putchar().
 *
 * Output callback used by vprintf().
 *
 * @param str Source characters (need not be zero terminated).
 * @param count Number of characters to emit.
 * @param unused Callback context; ignored here.
 * @return Number of characters written.
 */
int vprintf_write(const char *str, size_t count, void *unused)
{
	size_t done = 0;

	while (done < count) {
		putchar(str[done]);
		done++;
	}

	return done;
}
/** Format @a fmt and send the result to putchar() via vprintf_write().
 *
 * @param fmt Format string.
 * @param ap Variadic argument list.
 * @return Number of printed characters, negative value on failure.
 */
int vprintf(const char *fmt, va_list ap)
{
	struct printf_spec ps;

	/* NOTE(review): calling vprintf_write() through a pointer of a
	 * different (void * vs const char *) parameter type follows the
	 * file-wide convention; strictly it is not well defined C. */
	ps.write = (int(*)(void *, size_t, void *)) vprintf_write;
	ps.data = NULL;

	return printf_core(fmt, &ps, ap);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/printf/vsnprintf.c |
---|
0,0 → 1,105 |
/* |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#include <print.h> |
#include <printf/printf_core.h> |
#include <memstr.h> |
struct vsnprintf_data {
	size_t size; /* total space for string */
	size_t len; /* count of currently used characters */
	char *string; /* destination string */
};

int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data);

/** Write string to given buffer.
 * Write at most data->size characters including trailing zero. According to C99, snprintf() has to return number
 * of characters that would have been written if enough space had been available. Hence the return value is not
 * number of really printed characters but size of the input string. Number of really used characters
 * is stored in data->len.
 * @param str source string to print
 * @param count size of source string
 * @param data structure with destination string, counter of used space and total string size.
 * @return number of characters to print (not characters really printed!)
 */
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data)
{
	size_t left = data->size - data->len;

	if (left == 0) {
		/* Buffer is already full (and terminated). */
		return count;
	}

	if (left == 1) {
		/* Only the terminating zero fits into the remaining byte. */
		data->string[data->size - 1] = 0;
		data->len = data->size;
		return count;
	}

	if (left <= count) {
		/*
		 * Not enough room for the whole chunk plus the terminator:
		 * copy the first left - 1 characters and terminate.
		 */
		memcpy(data->string + data->len, str, left - 1);
		data->string[data->size - 1] = 0;
		data->len = data->size;
		return count;
	}

	/* The whole chunk fits. */
	memcpy(data->string + data->len, str, count);
	data->len += count;

	/*
	 * Terminate the string, but do not account the zero in data->len
	 * so that the next chunk may overwrite it.
	 */
	data->string[data->len] = 0;

	return count;
}
/** Format @a fmt into @a str, writing at most @a size bytes.
 *
 * @param str Destination buffer; always zero terminated when size > 0.
 * @param size Capacity of @a str in bytes, including the terminator.
 * @param fmt Format string.
 * @param ap Variadic argument list.
 * @return Number of characters the complete output requires (C99
 *         semantics), negative value on failure.
 */
int vsnprintf(char *str, size_t size, const char *fmt, va_list ap)
{
	struct vsnprintf_data data = {size, 0, str};
	struct printf_spec ps;

	ps.write = (int(*)(void *, size_t, void *)) vsnprintf_write;
	ps.data = &data;

	/* Terminate up front so that an empty output is still a valid string. */
	if (size > 0)
		str[0] = 0;

	/* vsnprintf_write() keeps str zero terminated from here on. */
	return printf_core(fmt, &ps, ap);
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/printf/printf_core.c |
---|
0,0 → 1,724 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file |
* @brief Printing functions. |
*/ |
#include <printf/printf_core.h> |
#include <putchar.h> |
#include <print.h> |
#include <synch/spinlock.h> |
#include <arch/arg.h> |
#include <arch/asm.h> |
#include <arch.h> |
SPINLOCK_INITIALIZE(printflock); /**< printf spinlock; serializes all printf_core() output */
/* Internal formatting flags collected while parsing a conversion specification. */
#define __PRINTF_FLAG_PREFIX 0x00000001 /**< show prefixes 0x or 0*/
#define __PRINTF_FLAG_SIGNED 0x00000002 /**< signed / unsigned number */
#define __PRINTF_FLAG_ZEROPADDED 0x00000004 /**< print leading zeroes */
#define __PRINTF_FLAG_LEFTALIGNED 0x00000010 /**< align to left */
#define __PRINTF_FLAG_SHOWPLUS 0x00000020 /**< always show + sign */
#define __PRINTF_FLAG_SPACESIGN 0x00000040 /**< print space instead of plus */
#define __PRINTF_FLAG_BIGCHARS 0x00000080 /**< show big characters */
#define __PRINTF_FLAG_NEGATIVE 0x00000100 /**< number has - sign */
#define PRINT_NUMBER_BUFFER_SIZE (64+5) /**< Buffer big enough for a 64 bit number
 * printed in base 2, sign, prefix and
 * 0 to terminate string. (The last one is only for easier detection of the
 * end of buffer by the zero-filling subroutine.) */
/** Enumeration of possible argument types (parsed from the "hh"/"h"/"l"/"ll"/"z" modifiers).
 */
typedef enum {
	PrintfQualifierByte = 0,
	PrintfQualifierShort,
	PrintfQualifierInt,
	PrintfQualifierLong,
	PrintfQualifierLongLong,
	PrintfQualifierNative,
	PrintfQualifierPointer
} qualifier_t;
static char digits_small[] = "0123456789abcdef"; /**< Small hexadecimal characters */
static char digits_big[] = "0123456789ABCDEF"; /**< Big hexadecimal characters */
/** Determine whether @a c is an ASCII decimal digit.
 * @param c Character to test.
 * @return Nonzero iff c lies in the interval '0' to '9'.
 */
static inline int isdigit(int c)
{
	return (c >= '0') && (c <= '9');
}
/** Compute length of given zero terminated string. |
* @param str Pointer to valid string. |
* @return string length without trailing zero. |
*/ |
static __native strlen(const char *str) |
{ |
__native counter = 0; |
while (str[counter] != 0) { |
counter++; |
} |
return counter; |
} |
/** Print count chars from buffer without adding newline
 * @param buf Buffer with size at least count bytes - NULL pointer NOT allowed!
 * @param count Number of characters to emit.
 * @param ps output method and its data
 * @return number of printed characters
 */
static int printf_putnchars(const char * buf, size_t count, struct printf_spec *ps)
{
	/* Hand the raw character run to the configured output callback. */
	return ps->write((void *)buf, count, ps->data);
}
/** Print string without added newline |
* @param str string to print |
* @param ps write function specification and support data |
* @return number of printed characters |
*/ |
static int printf_putstr(const char * str, struct printf_spec *ps) |
{ |
size_t count; |
if (str == NULL) { |
return printf_putnchars("(NULL)", 6, ps); |
} |
count = strlen(str); |
return ps->write((void *) str, count, ps->data); |
} |
/** Print one character to output |
* @param c one character |
* @param ps output method |
* @return number of printed characters |
*/ |
static int printf_putchar(int c, struct printf_spec *ps) |
{ |
unsigned char ch = c; |
return ps->write((void *) &ch, 1, ps->data); |
} |
/** Print one formatted character |
* @param c character to print |
* @param width |
* @param flags |
* @return number of printed characters, negative value on fail |
*/ |
static int print_char(char c, int width, __u64 flags, struct printf_spec *ps) |
{ |
int counter = 0; |
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) { |
while (--width > 0) { /* one space is consumed by character itself hence predecrement */ |
if (printf_putchar(' ', ps) > 0) |
++counter; |
} |
} |
if (printf_putchar(c, ps) > 0) |
counter++; |
while (--width > 0) { /* one space is consumed by character itself hence predecrement */ |
if (printf_putchar(' ', ps) > 0) |
++counter; |
} |
return ++counter; |
} |
/** Print one string
 * The string is space padded up to @a width and, when @a precision exceeds
 * the string length, space padded up to @a precision as well.
 * @param s string to print; NULL is rendered as "(NULL)"
 * @param width minimal field width
 * @param precision maximal count of characters taken from s; 0 means
 *	"not specified" (the whole string is printed)
 * @param flags output modifiers (__PRINTF_FLAG_LEFTALIGNED is honoured)
 * @param ps write function specification and support data
 * @return number of printed characters or negative value on fail
 */
static int print_string(char *s, int width, int precision, __u64 flags, struct printf_spec *ps)
{
	int counter = 0;
	size_t size;
	int retval;

	if (s == NULL) {
		return printf_putstr("(NULL)", ps);
	}
	size = strlen(s);
	/* print leading spaces */
	if (precision == 0)
		precision = size;
	/* width now holds only the padding beyond the printed characters */
	width -= precision;
	if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
		while (width-- > 0) {
			if (printf_putchar(' ', ps) == 1)
				counter++;
		}
	}
	/* NOTE(review): precision (int) is promoted to size_t for this
	 * comparison; both values are non-negative here, so it is safe. */
	while (precision > size) {
		precision--;
		if (printf_putchar(' ', ps) == 1)
			++counter;
	}
	if ((retval = printf_putnchars(s, precision, ps)) < 0) {
		return -counter;
	}
	counter += retval;
	/* trailing spaces for left-aligned output */
	while (width-- > 0) {
		if (printf_putchar(' ', ps) == 1)
			++counter;
	}
	return counter;
}
/** Print number in given base
 *
 * Print significant digits of a number in given
 * base, honouring width, precision, sign and prefix flags.
 *
 * @param num Number to print.
 * @param width Minimal field width (space padded unless zero padding applies).
 * @param precision Minimal count of printed digits (zero padded); 0 means
 *	"not specified".
 * @param base Base to print the number in (should
 *	 be in range 2 .. 16).
 * @param flags output modifiers
 * @return number of written characters or EOF
 *
 */
static int print_number(__u64 num, int width, int precision, int base , __u64 flags, struct printf_spec *ps)
{
	char *digits = digits_small;
	char d[PRINT_NUMBER_BUFFER_SIZE]; /* this is good enough even for base == 2, prefix and sign */
	char *ptr = &d[PRINT_NUMBER_BUFFER_SIZE - 1];
	int size = 0; /* size of number with all prefixes and signs */
	int number_size; /* size of plain number */
	char sgn;
	int retval;
	int counter = 0;

	if (flags & __PRINTF_FLAG_BIGCHARS)
		digits = digits_big;

	/* Digits are generated backwards from the end of the buffer. */
	*ptr-- = 0; /* Put zero at end of string */

	if (num == 0) {
		*ptr-- = '0';
		size++;
	} else {
		do {
			*ptr-- = digits[num % base];
			size++;
		} while (num /= base);
	}

	number_size = size;

	/* Collect sum of all prefixes/signs/... to calculate padding and leading zeroes */
	if (flags & __PRINTF_FLAG_PREFIX) {
		switch(base) {
		case 2: /* Binary formatting is not standard, but useful */
			size += 2;
			break;
		case 8:
			size++;
			break;
		case 16:
			size += 2;
			break;
		}
	}

	sgn = 0;
	if (flags & __PRINTF_FLAG_SIGNED) {
		if (flags & __PRINTF_FLAG_NEGATIVE) {
			sgn = '-';
			size++;
		} else if (flags & __PRINTF_FLAG_SHOWPLUS) {
			sgn = '+';
			size++;
		} else if (flags & __PRINTF_FLAG_SPACESIGN) {
			sgn = ' ';
			size++;
		}
	}

	if (flags & __PRINTF_FLAG_LEFTALIGNED) {
		flags &= ~__PRINTF_FLAG_ZEROPADDED;
	}

	/* if number is leftaligned or precision is specified then zeropadding is ignored */
	if (flags & __PRINTF_FLAG_ZEROPADDED) {
		if ((precision == 0) && (width > size)) {
			/* convert zero padding into an equivalent precision */
			precision = width - size + number_size;
		}
	}

	/* print leading spaces */
	if (number_size > precision) /* We must print whole number not only a part */
		precision = number_size;

	width -= precision + size - number_size;

	if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
		while (width-- > 0) {
			if (printf_putchar(' ', ps) == 1)
				counter++;
		}
	}

	/* print sign */
	if (sgn) {
		if (printf_putchar(sgn, ps) == 1)
			counter++;
	}

	/* print prefix */
	if (flags & __PRINTF_FLAG_PREFIX) {
		switch(base) {
		case 2: /* Binary formatting is not standard, but useful */
			if (printf_putchar('0', ps) == 1)
				counter++;
			if (flags & __PRINTF_FLAG_BIGCHARS) {
				if (printf_putchar('B', ps) == 1)
					counter++;
			} else {
				if (printf_putchar('b', ps) == 1)
					counter++;
			}
			break;
		case 8:
			/* NOTE(review): octal prefix is printed as 'o', not the
			 * standard leading '0' - presumably intentional; verify. */
			if (printf_putchar('o', ps) == 1)
				counter++;
			break;
		case 16:
			if (printf_putchar('0', ps) == 1)
				counter++;
			if (flags & __PRINTF_FLAG_BIGCHARS) {
				if (printf_putchar('X', ps) == 1)
					counter++;
			} else {
				if (printf_putchar('x', ps) == 1)
					counter++;
			}
			break;
		}
	}

	/* print leading zeroes */
	precision -= number_size;
	while (precision-- > 0) {
		if (printf_putchar('0', ps) == 1)
			counter++;
	}

	/* print number itself */
	if ((retval = printf_putstr(++ptr, ps)) > 0) {
		counter += retval;
	}

	/* print ending spaces */
	while (width-- > 0) {
		if (printf_putchar(' ', ps) == 1)
			counter++;
	}

	return counter;
}
/** Print formatted string. |
* |
* Print string formatted according to the fmt parameter |
* and variadic arguments. Each formatting directive |
* must have the following form: |
* |
* \% [ FLAGS ] [ WIDTH ] [ .PRECISION ] [ TYPE ] CONVERSION |
* |
* FLAGS:@n |
* - "#" Force to print prefix. |
* For conversion \%o the prefix is 0, for %x and \%X prefixes are 0x and 0X |
* and for conversion \%b the prefix is 0b. |
* |
* - "-" Align to left. |
* |
* - "+" Print positive sign just as negative. |
* |
* - " " If the printed number is positive and "+" flag is not set, print space in |
* place of sign. |
* |
* - "0" Print 0 as padding instead of spaces. Zeroes are placed between sign and the |
* rest of the number. This flag is ignored if "-" flag is specified. |
* |
* WIDTH:@n |
* - Specify minimal width of printed argument. If it is bigger, width is ignored. |
* If width is specified with a "*" character instead of number, width is taken from |
* parameter list. And integer parameter is expected before parameter for processed |
* conversion specification. If this value is negative its absolute value is taken |
* and the "-" flag is set. |
* |
* PRECISION:@n |
* - Value precision. For numbers it specifies minimum valid numbers. |
* Smaller numbers are printed with leading zeroes. Bigger numbers are not affected. |
* Strings with more than precision characters are cut off. |
* Just as with width, an "*" can be used used instead of a number. |
* An integer value is then expected in parameters. When both width and precision |
* are specified using "*", the first parameter is used for width and the second one |
* for precision. |
* |
* TYPE:@n |
* - "hh" Signed or unsigned char.@n |
* - "h" Signed or usigned short.@n |
* - "" Signed or usigned int (default value).@n |
* - "l" Signed or usigned long int.@n |
* - "ll" Signed or usigned long long int.@n |
* - "z" __native (non-standard extension).@n |
* |
* |
* CONVERSION:@n |
* - % Print percentile character itself. |
* |
* - c Print single character. |
* |
* - s Print zero terminated string. If a NULL value is passed as value, "(NULL)" is printed instead. |
* |
* - P, p Print value of a pointer. Void * value is expected and it is printed in hexadecimal notation with prefix |
* (as with \%#X or \%#x for 32bit or \%#X / \%#x for 64bit long pointers). |
* |
* - b Print value as unsigned binary number. Prefix is not printed by default. (Nonstandard extension.) |
* |
* - o Print value as unsigned octal number. Prefix is not printed by default. |
* |
* - d,i Print signed decimal number. There is no difference between d and i conversion. |
* |
* - u Print unsigned decimal number. |
* |
* - X, x Print hexadecimal number with upper- or lower-case. Prefix is not printed by default. |
* |
* All other characters from fmt except the formatting directives |
* are printed in verbatim. |
* |
* @param fmt Formatting NULL terminated string. |
* @return Number of printed characters or negative value on failure. |
*/ |
int printf_core(const char *fmt, struct printf_spec *ps, va_list ap)
{
	int irqpri;
	int i = 0, j = 0; /**< i is index of currently processed char from fmt, j is index to the first not printed nonformating character */
	int end;
	int counter; /**< counter of printed characters */
	int retval; /**< used to store return values from called functions */
	char c;
	qualifier_t qualifier; /* type of argument */
	int base; /**< base in which will be parameter (numbers only) printed */
	__u64 number; /**< argument value */
	size_t size; /**< byte size of integer parameter */
	int width, precision;
	__u64 flags;
	
	counter = 0;
	
	/* Serialize all output and keep it atomic w.r.t. interrupts. */
	irqpri = interrupts_disable();
	spinlock_lock(&printflock);

	while ((c = fmt[i])) {
		/* control character */
		if (c == '%' ) {
			/* print common characters if any processed */
			if (i > j) {
				if ((retval = printf_putnchars(&fmt[j], (size_t)(i - j), ps)) < 0) { /* error */
					counter = -counter;
					goto out;
				}
				counter += retval;
			}
		
			j = i;
			/* parse modifiers */
			flags = 0;
			end = 0;
			
			do {
				++i;
				switch (c = fmt[i]) {
				case '#': flags |= __PRINTF_FLAG_PREFIX; break;
				case '-': flags |= __PRINTF_FLAG_LEFTALIGNED; break;
				case '+': flags |= __PRINTF_FLAG_SHOWPLUS; break;
				case ' ': flags |= __PRINTF_FLAG_SPACESIGN; break;
				case '0': flags |= __PRINTF_FLAG_ZEROPADDED; break;
				default: end = 1;
				};	
				
			} while (end == 0);	
			
			/* width & '*' operator */
			width = 0;
			if (isdigit(fmt[i])) {
				while (isdigit(fmt[i])) {
					width *= 10;
					width += fmt[i++] - '0';
				}
			} else if (fmt[i] == '*') {
				/* get width value from argument list*/
				i++;
				width = (int)va_arg(ap, int);
				if (width < 0) {
					/* negative width means to set '-' flag */
					width *= -1;
					flags |= __PRINTF_FLAG_LEFTALIGNED;
				}
			}
			
			/* precision and '*' operator */	
			precision = 0;
			if (fmt[i] == '.') {
				++i;
				if (isdigit(fmt[i])) {
					while (isdigit(fmt[i])) {
						precision *= 10;
						precision += fmt[i++] - '0';
					}
				} else if (fmt[i] == '*') {
					/* get precision value from argument list*/
					i++;
					precision = (int)va_arg(ap, int);
					if (precision < 0) {
						/* negative precision means to ignore it */
						precision = 0;
					}
				}
			}

			switch (fmt[i++]) {
			/** TODO: unimplemented qualifiers:
			 * t ptrdiff_t - ISO C 99
			 */
			case 'h': /* char or short */
				qualifier = PrintfQualifierShort;
				if (fmt[i] == 'h') {
					i++;
					qualifier = PrintfQualifierByte;
				}
				break;
			case 'l': /* long or long long*/
				qualifier = PrintfQualifierLong;
				if (fmt[i] == 'l') {
					i++;
					qualifier = PrintfQualifierLongLong;
				}
				break;
			case 'z': /* __native */
				qualifier = PrintfQualifierNative;
				break;
			default:
				qualifier = PrintfQualifierInt; /* default type */
				--i;
			}	
			
			base = 10;

			switch (c = fmt[i]) {
			/*
			 * String and character conversions.
			 */
			case 's':
				if ((retval = print_string(va_arg(ap, char*), width, precision, flags, ps)) < 0) {
					counter = -counter;
					goto out;
				};
				counter += retval;
				j = i + 1; 
				goto next_char;
			case 'c':
				c = va_arg(ap, unsigned int);
				if ((retval = print_char(c, width, flags, ps)) < 0) {
					counter = -counter;
					goto out;
				};
				counter += retval;
				j = i + 1;
				goto next_char;

			/* 
			 * Integer values
			 */
			case 'P': /* pointer */
			       	flags |= __PRINTF_FLAG_BIGCHARS;
				/* fallthrough */
			case 'p':
				flags |= __PRINTF_FLAG_PREFIX;
				base = 16;
				qualifier = PrintfQualifierPointer;
				break;	
			case 'b': 
				base = 2;
				break;
			case 'o':
				base = 8;
				break;
			case 'd':
			case 'i':
				flags |= __PRINTF_FLAG_SIGNED;  
				/* fallthrough */
			case 'u':
				break;
			case 'X':
				flags |= __PRINTF_FLAG_BIGCHARS;
				/* fallthrough */
			case 'x':
				base = 16;
				break;
			/* percentile itself */
			case '%': 
				j = i;
				goto next_char;
			/*
			 * Bad formatting.
			 */
			default:
				/* Unknown format
				 * now, the j is index of '%' so we will
				 * print whole bad format sequence
				 */
				goto next_char;		
			}
		
			/* Print integers */
			/* print number */
			switch (qualifier) {
			case PrintfQualifierByte:
				size = sizeof(unsigned char);
				number = (__u64)va_arg(ap, unsigned int);
				break;
			case PrintfQualifierShort:
				size = sizeof(unsigned short);
				number = (__u64)va_arg(ap, unsigned int);
				break;
			case PrintfQualifierInt:
				size = sizeof(unsigned int);
				number = (__u64)va_arg(ap, unsigned int);
				break;
			case PrintfQualifierLong:
				size = sizeof(unsigned long);
				number = (__u64)va_arg(ap, unsigned long);
				break;
			case PrintfQualifierLongLong:
				size = sizeof(unsigned long long);
				number = (__u64)va_arg(ap, unsigned long long);
				break;
			case PrintfQualifierPointer:
				size = sizeof(void *);
				number = (__u64)(unsigned long)va_arg(ap, void *);
				break;
			case PrintfQualifierNative:
				size = sizeof(__native);
				number = (__u64)va_arg(ap, __native);
				break;
			default: /* Unknown qualifier */
				counter = -counter;
				goto out;
			}
			
			if (flags & __PRINTF_FLAG_SIGNED) {
				/*
				 * Test the argument's sign bit.  The mask must be
				 * built in 64-bit arithmetic: the previous int-typed
				 * expression (0x1 << (size*8 - 1)) was undefined
				 * behavior for 32-bit arguments and plain wrong for
				 * 64-bit ones (shift count >= width of int).
				 */
				if (number & (((__u64) 1) << (size * 8 - 1))) {
					flags |= __PRINTF_FLAG_NEGATIVE;
				
					if (size == sizeof(__u64)) {
						number = -((__s64)number);
					} else {
						/* sign-extend-free two's complement negation */
						number = ~number;
						number &= (~((0xFFFFFFFFFFFFFFFFull) << (size * 8)));
						number++;
					}
				}
			}

			if ((retval = print_number(number, width, precision, base, flags, ps)) < 0) {
				counter = -counter;
				goto out;
			};

			counter += retval;
			j = i + 1;
		}	
next_char:
			
		++i;
	}
	
	if (i > j) {
		if ((retval = printf_putnchars(&fmt[j], (__native)(i - j), ps)) < 0) { /* error */
			counter = -counter;
			goto out;
		}
		counter += retval;
	}
	
out:
	spinlock_unlock(&printflock);
	interrupts_restore(irqpri);
	return counter;
}
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/printf/vsprintf.c |
---|
0,0 → 1,45 |
/* |
* Copyright (C) 2006 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#include <print.h> |
int vsprintf(char *str, const char *fmt, va_list ap) |
{ |
return vsnprintf(str, (size_t)-1, fmt, ap); |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/security/cap.c |
---|
0,0 → 1,185 |
/* |
* Copyright (C) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file cap.c |
* @brief Capabilities control. |
* |
* @see cap.h |
*/ |
#include <security/cap.h> |
#include <proc/task.h> |
#include <synch/spinlock.h> |
#include <syscall/sysarg64.h> |
#include <syscall/copy.h> |
#include <arch.h> |
#include <typedefs.h> |
#include <errno.h> |
/** Set capabilities. |
* |
* @param t Task whose capabilities are to be changed. |
* @param caps New set of capabilities. |
*/ |
void cap_set(task_t *t, cap_t caps) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
t->capabilities = caps; |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
} |
/** Get capabilities. |
* |
* @param t Task whose capabilities are to be returned. |
* @return Task's capabilities. |
*/ |
cap_t cap_get(task_t *t) |
{ |
ipl_t ipl; |
cap_t caps; |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
caps = t->capabilities; |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
return caps; |
} |
/** Grant capabilities to a task. |
* |
* The calling task must have the CAP_CAP capability. |
* |
* @param uspace_taskid_arg Userspace structure holding destination task ID. |
* @param caps Capabilities to grant. |
* |
* @return Zero on success or an error code from @ref errno.h. |
*/ |
__native sys_cap_grant(sysarg64_t *uspace_taskid_arg, cap_t caps) |
{ |
sysarg64_t taskid_arg; |
task_t *t; |
ipl_t ipl; |
int rc; |
if (!(cap_get(TASK) & CAP_CAP)) |
return (__native) EPERM; |
rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); |
if (rc != 0) |
return (__native) rc; |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
t = task_find_by_id((task_id_t) taskid_arg.value); |
if (!t) { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return (__native) ENOENT; |
} |
spinlock_lock(&t->lock); |
cap_set(t, cap_get(t) | caps); |
spinlock_unlock(&t->lock); |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return 0; |
} |
/** Revoke capabilities from a task. |
* |
* The calling task must have the CAP_CAP capability or the caller must |
* attempt to revoke capabilities from itself. |
* |
* @param uspace_taskid_arg Userspace structure holding destination task ID. |
* @param caps Capabilities to revoke. |
* |
* @return Zero on success or an error code from @ref errno.h. |
*/ |
__native sys_cap_revoke(sysarg64_t *uspace_taskid_arg, cap_t caps) |
{ |
sysarg64_t taskid_arg; |
task_t *t; |
ipl_t ipl; |
int rc; |
rc = copy_from_uspace(&taskid_arg, uspace_taskid_arg, sizeof(sysarg64_t)); |
if (rc != 0) |
return (__native) rc; |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
t = task_find_by_id((task_id_t) taskid_arg.value); |
if (!t) { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return (__native) ENOENT; |
} |
/* |
* Revoking capabilities is different from granting them in that |
* a task can revoke capabilities from itself even if it |
* doesn't have CAP_CAP. |
*/ |
if (!(cap_get(TASK) & CAP_CAP) || !(t == TASK)) { |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return (__native) EPERM; |
} |
spinlock_lock(&t->lock); |
cap_set(t, cap_get(t) & ~caps); |
spinlock_unlock(&t->lock); |
spinlock_unlock(&tasks_lock); |
interrupts_restore(ipl); |
return 0; |
} |
/** @} |
*/ |
/tags/0.2.0.2/kernel/generic/src/preempt/preemption.c |
---|
0,0 → 1,61 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** |
* @file preemption.c |
* @brief Preemption control. |
*/ |
#include <preemption.h> |
#include <arch.h> |
#include <arch/asm.h> |
#include <arch/barrier.h> |
#include <debug.h> |
/** Increment preemption disabled counter.
 *
 * Increments the per-CPU preemption_disabled counter (reached via the
 * THE structure). Calls nest: each preemption_disable() must be paired
 * with a matching preemption_enable().
 */
void preemption_disable(void)
{
	THE->preemption_disabled++;
	/*
	 * Barrier AFTER the increment: the counter must be observably raised
	 * before any subsequent accesses to preemption-protected data.
	 */
	memory_barrier();
}
/** Decrement preemption disabled counter.
 *
 * Undoes one prior preemption_disable() call. The counter must be
 * non-zero on entry (asserted), i.e. enable calls may not outnumber
 * disable calls.
 */
void preemption_enable(void)
{
	ASSERT(THE->preemption_disabled);
	/*
	 * Barrier BEFORE the decrement: all accesses to preemption-protected
	 * data must complete before the counter is observably lowered.
	 */
	memory_barrier();
	THE->preemption_disabled--;
}
/** @} |
*/ |