/SPARTAN/trunk/src/Makefile.config |
---|
0,0 → 1,31 |
ARCH=ia32 |
#ARCH=mips |
#ARCH=ia64 |
#ARCH=powerpc |
#ARCH=amd64 |
# Support for symmetric multiprocessors
#SMP=__SMP__ |
# Improved support for hyperthreading |
HT=__HT__ |
# Deadlock detection support for spinlocks. |
DEBUG_SPINLOCK=DEBUG_SPINLOCK |
# Uncomment if you want to compile in userspace support |
#USERSPACE=__USERSPACE__ |
# Uncomment if you want to run in the test mode |
#TEST=__TEST__ |
TEST_FILE=test.c |
# Select which test you want to run
#TEST_DIR=synch/rwlock1/ |
#TEST_DIR=synch/rwlock2/ |
#TEST_DIR=synch/rwlock3/ |
TEST_DIR=synch/rwlock4/ |
#TEST_DIR=synch/rwlock5/ |
#TEST_DIR=synch/semaphore1/ |
#TEST_DIR=synch/semaphore2/ |
/SPARTAN/trunk/src/main/main.c |
---|
0,0 → 1,210 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <arch/asm.h> |
#include <arch/context.h> |
#include <print.h> |
#include <config.h> |
#include <time/clock.h> |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/vm.h> |
#include <main/kinit.h> |
#include <cpu.h> |
#include <mm/heap.h> |
#ifdef __SMP__ |
#include <arch/smp/apic.h> |
#include <arch/smp/mps.h> |
#endif /* __SMP__ */ |
#include <smp/smp.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/tlb.h> |
#include <synch/waitq.h> |
#include <arch.h> |
char *project = "SPARTAN kernel";
char *copyright = "Copyright (C) 2001-2005 Jakub Jermar";

config_t config;    /* global kernel configuration (CPU counts, memory layout) */
context_t ctx;      /* bootstrap context used to jump onto the separated stack */

/*
 * These 'hardcoded' variables will be initialised by
 * the linker with appropriate sizes and addresses.
 */
__address hardcoded_load_address = 0;   /* physical/virtual base the kernel was loaded at */
__u32 hardcoded_ktext_size = 0;         /* size of the kernel text segment */
__u32 hardcoded_kdata_size = 0;         /* size of the kernel data segment */

void main_bsp(void);
void main_ap(void);

/*
 * These two functions prevent stack from underflowing during the
 * kernel boot phase when SP is set to the very top of the reserved
 * space. The stack could get corrupted by a fooled compiler-generated
 * pop sequence otherwise.
 */
static void main_bsp_separated_stack(void);
static void main_ap_separated_stack(void);
/*
 * Executed by the bootstrap processor. cpu_priority_high()'d
 *
 * Fills in the global config, then switches SP to the top of the
 * reserved kernel area and continues in main_bsp_separated_stack().
 */
void main_bsp(void)
{
    config.cpu_count = 1;
    config.cpu_active = 1;
    config.base = hardcoded_load_address;
    config.memory_size = CONFIG_MEMORY_SIZE;
    /* Reserve room for text, data, the kernel heap and the boot stack. */
    config.kernel_size = hardcoded_ktext_size + hardcoded_kdata_size + CONFIG_HEAP_SIZE + CONFIG_STACK_SIZE;

    context_save(&ctx); /* There is no need to save FPU context */
    /* -8 keeps SP safely inside the reserved area; presumably also an alignment choice — TODO confirm */
    ctx.sp = config.base + config.kernel_size - 8;
    ctx.pc = (__address) main_bsp_separated_stack;
    context_restore(&ctx);  /* There is no need to load FPU context */
    /* not reached */
}
/*
 * Continuation of main_bsp() on the freshly-installed stack.
 * Brings up memory management, SMP, scheduling, and finally hands
 * control over to the kinit thread via scheduler().
 */
void main_bsp_separated_stack(void) {
    vm_t *m;
    task_t *k;
    thread_t *t;

    arch_pre_mm_init();
    /* The heap starts right after the kernel text and data segments. */
    heap_init(config.base + hardcoded_ktext_size + hardcoded_kdata_size, CONFIG_HEAP_SIZE);
    frame_init();
    page_init();
    tlb_init();
    arch_post_mm_init();

    printf("%s, %s\n", project, copyright);
    printf("%L: hardcoded_ktext_size=%dK, hardcoded_kdata_size=%dK\n",
        config.base, hardcoded_ktext_size/1024, hardcoded_kdata_size/1024);

    arch_late_init();

    smp_init();
    printf("config.cpu_count=%d\n", config.cpu_count);

    cpu_init();
    calibrate_delay_loop();
    timeout_init();
    scheduler_init();
    task_init();
    thread_init();

    /*
     * Create kernel vm mapping.
     */
    m = vm_create();
    if (!m) panic("can't create kernel vm address space\n");

    /*
     * Create kernel task.
     */
    k = task_create(m);
    if (!k) panic("can't create kernel task\n");

    /*
     * Create the first thread.
     */
    t = thread_create(kinit, NULL, k, 0);
    if (!t) panic("can't create kinit thread\n");
    thread_ready(t);

    /*
     * This call to scheduler() will return to kinit,
     * starting the thread of kernel threads.
     */
    scheduler();
    /* not reached */
}
#ifdef __SMP__
/*
 * Executed by application processors. cpu_priority_high()'d
 * Temporary stack is at ctx.sp which was set during BP boot.
 */
void main_ap(void)
{
    /*
     * Incrementing the active CPU counter will guarantee that the
     * pm_init() will not attempt to build GDT and IDT tables again.
     * Neither frame_init() will do the complete thing. Neither cpu_init()
     * will do.
     */
    config.cpu_active++;

    arch_pre_mm_init();
    frame_init();
    page_init();
    arch_post_mm_init();

    cpu_init();
    calibrate_delay_loop();
    l_apic_init();
    l_apic_debug();

    /*
     * If we woke kmp up before we left the kernel stack, we could
     * collide with another CPU coming up. To prevent this, we
     * switch to this cpu's private stack prior to waking kmp up.
     */
    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
    CPU->saved_context.pc = (__address) main_ap_separated_stack;
    context_restore(&CPU->saved_context);   /* There is no need to load FPU context */
    /* not reached */
}
/*
 * Continuation of main_ap() on this CPU's private stack.
 * Finishes per-CPU setup, signals the BSP-side waiter, and enters
 * the scheduler.
 */
void main_ap_separated_stack(void)
{
    /*
     * Configure timeouts for this cpu.
     */
    timeout_init();

    /* Let the kmp thread know this AP has completed its boot. */
    waitq_wakeup(&ap_completion_wq, WAKEUP_FIRST);
    scheduler();
    /* not reached */
}
#endif /* __SMP__*/
/SPARTAN/trunk/src/main/kinit.c |
---|
0,0 → 1,154 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <config.h> |
#include <arch.h> |
#include <main/kinit.h> |
#include <main/uinit.h> |
#include <proc/scheduler.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <panic.h> |
#include <func.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <mm/vm.h> |
#ifdef __SMP__ |
#include <arch/smp/mps.h> |
#endif /* __SMP__ */ |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#ifdef __TEST__ |
#include <test.h> |
#endif /* __TEST__ */ |
/*
 * First kernel thread. Brings up the remaining CPUs (SMP), spawns
 * per-CPU load balancing threads, optionally creates the first
 * userspace task and/or runs the compiled-in test, and then idles.
 *
 * arg is unused.
 */
void kinit(void *arg)
{
    vm_t *m;        /* used only when __USERSPACE__ is defined */
    vm_area_t *a;   /* used only when __USERSPACE__ is defined */
    task_t *u;      /* used only when __USERSPACE__ is defined */
    thread_t *t;
    int i;

    cpu_priority_high();

#ifdef __SMP__
    if (config.cpu_count > 1) {
        /*
         * Create the kmp thread and wait for its completion.
         * cpu1 through cpuN-1 will come up consecutively and
         * not mess together with kcpulb and klwtm threads.
         * Just a beautification.
         */
        if (t = thread_create(kmp, NULL, TASK, 0)) {
            spinlock_lock(&t->lock);
            /* Wire kmp to the bootstrap CPU so it cannot migrate. */
            t->flags |= X_WIRED;
            t->cpu = &cpus[0];
            spinlock_unlock(&t->lock);
            thread_ready(t);
            /* waitq_sleep() lowers priority; raise it again afterwards. */
            waitq_sleep(&kmp_completion_wq);
            cpu_priority_high();
        }
        else panic("thread_create/kmp");
    }
#endif /* __SMP__ */

    /*
     * Now that all CPUs are up, we can report what we've found.
     */
    for (i = 0; i < config.cpu_count; i++) {
        if (cpus[i].active)
            cpu_print_report(&cpus[i]);
        else
            printf("cpu%d: not active\n", i);
    }

#ifdef __SMP__
    if (config.cpu_count > 1) {
        /*
         * For each CPU, create its load balancing thread.
         * NOTE(review): a kcpulb is created and wired even for CPUs
         * reported inactive above — verify this is intended.
         */
        for (i = 0; i < config.cpu_count; i++) {
            if (t = thread_create(kcpulb, NULL, TASK, 0)) {
                spinlock_lock(&t->lock);
                t->flags |= X_WIRED;
                t->cpu = &cpus[i];
                spinlock_unlock(&t->lock);
                thread_ready(t);
            }
            else panic("thread_create/kcpulb");
        }
    }
#endif /* __SMP__ */

    cpu_priority_low();

#ifdef __USERSPACE__
    /*
     * Create the first user task.
     */
    m = vm_create();
    if (!m) panic(PANIC "vm_create");
    u = task_create(m);
    if (!u) panic(PANIC "task_create");
    t = thread_create(uinit, NULL, u, THREAD_USER_STACK);
    if (!t) panic(PANIC "thread_create");

    /*
     * Create the text vm_area and copy the userspace code there.
     * Only up to one page of code is copied.
     */
    a = vm_area_create(m, VMA_TEXT, 1, UTEXT_ADDRESS);
    if (!a) panic(PANIC "vm_area_create: vm_text");
    memcopy((__address) utext, PA2KA(a->mapping[0]), utext_size < PAGE_SIZE ? utext_size : PAGE_SIZE);

    /*
     * Create the data vm_area.
     */
    a = vm_area_create(m, VMA_STACK, 1, USTACK_ADDRESS);
    if (!a) panic(PANIC "vm_area_create: vm_stack");

    thread_ready(t);
#endif /* __USERSPACE__ */

#ifdef __TEST__
    test();
#endif /* __TEST__ */

    /* Idle forever, printing a heartbeat roughly once a second. */
    while (1) {
        thread_usleep(1000000);
        printf("kinit... ");
    }
}
/SPARTAN/trunk/src/main/uinit.c |
---|
0,0 → 1,38 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <main/uinit.h> |
#include <arch/types.h> |
#include <proc/thread.h> |
#include <userspace.h> |
/*
 * Entry point of the first userspace-bound thread: announces itself
 * while still in kernel mode, then drops to userspace via userspace().
 *
 * arg is unused.
 */
void uinit(void *arg)
{
    printf("USER task, uinit thread: kernel mode\n");
    userspace();
}
/SPARTAN/trunk/src/clean.ia32 |
---|
0,0 → 1,3 |
#! /bin/sh
# Convenience wrapper: remove all build artifacts for the ia32 port.
make dist-clean ARCH=ia32
Property changes: |
Added: svn:executable |
+* |
\ No newline at end of property |
/SPARTAN/trunk/src/clean.ia64 |
---|
0,0 → 1,3 |
#! /bin/sh
# Convenience wrapper: remove all build artifacts for the ia64 port.
make dist-clean ARCH=ia64
Property changes: |
Added: svn:executable |
+* |
\ No newline at end of property |
/SPARTAN/trunk/src/clean.mips |
---|
0,0 → 1,3 |
#! /bin/sh
# Convenience wrapper: remove all build artifacts for the mips port.
make dist-clean ARCH=mips
Property changes: |
Added: svn:executable |
+* |
\ No newline at end of property |
/SPARTAN/trunk/src/build.ia32 |
---|
0,0 → 1,3 |
#! /bin/sh
# Convenience wrapper: build the kernel for the ia32 port.
make all ARCH=ia32
Property changes: |
Added: svn:executable |
+* |
\ No newline at end of property |
/SPARTAN/trunk/src/build.ia64 |
---|
0,0 → 1,3 |
#! /bin/sh
# Convenience wrapper: build the kernel for the ia64 port.
make all ARCH=ia64
Property changes: |
Added: svn:executable |
+* |
\ No newline at end of property |
/SPARTAN/trunk/src/build.mips |
---|
0,0 → 1,3 |
#! /bin/sh
# Convenience wrapper: build the kernel for the mips port.
make all ARCH=mips
Property changes: |
Added: svn:executable |
+* |
\ No newline at end of property |
/SPARTAN/trunk/src/debug/print.c |
---|
0,0 → 1,168 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <putchar.h> |
#include <print.h> |
#include <synch/spinlock.h> |
#include <arch/arg.h> |
/* Digit characters for number printing; supports bases up to 16. */
static char digits[] = "0123456789abcdef";

/* Serializes printf() output from concurrent CPUs. */
static spinlock_t printflock;
/*
 * Emit the NUL-terminated string str, one character at a time,
 * via putchar(). str must not be NULL.
 */
void print_str(char *str)
{
    while (*str != '\0')
        putchar(*str++);
}
/* |
* This is a universal function for printing hexadecimal numbers of fixed |
* width. |
*/ |
void print_fixed_hex(__native num, int width) |
{ |
int i; |
for (i = width*8 - 4; i >= 0; i -= 4) |
putchar(digits[(num>>i) & 0xf]); |
} |
/* |
* This is a universal function for printing decimal and hexadecimal numbers. |
* It prints only significant digits. |
*/ |
void print_number(__native num, int base) |
{ |
char d[sizeof(__native)*8+1]; /* this is good enough even for base == 2 */ |
int i = sizeof(__native)*8-1; |
do { |
d[i--] = digits[num % base]; |
} while (num /= base); |
d[sizeof(__native)*8] = 0; |
print_str(&d[i + 1]); |
} |
/*
 * This is our function for printing formatted text.
 * It's much simpler than the user-space one.
 * We are grateful for this function.
 *
 * Supported conversions:
 *   %%             literal percent sign
 *   %s, %c         string and character
 *   %l/%L, %w/%W, %b/%B
 *                  fixed-width hex (32/16/8 bits); uppercase prepends "0x"
 *   %d, %x/%X      decimal / hexadecimal with significant digits only
 * An unknown conversion aborts processing of the remaining format string.
 *
 * Output is serialized by printflock with interrupts disabled.
 */
void printf(char *fmt, ...)
{
    int irqpri, i = 0;
    va_list ap;
    char c;

    va_start(ap, fmt);

    /* Disable interrupts and take the lock so output is not interleaved. */
    irqpri = cpu_priority_high();
    spinlock_lock(&printflock);

    while (c = fmt[i++]) {
        switch (c) {

            /* control character */
            case '%':
                switch (c = fmt[i++]) {

                    /* percent sign itself; break falls out to the outer default, printing c */
                    case '%':
                        break;

                    /*
                     * String and character conversions.
                     */
                    case 's':
                        print_str(va_arg(ap, char_ptr));
                        goto loop;

                    case 'c':
                        /* fetch the char, then fall out to the outer default to print it */
                        c = (char) va_arg(ap, int);
                        break;

                    /*
                     * Hexadecimal conversions with fixed width.
                     * Uppercase variants print "0x" and fall through.
                     */
                    case 'L':
                        print_str("0x");
                    case 'l':
                        print_fixed_hex(va_arg(ap, __native), INT32);
                        goto loop;

                    case 'W':
                        print_str("0x");
                    case 'w':
                        print_fixed_hex(va_arg(ap, __native), INT16);
                        goto loop;

                    case 'B':
                        print_str("0x");
                    case 'b':
                        print_fixed_hex(va_arg(ap, __native), INT8);
                        goto loop;

                    /*
                     * Decimal and hexadecimal conversions.
                     */
                    case 'd':
                        print_number(va_arg(ap, __native), 10);
                        goto loop;

                    case 'X':
                        print_str("0x");
                    case 'x':
                        print_number(va_arg(ap, __native), 16);
                        goto loop;

                    /*
                     * Bad formatting.
                     */
                    default:
                        goto out;
                }

            /* ordinary characters (and '%'/'c' results) are printed verbatim */
            default: putchar(c);
        }
loop:
        ;
    }

out:
    spinlock_unlock(&printflock);
    cpu_priority_restore(irqpri);
    va_end(ap);
}
/SPARTAN/trunk/src/proc/task.c |
---|
0,0 → 1,69 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/vm.h> |
#include <mm/heap.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <panic.h> |
#include <list.h> |
/* Protects tasks_head, the list of all existing tasks. */
spinlock_t tasks_lock;
link_t tasks_head;

/*
 * Initialize the task subsystem: no current task yet,
 * empty global task list.
 */
void task_init(void)
{
    TASK = NULL;
    spinlock_initialize(&tasks_lock);
    list_initialize(&tasks_head);
}
/*
 * Allocate and initialize a new task using the vm mapping m,
 * and link it into the global task list.
 *
 * Returns the new task, or NULL if allocation failed.
 */
task_t *task_create(vm_t *m)
{
    task_t *ta = (task_t *) malloc(sizeof(task_t));
    pri_t pri;

    if (!ta)
        return NULL;

    spinlock_initialize(&ta->lock);
    list_initialize(&ta->th_head);
    list_initialize(&ta->tasks_link);
    ta->vm = m;

    /* Publish the task on the global list under tasks_lock. */
    pri = cpu_priority_high();
    spinlock_lock(&tasks_lock);
    list_append(&ta->tasks_link, &tasks_head);
    spinlock_unlock(&tasks_lock);
    cpu_priority_restore(pri);

    return ta;
}
/SPARTAN/trunk/src/proc/scheduler.c |
---|
0,0 → 1,511 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <cpu.h> |
#include <mm/vm.h> |
#include <config.h> |
#include <context.h> |
#include <func.h> |
#include <arch.h> |
#include <arch/asm.h> |
#include <list.h> |
#include <typedefs.h> |
#include <mm/page.h> |
#include <synch/spinlock.h> |
#ifdef __SMP__ |
#include <arch/smp/atomic.h> |
#endif /* __SMP__ */ |
/*
 * NOTE ON ATOMIC READS:
 * Some architectures cannot read __u32 atomically.
 * For that reason, all accesses to nrdy and the likes must be protected by spinlock.
 */
spinlock_t nrdylock;    /* protects nrdy */
volatile int nrdy;      /* system-wide count of ready threads */

/*
 * Initialize the scheduler subsystem.
 */
void scheduler_init(void)
{
    spinlock_initialize(&nrdylock);
}
/*
 * Pick the next thread for this CPU, blocking (cpu_sleep) until one is
 * available. Scans ready queues from highest (index 0) to lowest priority.
 * Returns with interrupts disabled — cpu_priority_high()'d.
 */
struct thread *find_best_thread(void)
{
    thread_t *t;
    runq_t *r;
    int i, n;

loop:
    cpu_priority_high();

    spinlock_lock(&CPU->lock);
    n = CPU->nrdy;
    spinlock_unlock(&CPU->lock);

    cpu_priority_low();

    if (n == 0) {
#ifdef __SMP__
        /*
         * If the load balancing thread is not running, wake it up and
         * set CPU-private flag that the kcpulb has been started.
         */
        if (test_and_set(&CPU->kcpulbstarted) == 0) {
            waitq_wakeup(&CPU->kcpulb_wq, 0);
            goto loop;
        }
#endif /* __SMP__ */

        /*
         * For there was nothing to run, the CPU goes to sleep
         * until a hardware interrupt or an IPI comes.
         * This improves energy saving and hyperthreading.
         * On the other hand, several hardware interrupts can be ignored.
         */
        cpu_sleep();
        goto loop;
    }

    cpu_priority_high();

    for (i = 0; i<RQ_COUNT; i++) {
        r = &CPU->rq[i];
        spinlock_lock(&r->lock);
        if (r->n == 0) {
            /*
             * If this queue is empty, try a lower-priority queue.
             */
            spinlock_unlock(&r->lock);
            continue;
        }

        /* Update both the global and the per-CPU ready-thread counters. */
        spinlock_lock(&nrdylock);
        nrdy--;
        spinlock_unlock(&nrdylock);

        spinlock_lock(&CPU->lock);
        CPU->nrdy--;
        spinlock_unlock(&CPU->lock);

        r->n--;

        /*
         * Take the first thread from the queue.
         */
        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
        list_remove(&t->rq_link);

        spinlock_unlock(&r->lock);

        spinlock_lock(&t->lock);
        t->cpu = CPU;

        /* Lower-priority queues get a longer time quantum. */
        t->ticks = us2ticks((i+1)*10000);
        t->pri = i; /* eventually correct rq index */

        /*
         * Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
         */
        t->flags &= ~X_STOLEN;
        spinlock_unlock(&t->lock);

        return t;
    }
    /* All queues drained between the nrdy check and the scan; retry. */
    goto loop;
}
/*
 * This function prevents low priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects respective pointers
 * so that in result threads with 'pri' greater or equal 'start' are
 * moved to a higher-priority queue.
 *
 * Relinking happens only after CPU->needs_relink exceeds NEEDS_RELINK_MAX.
 */
void relink_rq(int start)
{
    link_t head;
    runq_t *r;
    int i, n;

    list_initialize(&head);
    spinlock_lock(&CPU->lock);
    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
        for (i = start; i<RQ_COUNT-1; i++) {
            /* remember and empty rq[i + 1] */
            r = &CPU->rq[i + 1];
            spinlock_lock(&r->lock);
            list_concat(&head, &r->rq_head);
            n = r->n;
            r->n = 0;
            spinlock_unlock(&r->lock);

            /* append rq[i + 1] to rq[i] */
            r = &CPU->rq[i];
            spinlock_lock(&r->lock);
            list_concat(&r->rq_head, &head);
            r->n += n;
            spinlock_unlock(&r->lock);
        }
        CPU->needs_relink = 0;
    }
    spinlock_unlock(&CPU->lock);
}
/*
 * The scheduler.
 *
 * Saves the current thread's context (if any), then switches to the
 * CPU-private stack and continues in scheduler_separated_stack().
 * When the thread is later rescheduled, execution resumes at the
 * context_save() below and the function returns to the caller.
 */
void scheduler(void)
{
    volatile pri_t pri;     /* volatile: must survive the context switch */

    pri = cpu_priority_high();

    if (haltstate)
        halt();

    if (THREAD) {
        spinlock_lock(&THREAD->lock);
        if (!context_save(&THREAD->saved_context)) {
            /*
             * This is the place where threads leave scheduler();
             */
            before_thread_runs();
            spinlock_unlock(&THREAD->lock);
            cpu_priority_restore(THREAD->saved_context.pri);
            return;
        }
        /* Remember the priority level to restore when the thread resumes. */
        THREAD->saved_context.pri = pri;
    }

    /*
     * We may not keep the old stack.
     * Reason: If we kept the old stack and got blocked, for instance, in
     * find_best_thread(), the old thread could get rescheduled by another
     * CPU and overwrite the part of its own stack that was also used by
     * the scheduler on this CPU.
     *
     * Moreover, we have to bypass the compiler-generated POP sequence
     * which is fooled by SP being set to the very top of the stack.
     * Therefore the scheduler() function continues in
     * scheduler_separated_stack().
     */
    context_save(&CPU->saved_context);
    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
    CPU->saved_context.pc = (__address) scheduler_separated_stack;
    context_restore(&CPU->saved_context);
    /* not reached */
}
/*
 * Continuation of scheduler() on the CPU-private stack.
 * Puts the outgoing thread away according to its state, picks the next
 * thread via find_best_thread(), switches address spaces if the task
 * changes, and restores the new thread's context.
 *
 * Entered with THREAD->lock held (if THREAD != NULL) and interrupts
 * disabled.
 */
void scheduler_separated_stack(void)
{
    int priority;

    if (THREAD) {
        switch (THREAD->state) {
            case Running:
                /* Preempted: put it back on a ready queue. */
                THREAD->state = Ready;
                spinlock_unlock(&THREAD->lock);
                thread_ready(THREAD);
                break;

            case Exiting:
                /* Release the thread's stacks and all bookkeeping. */
                frame_free((__address) THREAD->kstack);
                if (THREAD->ustack) {
                    frame_free((__address) THREAD->ustack);
                }

                /*
                 * Detach from the containing task.
                 */
                spinlock_lock(&TASK->lock);
                list_remove(&THREAD->th_link);
                spinlock_unlock(&TASK->lock);

                spinlock_unlock(&THREAD->lock);

                spinlock_lock(&threads_lock);
                list_remove(&THREAD->threads_link);
                spinlock_unlock(&threads_lock);

                free(THREAD);

                break;

            case Sleeping:
                /*
                 * Prefer the thread after it's woken up.
                 */
                THREAD->pri = -1;

                /*
                 * We need to release wq->lock which we locked in waitq_sleep().
                 * Address of wq->lock is kept in THREAD->sleep_queue.
                 */
                spinlock_unlock(&THREAD->sleep_queue->lock);

                /*
                 * Check for possible requests for out-of-context invocation.
                 */
                if (THREAD->call_me) {
                    THREAD->call_me(THREAD->call_me_with);
                    THREAD->call_me = NULL;
                    THREAD->call_me_with = NULL;
                }

                spinlock_unlock(&THREAD->lock);

                break;

            default:
                /*
                 * Entering state is unexpected.
                 */
                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                break;
        }
        THREAD = NULL;
    }

    THREAD = find_best_thread();

    /* Read the thread's priority without holding it across relink_rq(). */
    spinlock_lock(&THREAD->lock);
    priority = THREAD->pri;
    spinlock_unlock(&THREAD->lock);

    relink_rq(priority);

    spinlock_lock(&THREAD->lock);

    /*
     * If both the old and the new task are the same, lots of work is avoided.
     */
    if (TASK != THREAD->task) {
        vm_t *m1 = NULL;
        vm_t *m2;

        if (TASK) {
            spinlock_lock(&TASK->lock);
            m1 = TASK->vm;
            spinlock_unlock(&TASK->lock);
        }

        spinlock_lock(&THREAD->task->lock);
        m2 = THREAD->task->vm;
        spinlock_unlock(&THREAD->task->lock);

        /*
         * Note that it is possible for two tasks to share one vm mapping.
         */
        if (m1 != m2) {
            /*
             * Both tasks and vm mappings are different.
             * Replace the old one with the new one.
             */
            if (m1) {
                vm_uninstall(m1);
            }
            vm_install(m2);
        }
        TASK = THREAD->task;
    }

    THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

    /* Resume the chosen thread; THREAD->lock is released by the thread itself. */
    context_restore(&THREAD->saved_context);
    /* not reached */
}
#ifdef __SMP__ |
/* |
* This is the load balancing thread. |
* It supervises thread supplies for the CPU it's wired to. |
*/ |
void kcpulb(void *arg) |
{ |
thread_t *t; |
int count, i, j, k = 0; |
pri_t pri; |
loop: |
/* |
* Sleep until there's some work to do. |
*/ |
waitq_sleep(&CPU->kcpulb_wq); |
not_satisfied: |
/* |
* Calculate the number of threads that will be migrated/stolen from |
* other CPU's. Note that situation can have changed between two |
* passes. Each time get the most up to date counts. |
*/ |
pri = cpu_priority_high(); |
spinlock_lock(&CPU->lock); |
count = nrdy / config.cpu_active; |
count -= CPU->nrdy; |
spinlock_unlock(&CPU->lock); |
cpu_priority_restore(pri); |
if (count <= 0) |
goto satisfied; |
/* |
* Searching least priority queues on all CPU's first and most priority queues on all CPU's last. |
*/ |
for (j=RQ_COUNT-1; j >= 0; j--) { |
for (i=0; i < config.cpu_active; i++) { |
link_t *l; |
runq_t *r; |
cpu_t *cpu; |
cpu = &cpus[(i + k) % config.cpu_active]; |
r = &cpu->rq[j]; |
/* |
* Not interested in ourselves. |
* Doesn't require interrupt disabling for kcpulb is X_WIRED. |
*/ |
if (CPU == cpu) |
continue; |
restart: pri = cpu_priority_high(); |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
spinlock_unlock(&r->lock); |
cpu_priority_restore(pri); |
continue; |
} |
t = NULL; |
l = r->rq_head.prev; /* search rq from the back */ |
while (l != &r->rq_head) { |
t = list_get_instance(l, thread_t, rq_link); |
/* |
* We don't want to steal CPU-wired threads neither threads already stolen. |
* The latter prevents threads from migrating between CPU's without ever being run. |
*/ |
spinlock_lock(&t->lock); |
if (!(t->flags & (X_WIRED | X_STOLEN))) { |
/* |
* Remove t from r. |
*/ |
spinlock_unlock(&t->lock); |
/* |
* Here we have to avoid deadlock with relink_rq(), |
* because it locks cpu and r in a different order than we do. |
*/ |
if (!spinlock_trylock(&cpu->lock)) { |
/* Release all locks and try again. */ |
spinlock_unlock(&r->lock); |
cpu_priority_restore(pri); |
goto restart; |
} |
cpu->nrdy--; |
spinlock_unlock(&cpu->lock); |
spinlock_lock(&nrdylock); |
nrdy--; |
spinlock_unlock(&nrdylock); |
r->n--; |
list_remove(&t->rq_link); |
break; |
} |
spinlock_unlock(&t->lock); |
l = l->prev; |
t = NULL; |
} |
spinlock_unlock(&r->lock); |
if (t) { |
/* |
* Ready t on local CPU |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active); |
#endif |
t->flags |= X_STOLEN; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
cpu_priority_restore(pri); |
if (--count == 0) |
goto satisfied; |
/* |
* We are not satisfied yet, focus on another CPU next time. |
*/ |
k++; |
continue; |
} |
cpu_priority_restore(pri); |
} |
} |
if (CPU->nrdy) { |
/* |
* Be a little bit light-weight and let migrated threads run. |
*/ |
scheduler(); |
} |
else { |
/* |
* We failed to migrate a single thread. |
* Something more sophisticated should be done. |
*/ |
scheduler(); |
} |
goto not_satisfied; |
satisfied: |
/* |
* Tell find_best_thread() to wake us up later again. |
*/ |
CPU->kcpulbstarted = 0; |
goto loop; |
} |
#endif /* __SMP__ */ |
/SPARTAN/trunk/src/proc/thread.c |
---|
0,0 → 1,257 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <mm/heap.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <synch/rwlock.h> |
#include <cpu.h> |
#include <func.h> |
#include <context.h> |
#include <list.h> |
#include <typedefs.h> |
#include <time/clock.h> |
#include <list.h> |
#include <config.h> |
#include <arch/interrupt.h> |
#include <smp/ipi.h> |
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; |
spinlock_t threads_lock; |
link_t threads_head; |
static spinlock_t tidlock; |
__u32 last_tid = 0; |
/* |
* cushion() is provided to ensure that every thread |
* makes a call to thread_exit() when its implementing |
* function returns. |
* |
* cpu_priority_high()'d |
*/ |
void cushion(void) |
{ |
void (*f)(void *) = THREAD->thread_code; |
void *arg = THREAD->thread_arg; |
before_thread_runs(); |
/* this is where each thread wakes up after its creation */ |
spinlock_unlock(&THREAD->lock); |
cpu_priority_low(); |
f(arg); |
thread_exit(); |
/* not reached */ |
} |
void thread_init(void) |
{ |
THREAD = NULL; |
nrdy = 0; |
spinlock_initialize(&threads_lock); |
list_initialize(&threads_head); |
} |
/*
 * Make thread t ready for execution.
 *
 * The thread is demoted one run queue down (unless already in the lowest
 * queue) and appended to that queue — on its wired cpu for X_WIRED
 * threads, on the current cpu otherwise. If the target cpu now holds
 * more ready threads than the per-cpu average, a wakeup IPI is
 * broadcast so idle halted cpus can pick up work.
 *
 * Fixed: removed the local variable `send_ipi`, which was declared and
 * initialized but never read or written again.
 */
void thread_ready(thread_t *t)
{
	cpu_t *cpu;
	runq_t *r;
	pri_t pri;
	int i, avg;

	pri = cpu_priority_high();

	spinlock_lock(&t->lock);

	/* Move to the next lower-priority queue unless already at the bottom. */
	i = (t->pri < RQ_COUNT - 1) ? ++t->pri : t->pri;

	cpu = CPU;
	if (t->flags & X_WIRED) {
		cpu = t->cpu;
	}
	spinlock_unlock(&t->lock);

	/*
	 * Append t to respective ready queue on respective processor.
	 */
	r = &cpu->rq[i];
	spinlock_lock(&r->lock);
	list_append(&t->rq_link, &r->rq_head);
	r->n++;
	spinlock_unlock(&r->lock);

	spinlock_lock(&nrdylock);
	avg = ++nrdy / config.cpu_active;
	spinlock_unlock(&nrdylock);

	spinlock_lock(&cpu->lock);
	if ((++cpu->nrdy) > avg) {
		/*
		 * If there are idle halted CPU's, this will wake them up.
		 */
		ipi_broadcast(VECTOR_WAKEUP_IPI);
	}
	spinlock_unlock(&cpu->lock);

	cpu_priority_restore(pri);
}
/*
 * Create a new thread running func(arg) in the given task.
 *
 * Allocates the thread structure, a kernel stack frame and — when
 * THREAD_USER_STACK is set in flags — a user stack frame, assigns a
 * unique tid, and prepares a saved context that resumes in cushion().
 * The thread starts in state Entering and is registered both in the
 * system-wide thread list and in its task's thread list.
 *
 * Returns the new thread, or NULL if the thread structure could not be
 * allocated (frame_alloc() panics on failure rather than returning).
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
	thread_t *t;
	/* NOTE(review): frame_us is an __address initialized with NULL — an
	 * integer/pointer mismatch that relies on NULL being 0; confirm. */
	__address frame_ks, frame_us = NULL;

	t = (thread_t *) malloc(sizeof(thread_t));
	if (t) {
		pri_t pri;

		spinlock_initialize(&t->lock);

		frame_ks = frame_alloc(FRAME_KA);
		if (THREAD_USER_STACK & flags) {
			frame_us = frame_alloc(0);
		}

		/* Allocate a unique thread id under tidlock. */
		pri = cpu_priority_high();
		spinlock_lock(&tidlock);
		t->tid = ++last_tid;
		spinlock_unlock(&tidlock);
		cpu_priority_restore(pri);

		memsetb(frame_ks, THREAD_STACK_SIZE, 0);
		link_initialize(&t->rq_link);
		link_initialize(&t->wq_link);
		link_initialize(&t->th_link);
		link_initialize(&t->threads_link);
		t->kstack = (__u8 *) frame_ks;
		t->ustack = (__u8 *) frame_us;

		/* First activation will start executing in cushion(). */
		context_save(&t->saved_context);
		t->saved_context.pc = (__address) cushion;
		/* Stack grows down; leave 8 bytes of headroom at the top. */
		t->saved_context.sp = (__address) &t->kstack[THREAD_STACK_SIZE-8];

		pri = cpu_priority_high();
		t->saved_context.pri = cpu_priority_read();
		cpu_priority_restore(pri);

		t->thread_code = func;
		t->thread_arg = arg;
		t->ticks = -1;
		t->pri = -1;	/* start in rq[0] */
		t->cpu = NULL;
		/* NOTE(review): flags only selects a user stack above; t->flags is
		 * cleared here, so X_WIRED must be set separately — confirm. */
		t->flags = 0;
		t->state = Entering;
		t->call_me = NULL;
		t->call_me_with = NULL;

		timeout_initialize(&t->sleep_timeout);
		t->sleep_queue = NULL;
		t->timeout_pending = 0;

		t->rwlock_holder_type = RWLOCK_NONE;

		t->task = task;

		/*
		 * Register this thread in the system-wide list.
		 */
		pri = cpu_priority_high();
		spinlock_lock(&threads_lock);
		list_append(&t->threads_link, &threads_head);
		spinlock_unlock(&threads_lock);

		/*
		 * Attach to the containing task.
		 */
		spinlock_lock(&task->lock);
		list_append(&t->th_link, &task->th_head);
		spinlock_unlock(&task->lock);
		cpu_priority_restore(pri);
	}

	return t;
}
void thread_exit(void) |
{ |
pri_t pri; |
restart: |
pri = cpu_priority_high(); |
spinlock_lock(&THREAD->lock); |
if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */ |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(pri); |
goto restart; |
} |
THREAD->state = Exiting; |
spinlock_unlock(&THREAD->lock); |
scheduler(); |
} |
/*
 * Suspend execution of the current thread for sec seconds.
 *
 * NOTE(review): sec*1000000 is computed in __u32, so sec above ~4294
 * overflows — confirm callers never pass values that large.
 */
void thread_sleep(__u32 sec)
{
	thread_usleep(sec*1000000);
}
/*
 * Suspend execution of current thread for usec microseconds.
 *
 * Sleeps on a private, stack-allocated wait queue that nobody can wake
 * up; the timeout is therefore the only way out. The return value of
 * waitq_sleep_timeout() is deliberately ignored.
 */
void thread_usleep(__u32 usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}
void thread_register_call_me(void (* call_me)(void *), void *call_me_with) |
{ |
pri_t pri; |
pri = cpu_priority_high(); |
spinlock_lock(&THREAD->lock); |
THREAD->call_me = call_me; |
THREAD->call_me_with = call_me_with; |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(pri); |
} |
/SPARTAN/trunk/src/mm/vm.c |
---|
0,0 → 1,186 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/vm.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <mm/tlb.h> |
#include <mm/heap.h> |
#include <arch/mm/page.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <synch/spinlock.h> |
#include <config.h> |
#include <list.h> |
#include <panic.h> |
vm_t *vm_create(void) |
{ |
vm_t *m; |
m = (vm_t *) malloc(sizeof(vm_t)); |
if (m) { |
spinlock_initialize(&m->lock); |
list_initialize(&m->vm_area_head); |
} |
return m; |
} |
/*
 * Destroy the virtual memory map m.
 *
 * NOTE(review): intentionally empty for now — nothing allocated by
 * vm_create() or vm_area_create() is released, so maps currently leak.
 */
void vm_destroy(vm_t *m)
{
}
/*
 * Create a virtual memory area of `size` pages of the given type,
 * starting at virtual address addr (panics if not page-aligned), and
 * insert it into map m. A physical frame is allocated up front for
 * every page of the area.
 *
 * Returns the new area, or NULL when the mapping table cannot be
 * allocated.
 */
vm_area_t *vm_area_create(vm_t *m, vm_type_t type, int size, __address addr)
{
	pri_t pri;
	vm_area_t *a;

	if (addr % PAGE_SIZE)
		panic(PANIC "addr not aligned to a page boundary");

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);

	/*
	 * TODO: test vm_area which is to be created doesn't overlap with an existing one.
	 */

	a = (vm_area_t *) malloc(sizeof(vm_area_t));
	if (a) {
		int i;

		a->mapping = (__address *) malloc(size * sizeof(__address));
		if (!a->mapping) {
			/* Roll back the header allocation and report failure. */
			free(a);
			spinlock_unlock(&m->lock);
			cpu_priority_restore(pri);
			return NULL;
		}

		/* Back each page with a freshly allocated physical frame. */
		for (i=0; i<size; i++)
			a->mapping[i] = frame_alloc(0);

		spinlock_initialize(&a->lock);
		link_initialize(&a->link);

		a->type = type;
		a->size = size;
		a->address = addr;

		list_append(&a->link, &m->vm_area_head);
	}

	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);

	return a;
}
/*
 * Destroy the virtual memory area a.
 *
 * NOTE(review): intentionally empty — neither the mapping table nor the
 * backing frames allocated in vm_area_create() are released yet.
 */
void vm_area_destroy(vm_area_t *a)
{
}
/*
 * Enter all pages of area a into the page tables.
 *
 * Protection bits derive from the area type: text pages are
 * executable and readable, data/stack pages are readable and
 * writable; all are user-accessible, present and cacheable.
 * Panics on an unknown area type (flags would otherwise be used
 * uninitialized).
 */
void vm_area_map(vm_area_t *a)
{
	int i, flags;
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&a->lock);

	switch (a->type) {
		case VMA_TEXT:
			flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
			break;
		case VMA_DATA:
		case VMA_STACK:
			flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
			break;
		default:
			panic(PANIC "unexpected vm_type_t %d", a->type);
	}

	/* Map each page of the area onto its pre-allocated frame. */
	for (i=0; i<a->size; i++)
		map_page_to_frame(a->address + i*PAGE_SIZE, a->mapping[i], flags, 0);

	spinlock_unlock(&a->lock);
	cpu_priority_restore(pri);
}
/*
 * Remove all pages of area a from the page tables by marking each of
 * them not-present.
 */
void vm_area_unmap(vm_area_t *a)
{
	pri_t pri;
	int pg;

	pri = cpu_priority_high();
	spinlock_lock(&a->lock);

	for (pg = 0; pg < a->size; pg++) {
		map_page_to_frame(a->address + pg*PAGE_SIZE, 0, PAGE_NOT_PRESENT, 0);
	}

	spinlock_unlock(&a->lock);
	cpu_priority_restore(pri);
}
/*
 * Map every area of map m into the current address space.
 */
void vm_install(vm_t *m)
{
	pri_t pri;
	link_t *cur;

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);

	cur = m->vm_area_head.next;
	while (cur != &m->vm_area_head) {
		vm_area_map(list_get_instance(cur, vm_area_t, link));
		cur = cur->next;
	}

	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);
}
/*
 * Unmap every area of map m from the current address space.
 *
 * The whole operation is bracketed by a TLB shootdown sequence so that
 * all processors drop stale translations before the mappings are
 * considered gone; the ordering (start, unmap, finalize) matters.
 */
void vm_uninstall(vm_t *m)
{
	link_t *l;
	pri_t pri;

	pri = cpu_priority_high();

	tlb_shootdown_start();

	spinlock_lock(&m->lock);

	for(l = m->vm_area_head.next; l != &m->vm_area_head; l = l->next)
		vm_area_unmap(list_get_instance(l, vm_area_t, link));

	spinlock_unlock(&m->lock);

	tlb_shootdown_finalize();

	cpu_priority_restore(pri);
}
/SPARTAN/trunk/src/mm/tlb.c |
---|
0,0 → 1,81 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/tlb.h> |
#include <smp/ipi.h> |
#include <synch/spinlock.h> |
#include <typedefs.h> |
#include <arch/smp/atomic.h> |
#include <arch/interrupt.h> |
#include <config.h> |
#include <arch.h> |
#ifdef __SMP__ |
static spinlock_t tlblock; |
/*
 * Initialize the spinlock that serializes TLB shootdown sequences.
 */
void tlb_init(void)
{
	spinlock_initialize(&tlblock);
}
/* must be called with interrupts disabled */ |
void tlb_shootdown_start(void) |
{ |
int i; |
CPU->tlb_active = 0; |
spinlock_lock(&tlblock); |
tlb_shootdown_ipi_send(); |
tlb_invalidate(0); /* TODO: use valid ASID */ |
busy_wait: |
for (i = 0; i<config.cpu_count; i++) |
if (cpus[i].tlb_active) |
goto busy_wait; |
} |
/*
 * Finish a shootdown sequence started by tlb_shootdown_start().
 * Releasing tlblock lets the processors blocked in
 * tlb_shootdown_ipi_recv() proceed; then this cpu marks itself
 * active again.
 */
void tlb_shootdown_finalize(void)
{
	spinlock_unlock(&tlblock);
	CPU->tlb_active = 1;
}
/*
 * Broadcast the TLB shootdown IPI to the other processors.
 */
void tlb_shootdown_ipi_send(void)
{
	ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI);
}
/*
 * Handler run by processors receiving the shootdown IPI.
 *
 * Clears tlb_active to tell the initiator this cpu has parked, then
 * uses the lock/unlock pair on tlblock purely as a rendezvous: it
 * blocks here until the initiator releases tlblock in
 * tlb_shootdown_finalize(). Only then is the local TLB invalidated
 * and the cpu marked active again.
 */
void tlb_shootdown_ipi_recv(void)
{
	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);
	spinlock_unlock(&tlblock);
	tlb_invalidate(0);	/* TODO: use valid ASID */
	CPU->tlb_active = 1;
}
#endif /* __SMP__ */ |
/SPARTAN/trunk/src/mm/page.c |
---|
0,0 → 1,36 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/page.h> |
#include <arch/mm/page.h> |
/*
 * Initialize paging: run the architecture-specific setup, then mark
 * the page at virtual address 0 not-present — presumably so that NULL
 * dereferences fault; confirm against arch code.
 */
void page_init(void)
{
	page_arch_init();
	map_page_to_frame(0x0, 0x0, PAGE_NOT_PRESENT, 0);
}
/SPARTAN/trunk/src/mm/frame.c |
---|
0,0 → 1,251 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <arch/types.h> |
#include <func.h> |
#include <mm/heap.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/vm.h> |
#include <arch/mm/page.h> |
#include <config.h> |
#include <memstr.h> |
#include <panic.h> |
#include <synch/spinlock.h> |
__u32 frames; |
__u32 frames_free; |
__u8 *frame_bitmap; |
__u32 frame_bitmap_octets; |
/* |
* This is for kernel address space frames (allocated with FRAME_KA). |
* Their addresses may not interfere with user address space. |
*/ |
__u8 *frame_kernel_bitmap; |
__u32 kernel_frames; |
__u32 kernel_frames_free; |
static spinlock_t framelock; |
/*
 * Initialize physical frame accounting.
 *
 * Only the bootstrap processor (config.cpu_active == 1) builds the
 * global frame bitmap and resets the kernel-pool bookkeeping; then
 * architecture-dependent init runs on every cpu; finally the frames
 * occupied by the kernel image are excluded from allocation.
 */
void frame_init(void)
{
	if (config.cpu_active == 1) {
		/*
		 * The bootstrap processor will allocate all necessary memory for frame allocation.
		 */
		frames = config.memory_size / FRAME_SIZE;
		/* One bit per frame, rounded up to whole octets. */
		frame_bitmap_octets = frames / 8 + (frames % 8 > 0);
		frame_bitmap = (__u8 *) malloc(frame_bitmap_octets);
		if (!frame_bitmap)
			panic(PANIC "malloc/frame_bitmap\n");

		/*
		 * Mark all frames free.
		 */
		memsetb((__address) frame_bitmap, frame_bitmap_octets, 0);
		frames_free = frames;

		/*
		 * Will be properly set up by architecture dependent frame init.
		 */
		frame_kernel_bitmap = NULL;
		kernel_frames_free = 0;
		kernel_frames = 0;
	}

	/*
	 * No frame allocations/reservations prior this point.
	 */
	frame_arch_init();

	if (config.cpu_active == 1) {
		/*
		 * Create the memory address space map. Marked frames and frame
		 * regions cannot be used for allocation.
		 */
		frame_region_not_free(config.base, config.base + config.kernel_size);
	}
}
/* |
* Allocate a frame. |
*/ |
__address frame_alloc(int flags) |
{ |
int i; |
pri_t pri; |
__u8 **frame_bitmap_ptr = &frame_bitmap; |
__u32 *frames_ptr = &frames, *frames_free_ptr = &frames_free; |
if (flags & FRAME_KA) { |
frame_bitmap_ptr = &frame_kernel_bitmap; |
frames_ptr = &kernel_frames; |
frames_free_ptr = &kernel_frames_free; |
} |
loop: |
pri = cpu_priority_high(); |
spinlock_lock(&framelock); |
if (*frames_free_ptr) { |
for (i=0; i < *frames_ptr; i++) { |
int m, n; |
m = i / 8; |
n = i % 8; |
if (((*frame_bitmap_ptr)[m] & (1<<n)) == 0) { |
(*frame_bitmap_ptr)[m] |= (1<<n); |
*frames_free_ptr--; |
if (flags & FRAME_KA) { |
/* |
* frames_free_ptr points to kernel_frames_free |
* It is still necessary to decrement frames_free. |
*/ |
frames_free--; |
} |
spinlock_unlock(&framelock); |
cpu_priority_restore(pri); |
if (flags & FRAME_KA) return PA2KA(i*FRAME_SIZE); |
return i*FRAME_SIZE; |
} |
} |
panic(PANIC "frames_free inconsistent (%d)\n", frames_free); |
} |
spinlock_unlock(&framelock); |
cpu_priority_restore(pri); |
if (flags & FRAME_PANIC) |
panic(PANIC "unable to allocate frame\n"); |
/* TODO: implement sleeping logic here */ |
panic(PANIC "sleep not supported\n"); |
goto loop; |
} |
/* |
* Free a frame. |
*/ |
void frame_free(__address addr) |
{ |
pri_t pri; |
__u32 frame; |
__u32 *frames_free_ptr = &frames_free, *frames_ptr = &frames; |
__u8 **frame_bitmap_ptr = &frame_bitmap; |
if (IS_KA(addr)) { |
frames_free_ptr = &kernel_frames_free; |
frame_bitmap_ptr = &frame_kernel_bitmap; |
} |
pri = cpu_priority_high(); |
spinlock_lock(&framelock); |
frame = IS_KA(addr) ? KA2PA(addr) : addr; |
frame /= FRAME_SIZE; |
if (frame < *frames_ptr) { |
int m, n; |
m = frame / 8; |
n = frame % 8; |
if ((*frame_bitmap_ptr)[m] & (1<<n)) { |
(*frame_bitmap_ptr)[m] &= ~(1<<n); |
*frames_free_ptr++; |
if (IS_KA(addr)) { |
/* |
* frames_free_ptr points to kernel_frames_free |
* It is still necessary to increment frames_free. |
*/ |
frames_free++; |
} |
} |
else panic(PANIC "frame_free: frame already free\n"); |
} |
else panic(PANIC "frame_free: frame number too big\n"); |
spinlock_unlock(&framelock); |
cpu_priority_restore(pri); |
} |
/* |
* Don't use this function for normal allocation. Use frame_alloc() instead. |
* Use this function to declare that some special frame is not free. |
*/ |
void frame_not_free(__address addr) |
{ |
pri_t pri; |
__u32 frame; |
__u32 *frames_ptr = &frames, *frames_free_ptr = &frames_free; |
__u8 **frame_bitmap_ptr = &frame_bitmap; |
pri = cpu_priority_high(); |
spinlock_lock(&framelock); |
frame = IS_KA(addr) ? KA2PA(addr) : addr; |
frame /= FRAME_SIZE; |
if (frame < *frames_ptr) { |
int m, n; |
m = frame / 8; |
n = frame % 8; |
if (((*frame_bitmap_ptr)[m] & (1<<n)) == 0) { |
(*frame_bitmap_ptr)[m] |= (1<<n); |
*frames_free_ptr--; |
if (IS_KA(addr)) { |
/* |
* frames_free_ptr points to kernel_frames_free |
* It is still necessary to decrement frames_free. |
*/ |
frames_free--; |
} |
} |
} |
spinlock_unlock(&framelock); |
cpu_priority_restore(pri); |
} |
/*
 * Mark every frame overlapping the address range [start, stop] as not
 * free. Both endpoints are inclusive at frame granularity.
 */
void frame_region_not_free(__address start, __address stop)
{
	__u32 first = start / FRAME_SIZE;
	__u32 last = stop / FRAME_SIZE;
	__u32 frame;

	for (frame = first; frame <= last; frame++)
		frame_not_free(frame * FRAME_SIZE);
}
/SPARTAN/trunk/src/mm/heap.c |
---|
0,0 → 1,151 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/heap.h> |
#include <synch/spinlock.h> |
#include <func.h> |
#include <memstr.h> |
#include <arch/types.h> |
/* |
* First-fit algorithm. |
* Simple, but hopefully correct. |
* Chunks being freed are tested for mergability with their neighbours. |
*/ |
static chunk_t *chunk0; |
static spinlock_t heaplock; |
/*
 * Initialize the kernel heap: zero the arena and turn the whole of it
 * into one unused chunk.
 */
void heap_init(__address heap, int size)
{
	chunk_t *first = (chunk_t *) heap;

	spinlock_initialize(&heaplock);
	memsetb(heap, size, 0);

	first->used = 0;
	first->size = size - sizeof(chunk_t);
	first->next = NULL;
	first->prev = NULL;

	chunk0 = first;
}
/* |
* Uses first-fit algorithm. |
*/ |
void *malloc(int size) |
{ |
pri_t pri; |
chunk_t *x, *y, *z; |
if (size == 0) |
panic("malloc: zero-size allocation request"); |
x = chunk0; |
pri = cpu_priority_high(); |
spinlock_lock(&heaplock); |
while (x) { |
if (x->used || x->size < size) { |
x = x->next; |
continue; |
} |
x->used = 1; |
/* |
* If the chunk exactly matches required size or if truncating |
* it would not provide enough space for storing a new chunk |
* header plus at least one byte of data, we are finished. |
*/ |
if (x->size < size + sizeof(chunk_t) + 1) { |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
return &x->data[0]; |
} |
/* |
* Truncate x and create a new chunk. |
*/ |
y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t)); |
y->used = 0; |
y->size = x->size - size - sizeof(chunk_t); |
y->prev = x; |
y->next = NULL; |
if (z = x->next) { |
z->prev = y; |
y->next = z; |
} |
x->size = size; |
x->next = y; |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
return &x->data[0]; |
} |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
return NULL; |
} |
/*
 * Return the chunk owning ptr to the heap, coalescing it with free
 * neighbours so the heap does not fragment into slivers.
 *
 * ptr must have been returned by malloc(); panics on NULL or when the
 * chunk's used flag is not exactly 1 (double free / corruption).
 */
void free(void *ptr)
{
	pri_t pri;
	chunk_t *x, *y, *z;

	if (!ptr)
		panic("free on NULL");

	/* Step back from the data area to the chunk header. */
	y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t));
	if (y->used != 1)
		panic("freeing unused/damaged chunk");

	pri = cpu_priority_high();
	spinlock_lock(&heaplock);

	x = y->prev;
	z = y->next;

	/* merge x and y */
	if (x && !x->used) {
		x->size += y->size + sizeof(chunk_t);
		x->next = z;
		if (z)
			z->prev = x;
		/* From here on, y refers to the merged chunk. */
		y = x;
	}

	/* merge y and z or merge (x merged with y) and z */
	if (z && !z->used) {
		y->size += z->size + sizeof(chunk_t);
		y->next = z->next;
		if (z->next) {
			/* y is either y or x */
			z->next->prev = y;
		}
	}

	y->used = 0;

	spinlock_unlock(&heaplock);
	cpu_priority_restore(pri);
}
/SPARTAN/trunk/src/Makefile |
---|
0,0 → 1,86 |
# Build configuration (architecture, SMP, test selection) comes from
# Makefile.config; the per-architecture flags and sources come from the
# arch Makefile.inc.
include Makefile.config
include ../arch/$(ARCH)/Makefile.inc

# Architecture-independent kernel sources.
sources=cpu/cpu.c \
	main/main.c \
	main/kinit.c \
	main/uinit.c \
	proc/scheduler.c \
	proc/thread.c \
	proc/task.c \
	mm/heap.c \
	mm/frame.c \
	mm/page.c \
	mm/tlb.c \
	mm/vm.c \
	lib/func.c \
	lib/list.c \
	debug/print.c \
	time/clock.c \
	time/timeout.c \
	time/delay.c \
	synch/spinlock.c \
	synch/condvar.c \
	synch/rwlock.c \
	synch/mutex.c \
	synch/semaphore.c \
	synch/waitq.c \
	smp/ipi.c

# Optional features from Makefile.config are forwarded as -D defines.
ifdef DEBUG_SPINLOCK
CFLAGS+=-D$(DEBUG_SPINLOCK)
endif
ifdef USERSPACE
CFLAGS+=-D$(USERSPACE)
endif
ifdef TEST
test_objects:=$(addsuffix .o,$(basename ../test/$(TEST_DIR)/$(TEST_FILE)))
CFLAGS+=-D$(TEST)
endif

arch_objects:=$(addsuffix .o,$(basename $(arch_sources)))
objects:=$(addsuffix .o,$(basename $(sources)))

.PHONY : all config depend build clean dist-clean boot

all: dist-clean config depend build

-include Makefile.depend

# Re-create the arch symlinks for the selected architecture.
config:
	find . ../include -name arch -type l -exec rm \{\} \;
	ln -s ../arch/$(ARCH)/src arch
	ln -s ../arch/$(ARCH)/include ../include/arch

depend:
	$(CC) $(CPPFLAGS) -M $(arch_sources) $(sources) >Makefile.depend

build: kernel.bin boot

clean:
	find . ../arch/$(ARCH)/src ../test -name '*.o' -exec rm \{\} \;
	-rm *.bin kernel.map
	$(MAKE) -C ../arch/$(ARCH)/boot clean

dist-clean:
	find . ../include -name arch -type l -exec rm \{\} \;
	-rm Makefile.depend
	-$(MAKE) clean

kernel.bin: $(arch_objects) $(objects) $(test_objects)
	$(LD) $(LFLAGS) $(arch_objects) $(objects) $(test_objects) -o $@ >kernel.map

%.s: %.S
	$(CC) $(CPPFLAGS) -E $< >$@

%.o: %.s
	$(AS) $< -o $@

%.o: %.c
	$(CC) $(CFLAGS) -c $< -o $@

# Size of the kernel image in bytes, passed on to the boot loader build.
KS=`cat kernel.bin | wc -c`

boot:
	$(MAKE) -C ../arch/$(ARCH)/boot build KERNEL_SIZE=$(KS)
/SPARTAN/trunk/src/lib/func.c |
---|
0,0 → 1,61 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <func.h> |
#include <print.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
__u32 haltstate = 0; |
/*
 * Halt the current processor for good.
 *
 * Sets the global haltstate flag so the rest of the system can see a
 * cpu is going away, raises cpu priority (presumably masking
 * interrupts — confirm in arch code), announces the halt and stops
 * the cpu.
 */
void halt(void)
{
	haltstate = 1;
	cpu_priority_high();
	printf("cpu%d: halted\n", CPU->id);
	cpu_halt();
}
/*
 * Compare two NULL-terminated strings.
 *
 * Returns 0 when src and dst are identical, 1 otherwise. Note this is
 * weaker than the ISO C strcmp: callers only learn equality, never
 * ordering.
 */
int strcmp(char *src, char *dst)
{
	while (*src == *dst) {
		if (*src == '\0')
			return 0;
		src++;
		dst++;
	}
	return 1;
}
/SPARTAN/trunk/src/lib/list.c |
---|
0,0 → 1,57 |
/* |
* Copyright (C) 2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <list.h> |
/*
 * Return 1 when link is a member of the circular list anchored at
 * head, 0 otherwise. The head itself is not considered a member.
 */
int list_member(link_t *link, link_t *head)
{
	link_t *cur;

	for (cur = head->next; cur != head; cur = cur->next) {
		if (cur == link)
			return 1;
	}

	return 0;
}
/*
 * Concatenate two circular lists: move all items of the list anchored
 * at head2 to the tail of the list anchored at head1, leaving head2
 * empty. The four pointer assignments below must stay in this order.
 */
void list_concat(link_t *head1, link_t *head2)
{
	if (list_empty(head2))
		return;

	/* Splice head2's items between head1's last item and head1. */
	head2->next->prev = head1->prev;
	head2->prev->next = head1;
	head1->prev->next = head2->next;
	head1->prev = head2->prev;
	list_initialize(head2);
}
/SPARTAN/trunk/src/cpu/cpu.c |
---|
0,0 → 1,89 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <cpu.h> |
#include <arch.h> |
#include <arch/cpu.h> |
#include <mm/heap.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <config.h> |
#include <panic.h> |
#include <typedefs.h> |
#include <memstr.h> |
#include <list.h> |
cpu_private_data_t *cpu_private_data; |
cpu_t *cpus; |
/*
 * Initialize per-cpu data structures.
 *
 * On the bootstrap processor (config.cpu_active == 1; unconditionally
 * on non-SMP builds) this allocates and zeroes the cpu_private_data
 * and cpus arrays, gives each cpu a stack and an id, and initializes
 * the per-cpu run queues. Every processor then marks itself active,
 * identifies itself and runs architecture-specific cpu init.
 */
void cpu_init(void) {
	int i, j;

	#ifdef __SMP__
	if (config.cpu_active == 1) {
	#endif /* __SMP__ */
		cpu_private_data = (cpu_private_data_t *) malloc(sizeof(cpu_private_data_t) * config.cpu_count);
		if (!cpu_private_data)
			panic("malloc/cpu_private_data");

		cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count);
		if (!cpus)
			panic("malloc/cpus");

		/* initialize everything */
		memsetb((__address) cpu_private_data, sizeof(cpu_private_data_t) * config.cpu_count, 0);
		memsetb((__address) cpus, sizeof(cpu_t) * config.cpu_count, 0);

		for (i=0; i < config.cpu_count; i++) {
			cpus[i].stack = (__u8 *) malloc(CPU_STACK_SIZE);
			if (!cpus[i].stack)
				panic("malloc/cpus[%d].stack\n", i);

			cpus[i].id = i;

			#ifdef __SMP__
			waitq_initialize(&cpus[i].kcpulb_wq);
			#endif /* __SMP__ */

			for (j = 0; j < RQ_COUNT; j++) {
				list_initialize(&cpus[i].rq[j].rq_head);
			}
		}

	#ifdef __SMP__
	}
	#endif /* __SMP__ */

	CPU->active = 1;
	CPU->tlb_active = 1;

	cpu_identify();
	cpu_arch_init();
}
/SPARTAN/trunk/src/synch/waitq.c |
---|
0,0 → 1,241 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <context.h> |
#include <proc/thread.h> |
#include <synch/synch.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#include <arch/asm.h> |
#include <arch/types.h> |
#include <arch.h> |
#include <list.h> |
#include <time/timeout.h> |
/*
 * Prepare a wait queue for use: empty sleeper list, zero missed
 * wakeups and an unlocked guarding spinlock.
 */
void waitq_initialize(waitq_t *wq)
{
	wq->missed_wakeups = 0;
	list_initialize(&wq->head);
	spinlock_initialize(&wq->lock);
}
/*
 * Called with interrupts disabled from clock() when sleep_timeout
 * timeouts. This function is not allowed to enable interrupts.
 *
 * It is supposed to try to remove 'its' thread from the waitqueue; it
 * can eventually fail to achieve this goal when these two events
 * overlap; in that case it behaves just as though there was no
 * timeout at all
 */
void waitq_interrupted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;	/* the sleeping thread this timeout belongs to */
	waitq_t *wq;
	int do_wakeup = 0;

	spinlock_lock(&threads_lock);
	/* If the thread no longer exists, there is nothing to interrupt. */
	if (!list_member(&t->threads_link, &threads_head))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	/* Assignment intended: remember the queue (if any) the thread sleeps on. */
	if (wq = t->sleep_queue) {
		if (!spinlock_trylock(&wq->lock)) {
			/*
			 * Cannot take wq->lock while holding t->lock without
			 * risking deadlock against waiters who lock in the
			 * opposite order; drop t->lock and retry.
			 */
			spinlock_unlock(&t->lock);
			goto grab_locks;	/* avoid deadlock */
		}
		list_remove(&t->wq_link);
		/* Redirect the thread to the timeout failover context. */
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = 1;
		spinlock_unlock(&wq->lock);
		t->sleep_queue = NULL;
	}

	t->timeout_pending = 0;
	spinlock_unlock(&t->lock);

	/* Wake the thread only after all its locks have been released. */
	if (do_wakeup) thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}
/*
 * This is a sleep implementation which allows itself to be
 * interrupted from the sleep, restoring a failover context.
 *
 * This function is really basic in that other functions as waitq_sleep()
 * and all the *_timeout() functions use it.
 *
 * The third argument controls whether only a conditional sleep
 * (non-blocking sleep) is called for when the second argument is 0.
 *
 * usec | nonblocking | what happens if there is no missed_wakeup
 * -----+-------------+--------------------------------------------
 * 0    | 0           | blocks without timeout until wakeup
 * 0    | <> 0        | immediately returns ESYNCH_WOULD_BLOCK
 * > 0  | x           | blocks with timeout until timeout or wakeup
 *
 * return values:
 *  ESYNCH_WOULD_BLOCK
 *  ESYNCH_TIMEOUT
 *  ESYNCH_OK_ATOMIC
 *  ESYNCH_OK_BLOCKED
 */
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
	volatile pri_t pri; /* must be live after context_restore() */

restart:
	pri = cpu_priority_high();

	/*
	 * Busy waiting for a delayed timeout.
	 * This is an important fix for the race condition between
	 * a delayed timeout and a next call to waitq_sleep_timeout().
	 * Simply, the thread is not allowed to go to sleep if
	 * there are timeouts in progress.
	 */
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {
		spinlock_unlock(&THREAD->lock);
		cpu_priority_restore(pri);
		goto restart;
	}
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&wq->lock);

	/* checks whether to go to sleep at all */
	if (wq->missed_wakeups) {
		/* A wakeup arrived before we slept; consume it and return. */
		wq->missed_wakeups--;
		spinlock_unlock(&wq->lock);
		cpu_priority_restore(pri);
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if (nonblocking && (usec == 0)) {
			/* return immediatelly instead of going to sleep */
			spinlock_unlock(&wq->lock);
			cpu_priority_restore(pri);
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * Now we are firmly decided to go to sleep.
	 */
	spinlock_lock(&THREAD->lock);
	if (usec) {
		/* We use the timeout variant. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/*
			 * Short emulation of scheduler() return code.
			 * Execution resumes here (with a nonzero context_save
			 * return collapsed to this branch) when
			 * waitq_interrupted_sleep() redirected the thread to
			 * the saved failover context, i.e. the sleep timed out.
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(pri);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = 1;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Suspend execution.
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler(); /* wq->lock is released in scheduler_separated_stack() */
	cpu_priority_restore(pri);

	return ESYNCH_OK_BLOCKED;
}
/* |
* This is the SMP- and IRQ-safe wrapper meant for general use. |
*/ |
/* |
* Besides its 'normal' wakeup operation, it attempts to unregister possible timeout. |
*/ |
void waitq_wakeup(waitq_t *wq, int all) |
{ |
pri_t pri; |
pri = cpu_priority_high(); |
spinlock_lock(&wq->lock); |
_waitq_wakeup_unsafe(wq, all); |
spinlock_unlock(&wq->lock); |
cpu_priority_restore(pri); |
} |
/* |
* This is the internal SMP- and IRQ-unsafe version of waitq_wakeup. |
* It assumes wq->lock is already locked. |
*/ |
void _waitq_wakeup_unsafe(waitq_t *wq, int all) |
{ |
thread_t *t; |
loop: |
if (list_empty(&wq->head)) { |
wq->missed_wakeups++; |
if (all) wq->missed_wakeups = 0; |
return; |
} |
t = list_get_instance(wq->head.next, thread_t, wq_link); |
list_remove(&t->wq_link); |
spinlock_lock(&t->lock); |
if (t->timeout_pending && timeout_unregister(&t->sleep_timeout)) |
t->timeout_pending = 0; |
t->sleep_queue = NULL; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
if (all) goto loop; |
} |
/SPARTAN/trunk/src/synch/rwlock.c |
---|
0,0 → 1,310 |
/* |
* Reader/Writer locks |
*/ |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/* |
* These locks are not recursive. |
* Neither readers nor writers will suffer starvation. |
* |
* If there is a writer followed by a reader waiting for the rwlock |
* and the writer times out, all leading readers are automatically woken up |
* and allowed in. |
*/ |
/* |
* NOTE ON rwlock_holder_type |
* This field is set on an attempt to acquire the exclusive mutex |
* to the respective value depending whether the caller is a reader |
* or a writer. The field is examined only if the thread had been |
* previously blocked on the exclusive mutex. Thus it is save |
* to store the rwlock type in the thread structure, because |
* each thread can block on only one rwlock at a time. |
*/ |
#include <synch/synch.h> |
#include <synch/rwlock.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <list.h> |
#include <typedefs.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <proc/thread.h> |
#include <panic.h> |
#define ALLOW_ALL 0 |
#define ALLOW_READERS_ONLY 1 |
static void let_others_in(rwlock_t *rwl, int readers_only); |
static void release_spinlock(void *arg); |
/*
 * Initialize a reader/writer lock: no readers inside, exclusive mutex
 * free, guarding spinlock unlocked.
 */
void rwlock_initialize(rwlock_t *rwl) {
	rwl->readers_in = 0;
	mutex_initialize(&rwl->exclusive);
	spinlock_initialize(&rwl->lock);
}
/*
 * Acquire rwl for writing, waiting at most usec microseconds
 * (or not at all when trylock is set). Returns the ESYNCH_* code
 * from the underlying mutex operation.
 */
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
	pri_t pri;
	int rc;

	/* Record that this thread blocks (if at all) as a writer. */
	pri = cpu_priority_high();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_WRITER;
	spinlock_unlock(&THREAD->lock);
	cpu_priority_restore(pri);

	/*
	 * Writers take the easy part.
	 * They just need to acquire the exclusive mutex.
	 */
	rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
	if (SYNCH_FAILED(rc)) {
		/*
		 * Lock operation timed out.
		 * The state of rwl is UNKNOWN at this point.
		 * No claims about its holder can be made.
		 */
		pri = cpu_priority_high();
		spinlock_lock(&rwl->lock);
		/*
		 * Now when rwl is locked, we can inspect it again.
		 * If it is held by some readers already, we can let
		 * readers from the head of the wait queue in.
		 */
		if (rwl->readers_in)
			let_others_in(rwl, ALLOW_READERS_ONLY);
		spinlock_unlock(&rwl->lock);
		cpu_priority_restore(pri);
	}

	return rc;
}
/*
 * Acquire rwl for reading, waiting at most usec microseconds
 * (or not at all when trylock is set). Returns an ESYNCH_* code.
 *
 * Fix: the switch below used the misspelled label "dafault:", which the
 * compiler accepts as an ordinary (unreachable) goto label — so an
 * unexpected ESYNCH code would silently fall out of the switch instead
 * of panicking. Corrected to "default:".
 */
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
	int rc;
	pri_t pri;

	/* Record that this thread blocks (if at all) as a reader. */
	pri = cpu_priority_high();
	spinlock_lock(&THREAD->lock);
	THREAD->rwlock_holder_type = RWLOCK_READER;
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&rwl->lock);

	/*
	 * Find out whether we can get what we want without blocking.
	 */
	rc = mutex_trylock(&rwl->exclusive);
	if (SYNCH_FAILED(rc)) {
		/*
		 * 'exclusive' mutex is being held by someone else.
		 * If the holder is a reader and there is no one
		 * else waiting for it, we can enter the critical
		 * section.
		 */
		if (rwl->readers_in) {
			spinlock_lock(&rwl->exclusive.sem.wq.lock);
			if (list_empty(&rwl->exclusive.sem.wq.head)) {
				/*
				 * We can enter.
				 */
				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
				goto shortcut;
			}
			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
		}

		/*
		 * In order to prevent a race condition when a reader
		 * could block another reader at the head of the waitq,
		 * we register a function to unlock rwl->lock
		 * after this thread is put asleep.
		 */
		thread_register_call_me(release_spinlock, &rwl->lock);

		rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
		switch (rc) {
			case ESYNCH_WOULD_BLOCK:
				/*
				 * release_spinlock() wasn't called
				 */
				thread_register_call_me(NULL, NULL);
				spinlock_unlock(&rwl->lock);
				/* fallthrough: still need to restore priority */
			case ESYNCH_TIMEOUT:
				/*
				 * The sleep timeouted.
				 * We just restore the cpu priority.
				 */
				/* fallthrough */
			case ESYNCH_OK_BLOCKED:
				/*
				 * We were woken with rwl->readers_in already incremented.
				 * Note that this arrangement avoids race condition between
				 * two concurrent readers. (Race is avoided if 'exclusive' is
				 * locked at the same time as 'readers_in' is incremented.
				 * Same time means both events happen atomically when
				 * rwl->lock is held.)
				 */
				cpu_priority_restore(pri);
				break;
			case ESYNCH_OK_ATOMIC:
				panic(PANIC "_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
				break;
			default:
				panic(PANIC "invalid ESYNCH");
				break;
		}
		return rc;
	}

shortcut:
	/*
	 * We can increment readers_in only if we didn't go to sleep.
	 * For sleepers, rwlock_let_others_in() will do the job.
	 */
	rwl->readers_in++;

	spinlock_unlock(&rwl->lock);
	cpu_priority_restore(pri);

	return ESYNCH_OK_ATOMIC;
}
/*
 * Release rwl held for writing and admit the next waiter(s),
 * whether readers or a writer.
 */
void rwlock_write_unlock(rwlock_t *rwl)
{
	pri_t saved_pri = cpu_priority_high();

	spinlock_lock(&rwl->lock);
	let_others_in(rwl, ALLOW_ALL);
	spinlock_unlock(&rwl->lock);

	cpu_priority_restore(saved_pri);
}
/*
 * Release rwl held for reading. The last reader out admits the
 * next waiter(s).
 */
void rwlock_read_unlock(rwlock_t *rwl)
{
	pri_t saved_pri = cpu_priority_high();

	spinlock_lock(&rwl->lock);
	rwl->readers_in--;
	if (rwl->readers_in == 0)
		let_others_in(rwl, ALLOW_ALL);
	spinlock_unlock(&rwl->lock);

	cpu_priority_restore(saved_pri);
}
/*
 * Must be called with rwl->lock locked.
 * Must be called with cpu_priority_high'ed.
 */
/*
 * If readers_only is false: (unlock scenario)
 * Let the first sleeper on 'exclusive' mutex in, no matter
 * whether it is a reader or a writer. If there are more leading
 * readers in line, let each of them in.
 *
 * Otherwise: (timeout scenario)
 * Let all leading readers in.
 */
void let_others_in(rwlock_t *rwl, int readers_only)
{
	rwlock_type_t type = RWLOCK_NONE;	/* type of the waiter at the head of the queue */
	thread_t *t = NULL;
	int one_more = 1;			/* cleared when the next waiter is a writer */

	spinlock_lock(&rwl->exclusive.sem.wq.lock);

	/* Peek at the first sleeper on the exclusive mutex, if any. */
	if (!list_empty(&rwl->exclusive.sem.wq.head))
		t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
	do {
		if (t) {
			spinlock_lock(&t->lock);
			type = t->rwlock_holder_type;
			spinlock_unlock(&t->lock);
		}

		/*
		 * If readers_only is true, we wake all leading readers
		 * if and only if rwl is locked by another reader.
		 * Assumption: readers_only ==> rwl->readers_in
		 */
		if (readers_only && (type != RWLOCK_READER))
			break;

		if (type == RWLOCK_READER) {
			/*
			 * Waking up a reader.
			 * We are responsible for incrementing rwl->readers_in for it.
			 */
			rwl->readers_in++;
		}

		/*
		 * Only the last iteration through this loop can increment
		 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
		 * iterations will wake up a thread.
		 */
		/* We call the internal version of waitq_wakeup, which
		 * relies on the fact that the waitq is already locked.
		 */
		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);

		/* Re-examine the new head of the queue after the wakeup. */
		t = NULL;
		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
			t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
			if (t) {
				spinlock_lock(&t->lock);
				if (t->rwlock_holder_type != RWLOCK_READER)
					one_more = 0;	/* a writer is next; stop after this pass */
				spinlock_unlock(&t->lock);
			}
		}
	} while ((type == RWLOCK_READER) && t && one_more);

	spinlock_unlock(&rwl->exclusive.sem.wq.lock);
}
void release_spinlock(void *arg) |
{ |
spinlock_unlock((spinlock_t *) arg); |
} |
/SPARTAN/trunk/src/synch/spinlock.c |
---|
0,0 → 1,79 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <arch.h> |
#ifdef __SMP__ |
#include <arch/smp/atomic.h> |
#endif |
#include <synch/spinlock.h> |
#ifdef __SMP__ |
/* Initialize a spinlock to the unlocked state. */
void spinlock_initialize(spinlock_t *sl)
{
	sl->val = 0;
}
#ifdef DEBUG_SPINLOCK
/*
 * Debug variant: busy-wait on test_and_set and periodically report
 * apparent deadlocks.
 */
void spinlock_lock(spinlock_t *sl)
{
	int i = 0;
	/*
	 * Peek at the return address on the stack to identify the caller.
	 * NOTE(review): this assumes a calling convention where the return
	 * address sits just below the first argument — ia32-specific hack;
	 * not portable to other architectures.
	 */
	__address caller = ((__u32 *) &sl)[-1];

	while (test_and_set(&sl->val)) {
		/* Still spinning after ~300000 iterations: likely a deadlock. */
		if (i++ > 300000) {
			printf("cpu%d: looping on spinlock %X, caller=%X\n", CPU->id, sl, caller);
			i = 0;
		}
	}
}
#else
/* Production variant. */
void spinlock_lock(spinlock_t *sl)
{
	/*
	 * Each architecture has its own efficient/recommended
	 * implementation of spinlock.
	 */
	spinlock_arch(&sl->val);
}
#endif
/*
 * Try to grab the spinlock without spinning.
 * Returns nonzero on success, zero if the lock was already held.
 */
int spinlock_trylock(spinlock_t *sl)
{
	return test_and_set(&sl->val) == 0;
}
/* Release the spinlock by clearing its value. */
void spinlock_unlock(spinlock_t *sl)
{
	sl->val = 0;
}
#endif |
/SPARTAN/trunk/src/synch/mutex.c |
---|
0,0 → 1,46 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <synch/synch.h> |
#include <synch/mutex.h> |
#include <synch/semaphore.h> |
/* A mutex is a binary semaphore: initialize its semaphore with count 1. */
void mutex_initialize(mutex_t *mtx)
{
	semaphore_initialize(&mtx->sem, 1);
}
/*
 * Lock the mutex, waiting at most usec microseconds (or not at all
 * when trylock is set). Returns the ESYNCH_* code from the semaphore.
 */
int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock)
{
	return _semaphore_down_timeout(&mtx->sem, usec, trylock);
}
/* Unlock the mutex by raising its underlying semaphore. */
void mutex_unlock(mutex_t *mtx)
{
	semaphore_up(&mtx->sem);
}
/SPARTAN/trunk/src/synch/semaphore.c |
---|
0,0 → 1,57 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <synch/synch.h> |
#include <synch/semaphore.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
/*
 * Initialize a counting semaphore with initial count val.
 *
 * The count is stored as the wait queue's missed_wakeups: a down()
 * consumes a missed wakeup when one is available and sleeps otherwise,
 * which gives exactly semaphore semantics.
 */
void semaphore_initialize(semaphore_t *s, int val)
{
	pri_t pri;

	waitq_initialize(&s->wq);

	pri = cpu_priority_high();
	spinlock_lock(&s->wq.lock);
	s->wq.missed_wakeups = val;
	spinlock_unlock(&s->wq.lock);
	cpu_priority_restore(pri);
}
/*
 * Semaphore down with timeout usec (or non-blocking when trydown is
 * set); a straight delegation to waitq_sleep_timeout().
 */
int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown)
{
	return waitq_sleep_timeout(&s->wq, usec, trydown);
}
/* Semaphore up: wake one waiter (or record a missed wakeup). */
void semaphore_up(semaphore_t *s)
{
	waitq_wakeup(&s->wq, WAKEUP_FIRST);
}
/SPARTAN/trunk/src/synch/condvar.c |
---|
0,0 → 1,57 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <synch/synch.h> |
#include <synch/condvar.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
/* Initialize a condition variable's wait queue. */
void condvar_initialize(condvar_t *cv)
{
	waitq_initialize(&cv->wq);
}
/* Wake one thread waiting on the condition variable. */
void condvar_signal(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_FIRST);
}
/* Wake all threads waiting on the condition variable. */
void condvar_broadcast(condvar_t *cv)
{
	waitq_wakeup(&cv->wq, WAKEUP_ALL);
}
/*
 * Wait on the condition variable with mtx held, for at most usec
 * microseconds (or non-blocking when trywait is set). The mutex is
 * released for the duration of the wait and re-acquired before
 * returning; the return value is the ESYNCH_* code from the sleep.
 *
 * NOTE(review): a signal arriving between mutex_unlock() and the sleep
 * appears to be covered by the wait queue's missed_wakeups mechanism —
 * confirm against waitq_sleep_timeout().
 */
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int trywait)
{
	int rc;

	mutex_unlock(mtx);
	rc = waitq_sleep_timeout(&cv->wq, usec, trywait);
	mutex_lock(mtx);

	return rc;
}
/SPARTAN/trunk/src/time/delay.c |
---|
0,0 → 1,46 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <time/delay.h> |
#include <arch/types.h> |
#include <cpu.h> |
#include <arch/asm.h> |
#include <arch.h> |
/*
 * Note that the delay loop is calibrated for each and every CPU in the system.
 * Therefore it is necessary to cpu_priority_high() before calling the asm_delay_loop().
 */
/* Busy-wait for (at least) the given number of microseconds. */
void delay(__u32 microseconds)
{
	pri_t pri;

	/* Disable interrupts so we stay on the CPU whose constant we use. */
	pri = cpu_priority_high();
	asm_delay_loop(microseconds * CPU->delay_loop_const);
	cpu_priority_restore(pri);
}
/SPARTAN/trunk/src/time/timeout.c |
---|
0,0 → 1,164 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <time/timeout.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <config.h> |
#include <synch/spinlock.h> |
#include <func.h> |
#include <cpu.h> |
#include <print.h> |
#include <arch/asm.h> |
#include <arch.h> |
/* Initialize this CPU's active-timeout list and its guarding lock. */
void timeout_init(void)
{
	spinlock_initialize(&CPU->timeoutlock);
	list_initialize(&CPU->timeout_active_head);
}
/*
 * Reset a timeout to the unregistered state: no owning CPU, no ticks,
 * no handler, and a detached list link. Does not touch t->lock.
 */
void timeout_reinitialize(timeout_t *t)
{
	t->arg = NULL;
	t->handler = NULL;
	t->ticks = 0;
	t->cpu = NULL;
	link_initialize(&t->link);
}
/* First-time initialization: set up t->lock, then reset all fields. */
void timeout_initialize(timeout_t *t)
{
	spinlock_initialize(&t->lock);
	timeout_reinitialize(t);
}
/*
 * This function registers f for execution in about time microseconds.
 *
 * The per-CPU active list is a delta list: each entry's ticks field
 * holds the number of clock ticks remaining AFTER all its predecessors
 * expire, so clock() only ever decrements the head entry.
 */
void timeout_register(timeout_t *t, __u64 time, timeout_handler f, void *arg)
{
	timeout_t *hlp;		/* list entry being examined / t's successor */
	link_t *l, *m;
	pri_t pri;
	__u64 sum;		/* ticks accumulated over t's predecessors */

	pri = cpu_priority_high();
	spinlock_lock(&CPU->timeoutlock);
	spinlock_lock(&t->lock);

	/* A timeout may only be registered once at a time. */
	if (t->cpu)
		panic("timeout_register: t->cpu != 0");

	t->cpu = CPU;
	t->ticks = us2ticks(time);

	t->handler = f;
	t->arg = arg;

	/*
	 * Insert t into the active timeouts list according to t->ticks.
	 */
	sum = 0;
	l = CPU->timeout_active_head.next;
	while (l != &CPU->timeout_active_head) {
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		/* Stop at the first entry that expires after t. */
		if (t->ticks < sum + hlp->ticks) {
			spinlock_unlock(&hlp->lock);
			break;
		}
		sum += hlp->ticks;
		spinlock_unlock(&hlp->lock);
		l = l->next;
	}

	m = l->prev;
	list_prepend(&t->link, m); /* avoid using l->prev */

	/*
	 * Adjust t->ticks according to ticks accumulated in h's predecessors.
	 */
	t->ticks -= sum;

	/*
	 * Decrease ticks of t's immediate succesor by t->ticks.
	 * (hlp still points at the successor; valid only when the loop
	 * exited via break, which is exactly when l != head.)
	 */
	if (l != &CPU->timeout_active_head) {
		spinlock_lock(&hlp->lock);
		hlp->ticks -= t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	spinlock_unlock(&t->lock);
	spinlock_unlock(&CPU->timeoutlock);
	cpu_priority_restore(pri);
}
/*
 * Cancel a pending timeout. Returns 1 if the timeout was removed from
 * its CPU's active list, 0 if it was not registered (or has already
 * fired and been reinitialized).
 */
int timeout_unregister(timeout_t *t)
{
	timeout_t *hlp;		/* t's successor in the delta list */
	link_t *l;
	pri_t pri;

grab_locks:
	pri = cpu_priority_high();
	spinlock_lock(&t->lock);
	/* t->cpu is nonzero only while the timeout is registered. */
	if (!t->cpu) {
		spinlock_unlock(&t->lock);
		cpu_priority_restore(pri);
		return 0;
	}
	/*
	 * Lock ordering here is t->lock then CPU list lock — the reverse of
	 * timeout_register() — so only trylock is safe; back off and retry
	 * on contention.
	 */
	if (!spinlock_trylock(&t->cpu->timeoutlock)) {
		spinlock_unlock(&t->lock);
		cpu_priority_restore(pri);
		goto grab_locks;
	}

	/*
	 * Now we know for sure that t hasn't been activated yet
	 * and is lurking in t->cpu->timeout_active_head queue.
	 */
	l = t->link.next;
	if (l != &t->cpu->timeout_active_head) {
		/* Give t's remaining ticks back to its successor. */
		hlp = list_get_instance(l, timeout_t, link);
		spinlock_lock(&hlp->lock);
		hlp->ticks += t->ticks;
		spinlock_unlock(&hlp->lock);
	}

	list_remove(&t->link);
	spinlock_unlock(&t->cpu->timeoutlock);

	timeout_reinitialize(t);
	spinlock_unlock(&t->lock);

	cpu_priority_restore(pri);
	return 1;
}
/SPARTAN/trunk/src/time/clock.c |
---|
0,0 → 1,100 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <time/clock.h> |
#include <time/timeout.h> |
#include <arch/types.h> |
#include <config.h> |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <func.h> |
#include <proc/scheduler.h> |
#include <cpu.h> |
#include <print.h> |
#include <arch.h> |
#include <list.h> |
#ifdef __SMP__ |
#include <arch/smp/atomic.h> |
#endif |
/*
 * Clock is called from an interrupt and is cpu_priority_high()'d.
 *
 * Responsibilities:
 *  1. Fire all timeouts on this CPU's active list that have expired.
 *  2. Charge one tick to THREAD and invoke the scheduler when its
 *     quantum is exhausted.
 *
 * NOTE(review): the active list appears to be a delta list — each
 * entry's ticks value is relative to its predecessor (unregistration
 * code elsewhere adds a removed entry's ticks to its successor), which
 * is why only the head entry is decremented per tick. Confirm against
 * timeout_register().
 */
void clock(void)
{
	link_t *l;		/* cursor into CPU->timeout_active_head */
	timeout_t *h;		/* timeout entry owning l */
	timeout_handler f;	/* copied handler, called after locks are dropped */
	void *arg;		/* copied handler argument */

	/*
	 * To avoid lock ordering problems,
	 * run all expired timeouts as you visit them.
	 */
	spinlock_lock(&CPU->timeoutlock);
	while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
		h = list_get_instance(l, timeout_t, link);
		spinlock_lock(&h->lock);
		/*
		 * Post-decrement: a non-zero head has its count reduced by
		 * one and stops the scan — entries behind it are relative to
		 * it and need no update. A zero head has expired; it also
		 * wraps to (typeof ticks)-1, but is removed below before the
		 * value could be observed again.
		 */
		if (h->ticks-- != 0) {
			spinlock_unlock(&h->lock);
			break;
		}
		list_remove(l);
		/*
		 * Copy handler and argument out of the entry, then
		 * reinitialize it so the owner may re-register it from
		 * within its own handler.
		 */
		f = h->handler;
		arg = h->arg;
		timeout_reinitialize(h);
		spinlock_unlock(&h->lock);
		/*
		 * Drop the list lock before calling the handler: the handler
		 * may take other locks (or re-register timeouts), and holding
		 * timeoutlock across it would invert lock order / deadlock.
		 */
		spinlock_unlock(&CPU->timeoutlock);
		f(arg);
		spinlock_lock(&CPU->timeoutlock);
	}
	spinlock_unlock(&CPU->timeoutlock);
	/*
	 * Do CPU usage accounting and find out whether to preempt THREAD.
	 */
	if (THREAD) {
		/* Hint the scheduler to re-link its run queues eventually. */
		spinlock_lock(&CPU->lock);
		CPU->needs_relink++;
		spinlock_unlock(&CPU->lock);
		spinlock_lock(&THREAD->lock);
		/*
		 * Quantum exhausted (ticks was 0 before the decrement):
		 * release the thread lock first, then preempt via scheduler().
		 */
		if (!THREAD->ticks--) {
			spinlock_unlock(&THREAD->lock);
			scheduler();
		}
		else {
			spinlock_unlock(&THREAD->lock);
		}
	}
}
/SPARTAN/trunk/src/smp/ipi.c |
---|
0,0 → 1,46 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#ifdef __SMP__ |
#include <smp/ipi.h> |
#include <config.h> |
void ipi_broadcast(int ipi) |
{ |
/* |
* Provisions must be made to avoid sending IPI: |
* - before all CPU's were configured to accept the IPI |
* - if there is only one CPU but the kernel was compiled with __SMP__ |
*/ |
if ((config.cpu_active > 1) && (config.cpu_active == config.cpu_count)) |
ipi_broadcast_arch(ipi); |
} |
#endif /* __SMP__ */ |