/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};	/**< Thread states */

/** Lock protecting threads_head list. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);
btree_t threads_btree;		/**< B+tree of all threads. */

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
    void (*f)(void *) = THREAD->thread_code;
    void *arg = THREAD->thread_arg;

    /* this is where each thread wakes up after its creation */
    spinlock_unlock(&THREAD->lock);
    interrupts_enable();

    f(arg);
    thread_exit();
    /* not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
    thread_t *t = (thread_t *) obj;
    pfn_t pfn;
    int status;

    spinlock_initialize(&t->lock, "thread_t_lock");
    link_initialize(&t->rq_link);
    link_initialize(&t->wq_link);
    link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
#  ifdef CONFIG_FPU_LAZY
    t->saved_fpu_context = NULL;
#  else
    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!t->saved_fpu_context)
        return -1;
#  endif
#endif

    pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags, &status);
    if (status) {
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
            slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return -1;
    }
    t->kstack = (__u8 *) PA2KA(PFN2ADDR(pfn));

    return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
    thread_t *t = (thread_t *) obj;

    frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
    if (t->saved_fpu_context)
        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
    return 1;	/* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
    THREAD = NULL;
    atomic_set(&nrdy, 0);
    thread_slab = slab_cache_create("thread_slab",
                                    sizeof(thread_t), 0,
                                    thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
    fpu_context_slab = slab_cache_create("fpu_slab",
                                         sizeof(fpu_context_t),
                                         FPU_CONTEXT_ALIGN,
                                         NULL, NULL, 0);
#endif

    btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
    cpu_t *cpu;
    runq_t *r;
    ipl_t ipl;
    int i, avg;

    ipl = interrupts_disable();

    spinlock_lock(&t->lock);

    ASSERT(!(t->state == Ready));

    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

    cpu = CPU;
    if (t->flags & X_WIRED) {
        cpu = t->cpu;
    }
    t->state = Ready;
    spinlock_unlock(&t->lock);

    /*
     * Append t to respective ready queue on respective processor.
     */
    r = &cpu->rq[i];
    spinlock_lock(&r->lock);
    list_append(&t->rq_link, &r->rq_head);
    r->n++;
    spinlock_unlock(&r->lock);

    atomic_inc(&nrdy);
    avg = atomic_get(&nrdy) / config.cpu_active;
    atomic_inc(&cpu->nrdy);

    interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!!
 */
void thread_destroy(thread_t *t)
{
    ASSERT(t->state == Exiting);
    ASSERT(t->task);
    ASSERT(t->cpu);

    spinlock_lock(&t->cpu->lock);
    if (t->cpu->fpu_owner == t)
        t->cpu->fpu_owner = NULL;
    spinlock_unlock(&t->cpu->lock);

    /*
     * Detach from the containing task.
     */
    spinlock_lock(&t->task->lock);
    list_remove(&t->th_link);
    spinlock_unlock(&t->task->lock);

    spinlock_unlock(&t->lock);

    spinlock_lock(&threads_lock);
    btree_remove(&threads_btree, (btree_key_t) ((__address) t), NULL);
    spinlock_unlock(&threads_lock);

    slab_free(thread_slab, t);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
    thread_t *t;
    ipl_t ipl;

    t = (thread_t *) slab_alloc(thread_slab, 0);
    if (!t)
        return NULL;

    thread_create_arch(t);

    /* Not needed, but good for debugging */
    memsetb((__address) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

    ipl = interrupts_disable();
    spinlock_lock(&tidlock);
    t->tid = ++last_tid;
    spinlock_unlock(&tidlock);
    interrupts_restore(ipl);

    context_save(&t->saved_context);
    context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

    the_initialize((the_t *) t->kstack);

    ipl = interrupts_disable();
    t->saved_context.ipl = interrupts_read();
    interrupts_restore(ipl);

    memcpy(t->name, name, THREAD_NAME_BUFLEN);

    t->thread_code = func;
    t->thread_arg = arg;
    t->ticks = -1;
    t->priority = -1;		/* start in rq[0] */
    t->cpu = NULL;
    t->flags = 0;
    t->state = Entering;
    t->call_me = NULL;
    t->call_me_with = NULL;

    timeout_initialize(&t->sleep_timeout);
    t->sleep_queue = NULL;
    t->timeout_pending = 0;

    t->rwlock_holder_type = RWLOCK_NONE;

    t->task = task;

    t->fpu_context_exists = 0;
    t->fpu_context_engaged = 0;

    /*
     * Register this thread in the system-wide list.
     */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);
    btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
    spinlock_unlock(&threads_lock);

    /*
     * Attach to the containing task.
     */
    spinlock_lock(&task->lock);
    list_append(&t->th_link, &task->th_head);
    spinlock_unlock(&task->lock);

    interrupts_restore(ipl);

    return t;
}

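/*
 * Illustrative usage sketch (not part of the original file): a kernel
 * thread is typically brought to life by pairing thread_create() with
 * thread_ready(), as sys_thread_create() below does for userspace
 * threads. The function demo_thread_func() and its name string are
 * hypothetical.
 *
 *	static void demo_thread_func(void *arg)
 *	{
 *		printf("hello from thread %d\n", THREAD->tid);
 *		// returning here falls through cushion() into thread_exit()
 *	}
 *
 *	thread_t *t = thread_create(demo_thread_func, NULL, TASK, 0, "demo");
 *	if (t)
 *		thread_ready(t);	// enqueue on a CPU run queue
 */
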
/** Make thread exiting
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
    ipl_t ipl;

restart:
    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    if (THREAD->timeout_pending) {	/* busy waiting for timeouts in progress */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
        goto restart;
    }
    THREAD->state = Exiting;
    spinlock_unlock(&THREAD->lock);
    scheduler();
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
    thread_usleep(sec * 1000000);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
    waitq_t wq;

    waitq_initialize(&wq);

    (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&THREAD->lock);
    THREAD->call_me = call_me;
    THREAD->call_me_with = call_me_with;
    spinlock_unlock(&THREAD->lock);
    interrupts_restore(ipl);
}

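/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, the registered function/argument pair is invoked by the
 * scheduler on the next context switch to the current thread. The
 * callback demo_notice() and its argument are hypothetical.
 *
 *	static void demo_notice(void *arg)
 *	{
 *		printf("%s\n", (char *) arg);
 *	}
 *
 *	thread_register_call_me(demo_notice, "switched back");
 */
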
/** Print list of threads debug info */
void thread_print_list(void)
{
    link_t *cur;
    ipl_t ipl;

    /* Messing with thread structures, avoid deadlock */
    ipl = interrupts_disable();
    spinlock_lock(&threads_lock);

    for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            thread_t *t;

            t = (thread_t *) node->value[i];
            printf("%s: address=%#zX, tid=%zd, state=%s, task=%#zX, code=%#zX, stack=%#zX, cpu=",
                   t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
            if (t->cpu)
                printf("cpu%zd ", t->cpu->id);
            else
                printf("none");
            printf("\n");
        }
    }

    spinlock_unlock(&threads_lock);
    interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
    btree_node_t *leaf;

    return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL;
}

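/*
 * Illustrative sketch (not part of the original file): thread_exists()
 * leaves the locking to the caller, so a lookup follows the same pattern
 * as thread_print_list() above:
 *
 *	ipl_t ipl = interrupts_disable();
 *	spinlock_lock(&threads_lock);
 *	if (thread_exists(t)) {
 *		// t is still registered and may be inspected while the lock is held
 *	}
 *	spinlock_unlock(&threads_lock);
 *	interrupts_restore(ipl);
 */
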
/** Process syscall to create new thread.
 *
 */
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
    thread_t *t;
    char namebuf[THREAD_NAME_BUFLEN];
    uspace_arg_t *kernel_uarg;
    __u32 tid;

    copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);

    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));

    if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
        tid = t->tid;
        thread_ready(t);
        return (__native) tid;
    } else {
        free(kernel_uarg);
    }

    return (__native) -1;
}

/** Process syscall to terminate thread.
 *
 */
__native sys_thread_exit(int uspace_status)
{
    thread_exit();
    /* Unreachable */
    return 0;
}