Diff between Rev 2050 and Rev 2067 (most recent revision: 2071)
Line 78... | Line 78...

 	"Entering",
 	"Exiting",
 	"Undead"
 };
 
-/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
+/** Lock protecting the threads_btree B+tree.
+ *
+ * For locking rules, see declaration thereof.
+ */
 SPINLOCK_INITIALIZE(threads_lock);
 
 /** B+tree of all threads.
  *
- * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
- * as the threads_lock is held.
+ * When a thread is found in the threads_btree B+tree, it is guaranteed to
+ * exist as long as the threads_lock is held.
  */
 btree_t threads_btree;
 
 SPINLOCK_INITIALIZE(tidlock);
 uint32_t last_tid = 0;
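The reworded comment pins down the lookup guarantee: a thread_t found in threads_btree is only guaranteed to exist while threads_lock is held. A minimal sketch of that pattern, assuming a hypothetical B+tree lookup helper (its name and signature are illustrative, not taken from this revision) and the interrupt/spinlock discipline used elsewhere in this file:

/* Hypothetical check: the looked-up thread may only be relied upon while
 * threads_lock is held; once the lock is dropped, it may be gone. */
static bool thread_known(thread_t *t)
{
	ipl_t ipl;
	bool found;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	/* lookup_in_btree() stands in for whatever search the B+tree offers */
	found = (lookup_in_btree(&threads_btree,
	    (btree_key_t) ((uintptr_t) t)) != NULL);
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);

	return found;
}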
Line 96... | Line 99...

 static slab_cache_t *thread_slab;
 #ifdef ARCH_HAS_FPU
 slab_cache_t *fpu_context_slab;
 #endif
 
-/** Thread wrapper
+/** Thread wrapper.
  *
- * This wrapper is provided to ensure that every thread
- * makes a call to thread_exit() when its implementing
- * function returns.
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
  *
  * interrupts_disable() is assumed.
  *
  */
 static void cushion(void)
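Per the comment, the wrapper exists so that a thread whose implementing function simply returns still ends in thread_exit(). A rough sketch of what such a wrapper does, assuming the thread structure carries the function and its argument in fields named thread_code and thread_arg (the field names are assumptions for illustration, not taken from this revision):

static void cushion_sketch(void)
{
	/* thread_code/thread_arg are assumed field names */
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* the doc comment says interrupts_disable() is assumed on entry;
	 * a real wrapper would presumably re-enable them before the payload */
	interrupts_enable();
	f(arg);

	/* guarantee the exit path even when f() just returns */
	thread_exit();
	/* not reached */
}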
Line 199... | Line 201...

  */
 void thread_init(void)
 {
 	THREAD = NULL;
 	atomic_set(&nrdy,0);
-	thread_slab = slab_cache_create("thread_slab",
-	    sizeof(thread_t),0,
+	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
 	    thr_constructor, thr_destructor, 0);
+
 #ifdef ARCH_HAS_FPU
-	fpu_context_slab = slab_cache_create("fpu_slab",
-	    sizeof(fpu_context_t),
-	    FPU_CONTEXT_ALIGN,
-	    NULL, NULL, 0);
+	fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
+	    FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
 
 	btree_create(&threads_btree);
 }
 
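For orientation, the roles of the slab_cache_create() arguments can be read off the two calls above; the annotation below is an inference from those call sites, not from the function's declaration (which is not part of this diff):

thread_slab = slab_cache_create(
    "thread_slab",		/* cache name */
    sizeof(thread_t),		/* size of one cached object */
    0,				/* no special alignment (the FPU cache uses FPU_CONTEXT_ALIGN) */
    thr_constructor,		/* object constructor */
    thr_destructor,		/* object destructor */
    0);				/* flags */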
Line 232... | Line 232...

 
 	spinlock_lock(&t->lock);
 
 	ASSERT(! (t->state == Ready));
 
-	i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;
+	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
 
 	cpu = CPU;
 	if (t->flags & THREAD_FLAG_WIRED) {
 		cpu = t->cpu;
 	}
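The whitespace-only change to the i = ... assignment above is easier to check with its behaviour in mind: it bumps t->priority by one but never past RQ_COUNT - 1, and uses the result as the run-queue index. A worked illustration, assuming RQ_COUNT is 16 (the actual value is not part of this diff):

/* assuming RQ_COUNT == 16, so valid run-queue indices are 0 .. 15 */
t->priority = 3;
i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;	/* i == 4  */

t->priority = 15;
i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;	/* i == 15 */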
Line 265... | Line 265...

  *
  * Assume thread->lock is held!!
  */
 void thread_destroy(thread_t *t)
 {
 	bool destroy_task = false;
 
 	ASSERT(t->state == Exiting || t->state == Undead);
 	ASSERT(t->task);
 	ASSERT(t->cpu);
 
 	spinlock_lock(&t->cpu->lock);
-	if(t->cpu->fpu_owner==t)
-		t->cpu->fpu_owner=NULL;
+	if(t->cpu->fpu_owner == t)
+		t->cpu->fpu_owner = NULL;
 	spinlock_unlock(&t->cpu->lock);
 
 	spinlock_unlock(&t->lock);
 
 	spinlock_lock(&threads_lock);
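Since the comment requires thread->lock to be held on entry, and the body above releases it itself with spinlock_unlock(&t->lock), a caller follows roughly this convention; the sketch below is illustrative, not code from the file:

/* hypothetical caller */
spinlock_lock(&t->lock);
/* ... t->state must already be Exiting or Undead, per the ASSERTs above ... */
thread_destroy(t);	/* consumes t->lock; the caller must not unlock it again */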
Line 308... | Line 308...

  * @param func Thread's implementing function.
  * @param arg Thread's implementing function argument.
  * @param task Task to which the thread belongs.
  * @param flags Thread flags.
  * @param name Symbolic name.
- * @param uncounted Thread's accounting doesn't affect accumulated task accounting.
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *	accounting.
  *
  * @return New thread's structure on success, NULL on failure.
  *
  */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name, bool uncounted)
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    int flags, char *name, bool uncounted)
 {
 	thread_t *t;
 	ipl_t ipl;
 
 	t = (thread_t *) slab_alloc(thread_slab, 0);
 	if (!t)
 		return NULL;
 
 	/* Not needed, but good for debugging */
-	memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+	memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
+	    0);
 
 	ipl = interrupts_disable();
 	spinlock_lock(&tidlock);
 	t->tid = ++last_tid;
 	spinlock_unlock(&tidlock);
 	interrupts_restore(ipl);
 
 	context_save(&t->saved_context);
-	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);
+	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
+	    THREAD_STACK_SIZE);
 
 	the_initialize((the_t *) t->kstack);
 
 	ipl = interrupts_disable();
 	t->saved_context.ipl = interrupts_read();
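A minimal caller sketch based only on the signature and doc comment above; the worker function, the flag value 0, and the follow-up call to thread_ready() (the function whose body is partially visible in the Line 232 hunk) are illustrative assumptions:

/* hypothetical payload; returning from it ends in thread_exit() via cushion() */
static void worker(void *arg)
{
	/* ... do work ... */
}

/* somewhere with a valid task_t *task in scope */
thread_t *t = thread_create(worker, NULL, task, 0, "worker", false);
if (t)
	thread_ready(t);	/* make the new thread runnable */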
Line 374... | Line 378... | ||
374 | t->task = task; |
378 | t->task = task; |
375 | 379 | ||
376 | t->fpu_context_exists = 0; |
380 | t->fpu_context_exists = 0; |
377 | t->fpu_context_engaged = 0; |
381 | t->fpu_context_engaged = 0; |
378 | 382 | ||
379 | thread_create_arch(t); /* might depend on previous initialization */ |
383 | /* might depend on previous initialization */ |
- | 384 | thread_create_arch(t); |
|
380 | 385 | ||
381 | /* |
386 | /* |
382 | * Attach to the containing task. |
387 | * Attach to the containing task. |
383 | */ |
388 | */ |
384 | ipl = interrupts_disable(); |
389 | ipl = interrupts_disable(); |
Line 396... | Line 401...

 
 	/*
 	 * Register this thread in the system-wide list.
 	 */
 	spinlock_lock(&threads_lock);
-	btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
+	btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
+	    NULL);
 	spinlock_unlock(&threads_lock);
 
 	interrupts_restore(ipl);
 
 	return t;
 }
 
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting
- * state. All pending timeouts are executed.
- *
+ * End current thread execution and switch it to the exiting state. All pending
+ * timeouts are executed.
  */
 void thread_exit(void)
 {
 	ipl_t ipl;
 
 restart:
 	ipl = interrupts_disable();
 	spinlock_lock(&THREAD->lock);
-	if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
+	if (THREAD->timeout_pending) {
+		/* busy waiting for timeouts in progress */
 		spinlock_unlock(&THREAD->lock);
 		interrupts_restore(ipl);
 		goto restart;
 	}
 	THREAD->state = Exiting;
Line 441... | Line 447...

  * @param sec Number of seconds to sleep.
  *
  */
 void thread_sleep(uint32_t sec)
 {
-	thread_usleep(sec*1000000);
+	thread_usleep(sec * 1000000);
 }
 
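The body above is a plain seconds-to-microseconds conversion carried out in 32-bit arithmetic, for example:

thread_sleep(2);	/* equivalent to thread_usleep(2000000) */

/* sec * 1000000 is evaluated as uint32_t, so arguments above roughly 4294
 * seconds would overflow; callers are presumably expected to stay below that */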
 /** Wait for another thread to exit.
  *
  * @param t Thread to join on exit.