Rev 1839 | Rev 1854
Line 127 (Rev 1839) | Line 127 (Rev 1854)

      spinlock_initialize(&t->lock, "thread_t_lock");
      link_initialize(&t->rq_link);
      link_initialize(&t->wq_link);
      link_initialize(&t->th_link);
+
+     /* call the architecture-specific part of the constructor */
+     thr_constructor_arch(t);

  #ifdef ARCH_HAS_FPU
  # ifdef CONFIG_FPU_LAZY
      t->saved_fpu_context = NULL;
  # else
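Only the lazy branch of the FPU section is visible in this hunk; the eager branch after the `# else` lies outside the diff context. Purely as a sketch of what the two strategies amount to, assuming the constructor takes a kmflags argument (assumed here) and uses the same fpu_context_slab cache that the destructor below frees into:

#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
    /* Lazy: no FPU context yet; one is allocated on first FPU use. */
    t->saved_fpu_context = NULL;
# else
    /* Eager (sketch, not part of this diff): allocate the context up front
     * from the cache the destructor frees it back to. */
    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    if (!t->saved_fpu_context)
        return -1;    /* assumed failure convention for the slab constructor */
# endif
#endif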
Line 155 (Rev 1839) | Line 158 (Rev 1854)

  /** Destruction of thread_t object */
  static int thr_destructor(void *obj)
  {
      thread_t *t = (thread_t *) obj;

+     /* call the architecture-specific part of the destructor */
+     thr_destructor_arch(t);
+
      frame_free(KA2PA(t->kstack));
  #ifdef ARCH_HAS_FPU
      if (t->saved_fpu_context)
          slab_free(fpu_context_slab, t->saved_fpu_context);
  #endif
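The new thr_constructor_arch() and thr_destructor_arch() calls give each architecture a hook into the slab constructor and destructor for thread_t. The per-port bodies are not part of this diff; a minimal sketch for a port that keeps no extra per-thread state, with the signatures inferred from the call sites above, would be:

/* Hypothetical no-op hooks for an architecture with no extra per-thread
 * state; real ports would set up or release arch-specific resources here. */
void thr_constructor_arch(thread_t *t)
{
    (void) t;    /* nothing beyond the generic thread_t fields to set up */
}

void thr_destructor_arch(thread_t *t)
{
    (void) t;    /* nothing arch-specific to release */
}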
Line 208 (Rev 1839) | Line 214 (Rev 1854)

      ASSERT(! (t->state == Ready));

      i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

      cpu = CPU;
-     if (t->flags & X_WIRED) {
+     if (t->flags & THREAD_FLAG_WIRED) {
          cpu = t->cpu;
      }
      t->state = Ready;
      spinlock_unlock(&t->lock);
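The per-thread flag bits move from the old X_* names into a THREAD_FLAG_* namespace; besides THREAD_FLAG_WIRED shown here, the last hunk of this diff uses THREAD_FLAG_USPACE. The actual definitions live in the thread header and are not shown; an illustrative layout (bit positions assumed, not taken from the source) would be:

/* Illustrative only: concrete bit values are defined in the thread header,
 * not in this diff. */
#define THREAD_FLAG_USPACE    (1 << 0)    /* thread executes in userspace */
#define THREAD_FLAG_WIRED     (1 << 1)    /* thread is wired to t->cpu */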
Line 293 (Rev 1839) | Line 299 (Rev 1854)

      ipl_t ipl;

      t = (thread_t *) slab_alloc(thread_slab, 0);
      if (!t)
          return NULL;
-
-     thread_create_arch(t);

      /* Not needed, but good for debugging */
      memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

      ipl = interrupts_disable();
Line 321 (Rev 1839) | Line 325 (Rev 1854)

      t->thread_code = func;
      t->thread_arg = arg;
      t->ticks = -1;
      t->priority = -1;          /* start in rq[0] */
      t->cpu = NULL;
-     t->flags = 0;
+     t->flags = flags;
      t->state = Entering;
      t->call_me = NULL;
      t->call_me_with = NULL;

      timeout_initialize(&t->sleep_timeout);
Line 345 (Rev 1839) | Line 349 (Rev 1854)

      t->task = task;

      t->fpu_context_exists = 0;
      t->fpu_context_engaged = 0;
+
+     thread_create_arch(t);     /* might depend on previous initialization */

      /*
       * Attach to the containing task.
       */
      ipl = interrupts_disable();
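Together with the earlier hunk that drops the thread_create_arch() call made right after slab_alloc(), the architecture-specific step of thread_create() now runs only after the generic fields are filled in, matching the new "might depend on previous initialization" comment. A condensed sketch of the resulting Rev 1854 ordering, stitched together from the hunks above (everything between the shown lines is elided):

t = (thread_t *) slab_alloc(thread_slab, 0);
if (!t)
    return NULL;

/* ... generic initialization, as shown in the hunks above ... */
t->thread_code = func;
t->thread_arg = arg;
t->flags = flags;                 /* caller-supplied flags are now stored */
t->task = task;
t->fpu_context_exists = 0;
t->fpu_context_engaged = 0;

thread_create_arch(t);            /* arch hook runs last, after generic setup */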
Line 587 (Rev 1839) | Line 593 (Rev 1854)

      if (rc != 0) {
          free(kernel_uarg);
          return (unative_t) rc;
      }

-     if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
+     if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
          tid = t->tid;
          thread_ready(t);
          return (unative_t) tid;
      } else {
          free(kernel_uarg);
603 | free(kernel_uarg); |