/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

/** Thread states */
char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Undead"
};

/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
 * as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
uint32_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Interrupts are assumed to be disabled on entry.
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* This is where each thread wakes up after its creation */
	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);
	thread_exit();
	/* Not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *) obj;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
#  ifdef CONFIG_FPU_LAZY
	t->saved_fpu_context = NULL;
#  else
	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
	if (!t->saved_fpu_context)
		return -1;
#  endif
#endif

	t->kstack = frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
	if (!t->kstack) {
#ifdef ARCH_HAS_FPU
		if (t->saved_fpu_context)
			slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
		return -1;
	}

	return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
	thread_t *t = (thread_t *) obj;

	frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
	if (t->saved_fpu_context)
		slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
	return 1;	/* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy, 0);
	thread_slab = slab_cache_create("thread_slab",
		sizeof(thread_t), 0,
		thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
	fpu_context_slab = slab_cache_create("fpu_slab",
		sizeof(fpu_context_t),
		FPU_CONTEXT_ALIGN,
		NULL, NULL, 0);
#endif

	btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
	cpu_t *cpu;
	runq_t *r;
	ipl_t ipl;
	int i, avg;

	ipl = interrupts_disable();

	spinlock_lock(&t->lock);

	ASSERT(!(t->state == Ready));

	/*
	 * Each call penalizes the thread by one priority level, down to the
	 * lowest-priority run queue (index RQ_COUNT - 1).
	 */
	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

	cpu = CPU;
	if (t->flags & X_WIRED) {
		cpu = t->cpu;
	}
	t->state = Ready;
	spinlock_unlock(&t->lock);

	/*
	 * Append t to respective ready queue on respective processor.
	 */
	r = &cpu->rq[i];
	spinlock_lock(&r->lock);
	list_append(&t->rq_link, &r->rq_head);
	r->n++;
	spinlock_unlock(&r->lock);

	atomic_inc(&nrdy);
	avg = atomic_get(&nrdy) / config.cpu_active;
	atomic_inc(&cpu->nrdy);

	interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assumes that thread->lock is held!
 */
void thread_destroy(thread_t *t)
{
	bool destroy_task = false;

	ASSERT(t->state == Exiting || t->state == Undead);
	ASSERT(t->task);
	ASSERT(t->cpu);

	spinlock_lock(&t->cpu->lock);
	if (t->cpu->fpu_owner == t)
		t->cpu->fpu_owner = NULL;
	spinlock_unlock(&t->cpu->lock);

	spinlock_unlock(&t->lock);

	spinlock_lock(&threads_lock);
	btree_remove(&threads_btree, (btree_key_t) ((uintptr_t) t), NULL);
	spinlock_unlock(&threads_lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	if (--t->task->refcount == 0) {
		t->task->accept_new_threads = false;
		destroy_task = true;
	}
	spinlock_unlock(&t->task->lock);

	if (destroy_task)
		task_destroy(t->task);

	slab_free(thread_slab, t);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	thread_create_arch(t);

	/* Not needed, but good for debugging */
	memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	memcpy(t->name, name, THREAD_NAME_BUFLEN);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->priority = -1;	/* start in rq[0] */
	t->cpu = NULL;
	t->flags = 0;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_interruptible = false;
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->in_copy_from_uspace = false;
	t->in_copy_to_uspace = false;

	t->interrupted = false;
	t->join_type = None;
	t->detached = false;
	waitq_initialize(&t->join_wq);

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	/*
	 * Attach to the containing task.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&task->lock);
	if (!task->accept_new_threads) {
		spinlock_unlock(&task->lock);
		slab_free(thread_slab, t);
		interrupts_restore(ipl);
		return NULL;
	}
	list_append(&t->th_link, &task->th_head);
	if (task->refcount++ == 0)
		task->main_thread = t;
	spinlock_unlock(&task->lock);

	/*
	 * Register this thread in the system-wide list.
	 */
	spinlock_lock(&threads_lock);
	btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
	spinlock_unlock(&threads_lock);

	interrupts_restore(ipl);

	return t;
}
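
/*
 * Illustrative sketch (not part of the original file): a typical kernel
 * caller pairs thread_create() with thread_ready(); worker is a
 * hypothetical void worker(void *arg) function:
 *
 *	thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *	if (t)
 *		thread_ready(t);
 *
 * When worker() returns, cushion() above calls thread_exit() on the
 * thread's behalf.
 */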

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {	/* busy waiting for timeouts in progress */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();

	/* Not reached */
	while (1)
		;
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
	/* Note: sec * 1000000 is evaluated in 32 bits and overflows for
	 * sec >= 4295. */
	thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
	ipl_t ipl;
	int rc;

	if (t == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

	return rc;
}
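
/*
 * Illustrative sketch (not part of the original file): waiting up to one
 * second for a hypothetical undetached thread t and then reaping it. The
 * constants SYNCH_FLAGS_NONE and ESYNCH_OK_BLOCKED are recalled from
 * synch.h and should be treated as assumptions here.
 *
 *	int rc = thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE);
 *	if (rc == ESYNCH_OK_BLOCKED)
 *		thread_detach(t);	(see thread_detach() below)
 */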

/** Detach thread.
 *
 * Mark the thread as detached; if the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
	ipl_t ipl;

	/*
	 * Since the thread is expected not to be already detached,
	 * the pointer to it must still be valid.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	if (t->state == Undead) {
		thread_destroy(t);	/* unlocks &t->lock */
		interrupts_restore(ipl);
		return;
	} else {
		t->detached = true;
	}
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->call_me = call_me;
	THREAD->call_me_with = call_me_with;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);
}
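
/*
 * Illustrative sketch (not part of the original file): per the description
 * above, the current thread can arrange for a function to run on its behalf
 * at the next context switch to it; cleanup() and data are hypothetical.
 *
 *	thread_register_call_me(cleanup, data);
 */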

/** Print list of threads debug info */
void thread_print_list(void)
{
	link_t *cur;
	ipl_t ipl;

	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			thread_t *t;

			t = (thread_t *) node->value[i];
			printf("%s: address=%#zx, tid=%zd, state=%s, task=%#zx, code=%#zx, stack=%#zx, cpu=",
				t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
			if (t->cpu)
				printf("cpu%zd", t->cpu->id);
			else
				printf("none");
			if (t->state == Sleeping) {
				printf(", kst=%#zx", t->kstack);
				printf(", wq=%#zx", t->sleep_queue);
			}
			printf("\n");
		}
	}

	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
	btree_node_t *leaf;

	return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
}
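
/*
 * Illustrative sketch (not part of the original file): per the comment
 * above, the caller provides the locking, following the same idiom used
 * in thread_print_list():
 *
 *	ipl = interrupts_disable();
 *	spinlock_lock(&threads_lock);
 *	if (thread_exists(t)) {
 *		(t is guaranteed to stay valid here)
 *	}
 *	spinlock_unlock(&threads_lock);
 *	interrupts_restore(ipl);
 */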

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
	thread_t *t;
	char namebuf[THREAD_NAME_BUFLEN];
	uspace_arg_t *kernel_uarg;
	uint32_t tid;
	int rc;

	rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
	if (rc != 0)
		return (unative_t) rc;

	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != 0) {
		free(kernel_uarg);
		return (unative_t) rc;
	}

	if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
		tid = t->tid;
		thread_ready(t);
		return (unative_t) tid;
	} else {
		free(kernel_uarg);
	}

	return (unative_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
	thread_exit();
	/* Unreachable */
	return 0;
}

/** @}
 */