/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file thread.c
 * @brief Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};     /**< Thread states */

/** Lock protecting the threads_btree B+tree of all threads. For locking rules, see the declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);
btree_t threads_btree;          /**< B+tree of all threads. */

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * interrupts_disable() is assumed to have been called.
 *
 */
static void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;
        void *arg = THREAD->thread_arg;

        /* This is where each thread wakes up after its creation. */
        spinlock_unlock(&THREAD->lock);
        interrupts_enable();

        f(arg);
        thread_exit();
        /* Not reached. */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
        thread_t *t = (thread_t *) obj;
        pfn_t pfn;
        int status;

        spinlock_initialize(&t->lock, "thread_t_lock");
        link_initialize(&t->rq_link);
        link_initialize(&t->wq_link);
        link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
        t->saved_fpu_context = NULL;
# else
        t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
        if (!t->saved_fpu_context)
                return -1;
# endif
#endif

        pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags, &status);
        if (status) {
#ifdef ARCH_HAS_FPU
                if (t->saved_fpu_context)
                        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
                return -1;
        }
        t->kstack = (__u8 *) PA2KA(PFN2ADDR(pfn));

        return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
        thread_t *t = (thread_t *) obj;

        frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
                slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return 1;       /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
        THREAD = NULL;
        atomic_set(&nrdy, 0);
        thread_slab = slab_cache_create("thread_slab",
                                        sizeof(thread_t), 0,
                                        thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
        fpu_context_slab = slab_cache_create("fpu_slab",
                                             sizeof(fpu_context_t),
                                             FPU_CONTEXT_ALIGN,
                                             NULL, NULL, 0);
#endif

        btree_create(&threads_btree);
}

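/*
 * Illustrative sketch (added commentary, not part of the original source):
 * thread_t objects are served from thread_slab, so thr_constructor() and
 * thr_destructor() run when the slab allocator grows or shrinks the cache,
 * not necessarily on every allocation. A fresh object obtained later via
 *
 *      thread_t *t = (thread_t *) slab_alloc(thread_slab, 0);
 *
 * therefore already has its spinlock, list links and kernel stack prepared,
 * and slab_free(thread_slab, t) can return it to the cache without tearing
 * the stack down immediately.
 */
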
/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
        cpu_t *cpu;
        runq_t *r;
        ipl_t ipl;
        int i, avg;

        ipl = interrupts_disable();

        spinlock_lock(&t->lock);

        ASSERT(!(t->state == Ready));

        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

        cpu = CPU;
        if (t->flags & X_WIRED) {
                cpu = t->cpu;
        }
        t->state = Ready;
        spinlock_unlock(&t->lock);

        /*
         * Append t to the respective ready queue on the respective processor.
         */
        r = &cpu->rq[i];
        spinlock_lock(&r->lock);
        list_append(&t->rq_link, &r->rq_head);
        r->n++;
        spinlock_unlock(&r->lock);

        atomic_inc(&nrdy);
        avg = atomic_get(&nrdy) / config.cpu_active;
        atomic_inc(&cpu->nrdy);

        interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!!
 */
void thread_destroy(thread_t *t)
{
        ASSERT(t->state == Exiting);
        ASSERT(t->task);
        ASSERT(t->cpu);

        spinlock_lock(&t->cpu->lock);
        if (t->cpu->fpu_owner == t)
                t->cpu->fpu_owner = NULL;
        spinlock_unlock(&t->cpu->lock);

        /*
         * Detach from the containing task.
         */
        spinlock_lock(&t->task->lock);
        list_remove(&t->th_link);
        spinlock_unlock(&t->task->lock);

        spinlock_unlock(&t->lock);

        spinlock_lock(&threads_lock);
        btree_remove(&threads_btree, (btree_key_t) ((__address) t), NULL);
        spinlock_unlock(&threads_lock);

        slab_free(thread_slab, t);
}

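/*
 * Illustrative call pattern (hypothetical caller, not part of the original
 * source): thread_destroy() consumes t->lock, i.e. the caller locks and the
 * callee unlocks, so a correct invocation looks like
 *
 *      ipl_t ipl = interrupts_disable();
 *      spinlock_lock(&t->lock);
 *      thread_destroy(t);              // releases t->lock internally
 *      interrupts_restore(ipl);
 *
 * Note that t must already be in the Exiting state, as the ASSERTs above
 * enforce.
 */
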
/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name  Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
        thread_t *t;
        ipl_t ipl;

        t = (thread_t *) slab_alloc(thread_slab, 0);
        if (!t)
                return NULL;

        thread_create_arch(t);

        /* Not needed, but good for debugging */
        memsetb((__address) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

        ipl = interrupts_disable();
        spinlock_lock(&tidlock);
        t->tid = ++last_tid;
        spinlock_unlock(&tidlock);
        interrupts_restore(ipl);

        context_save(&t->saved_context);
        context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

        the_initialize((the_t *) t->kstack);

        ipl = interrupts_disable();
        t->saved_context.ipl = interrupts_read();
        interrupts_restore(ipl);

        memcpy(t->name, name, THREAD_NAME_BUFLEN);

        t->thread_code = func;
        t->thread_arg = arg;
        t->ticks = -1;
        t->priority = -1;               /* start in rq[0] */
        t->cpu = NULL;
        t->flags = 0;
        t->state = Entering;
        t->call_me = NULL;
        t->call_me_with = NULL;

        timeout_initialize(&t->sleep_timeout);
        t->sleep_interruptible = false;
        t->sleep_queue = NULL;
        t->timeout_pending = 0;

        t->in_copy_from_uspace = false;
        t->in_copy_to_uspace = false;

        t->rwlock_holder_type = RWLOCK_NONE;

        t->task = task;

        t->fpu_context_exists = 0;
        t->fpu_context_engaged = 0;

        /*
         * Register this thread in the system-wide list.
         */
        ipl = interrupts_disable();
        spinlock_lock(&threads_lock);
        btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
        spinlock_unlock(&threads_lock);

        /*
         * Attach to the containing task.
         */
        spinlock_lock(&task->lock);
        list_append(&t->th_link, &task->th_head);
        spinlock_unlock(&task->lock);

        interrupts_restore(ipl);

        return t;
}

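/*
 * Example (illustrative sketch, mirroring what sys_thread_create() below
 * does with uinit): spawning a kernel thread and handing it to the
 * scheduler. The worker function and its name are hypothetical.
 *
 *      static void worker(void *arg)
 *      {
 *              // ... do work; returning lands in cushion() -> thread_exit()
 *      }
 *
 *      thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *      if (t)
 *              thread_ready(t);        // new threads stay in the Entering
 *                                      // state until made ready
 */
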
/** Make thread exiting
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
        ipl_t ipl;

restart:
        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) {  /* busy waiting for timeouts in progress */
                spinlock_unlock(&THREAD->lock);
                interrupts_restore(ipl);
                goto restart;
        }
        THREAD->state = Exiting;
        spinlock_unlock(&THREAD->lock);
        scheduler();
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
        thread_usleep(sec * 1000000);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
        waitq_t wq;

        waitq_initialize(&wq);

        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

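/*
 * Design note (added commentary, not part of the original source): the wait
 * queue above lives on the local stack and its address never escapes to any
 * waker, so waitq_sleep_timeout() can only return via its timeout -- which
 * is exactly the fixed-length sleep thread_usleep() wants.
 */
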
/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        THREAD->call_me = call_me;
        THREAD->call_me_with = call_me_with;
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
}

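/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * source): a thread can ask for a function to be run out of its own
 * context on the next context switch back to it:
 *
 *      static void after_switch(void *arg)
 *      {
 *              // executed on the next context switch to the
 *              // registering thread
 *      }
 *
 *      thread_register_call_me(after_switch, NULL);
 */
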
/** Print list of threads debug info */
void thread_print_list(void)
{
        link_t *cur;
        ipl_t ipl;

        /* Messing with thread structures, avoid deadlock */
        ipl = interrupts_disable();
        spinlock_lock(&threads_lock);

        for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
                btree_node_t *node;
                int i;

                node = list_get_instance(cur, btree_node_t, leaf_link);
                for (i = 0; i < node->keys; i++) {
                        thread_t *t;

                        t = (thread_t *) node->value[i];
                        printf("%s: address=%#zX, tid=%zd, state=%s, task=%#zX, code=%#zX, stack=%#zX, cpu=",
                                t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
                        if (t->cpu)
                                printf("cpu%zd", t->cpu->id);
                        else
                                printf("none");
                        if (t->state == Sleeping) {
                                printf(", kst=%#zX", t->kstack);
                                printf(", wq=%#zX", t->sleep_queue);
                        }
                        printf("\n");
                }
        }

        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
        btree_node_t *leaf;

        return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL;
}

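/*
 * Illustrative call pattern (not part of the original source), following
 * the precondition stated above -- interrupts disabled and threads_lock
 * held for as long as the answer is relied upon:
 *
 *      ipl_t ipl = interrupts_disable();
 *      spinlock_lock(&threads_lock);
 *      if (thread_exists(t)) {
 *              // t is guaranteed to stay in the system only while
 *              // threads_lock remains held
 *      }
 *      spinlock_unlock(&threads_lock);
 *      interrupts_restore(ipl);
 */
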
/** Process syscall to create new thread.
 *
 */
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
        thread_t *t;
        char namebuf[THREAD_NAME_BUFLEN];
        uspace_arg_t *kernel_uarg;
        __u32 tid;
        int rc;

        rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
        if (rc != 0)
                return (__native) rc;

        kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
        rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
        if (rc != 0) {
                free(kernel_uarg);
                return (__native) rc;
        }

        if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
                tid = t->tid;
                thread_ready(t);
                return (__native) tid;
        } else {
                free(kernel_uarg);
        }

        return (__native) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
__native sys_thread_exit(int uspace_status)
{
        thread_exit();
        /* Unreachable */
        return 0;
}