Diff: Rev 2030 → Rev 2032
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

/** Thread states */
char *thread_states[] = {
        "Invalid",
        "Running",
        "Sleeping",
        "Ready",
        "Entering",
        "Exiting",
        "Undead"
};

/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
uint32_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * interrupts_disable() is assumed.
 *
 */
static void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;
        void *arg = THREAD->thread_arg;
        THREAD->last_cycle = get_cycle();

        /* this is where each thread wakes up after its creation */
        spinlock_unlock(&THREAD->lock);
        interrupts_enable();

        f(arg);
        thread_exit();
        /* not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
        thread_t *t = (thread_t *) obj;

        spinlock_initialize(&t->lock, "thread_t_lock");
        link_initialize(&t->rq_link);
        link_initialize(&t->wq_link);
        link_initialize(&t->th_link);

        /* call the architecture-specific part of the constructor */
        thr_constructor_arch(t);

#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
        t->saved_fpu_context = NULL;
# else
        t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
        if (!t->saved_fpu_context)
                return -1;
# endif
#endif

        t->kstack = frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
        if (!t->kstack) {
#ifdef ARCH_HAS_FPU
                if (t->saved_fpu_context)
                        slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
                return -1;
        }

        return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
        thread_t *t = (thread_t *) obj;

        /* call the architecture-specific part of the destructor */
        thr_destructor_arch(t);

        frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
        if (t->saved_fpu_context)
                slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
        return 1;       /* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
        THREAD = NULL;
        atomic_set(&nrdy, 0);
        thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
            thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
        fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
            FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

        btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
        cpu_t *cpu;
        runq_t *r;
        ipl_t ipl;
        int i, avg;

        ipl = interrupts_disable();

        spinlock_lock(&t->lock);

        ASSERT(!(t->state == Ready));

        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

        cpu = CPU;
        if (t->flags & THREAD_FLAG_WIRED) {
                cpu = t->cpu;
        }
        t->state = Ready;
        spinlock_unlock(&t->lock);

        /*
         * Append t to respective ready queue on respective processor.
         */
        r = &cpu->rq[i];
        spinlock_lock(&r->lock);
        list_append(&t->rq_link, &r->rq_head);
        r->n++;
        spinlock_unlock(&r->lock);

        atomic_inc(&nrdy);
        avg = atomic_get(&nrdy) / config.cpu_active;
        atomic_inc(&cpu->nrdy);

        interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, CPUs etc. and destroy it.
 *
 * Assume thread->lock is held!
 */
void thread_destroy(thread_t *t)
{
        bool destroy_task = false;

        ASSERT(t->state == Exiting || t->state == Undead);
        ASSERT(t->task);
        ASSERT(t->cpu);

        spinlock_lock(&t->cpu->lock);
        if (t->cpu->fpu_owner == t)
                t->cpu->fpu_owner = NULL;
        spinlock_unlock(&t->cpu->lock);

        spinlock_unlock(&t->lock);

        spinlock_lock(&threads_lock);
        btree_remove(&threads_btree, (btree_key_t) ((uintptr_t) t), NULL);
        spinlock_unlock(&threads_lock);

        /*
         * Detach from the containing task.
         */
        spinlock_lock(&t->task->lock);
        list_remove(&t->th_link);
        if (--t->task->refcount == 0) {
                t->task->accept_new_threads = false;
                destroy_task = true;
        }
        spinlock_unlock(&t->task->lock);

        if (destroy_task)
                task_destroy(t->task);

        slab_free(thread_slab, t);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name  Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (*func)(void *), void *arg, task_t *task, int flags, char *name)
{
        thread_t *t;
        ipl_t ipl;

        t = (thread_t *) slab_alloc(thread_slab, 0);
        if (!t)
                return NULL;

        /* Not needed, but good for debugging */
        memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

        ipl = interrupts_disable();
        spinlock_lock(&tidlock);
        t->tid = ++last_tid;
        spinlock_unlock(&tidlock);
        interrupts_restore(ipl);

        context_save(&t->saved_context);
        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);

        the_initialize((the_t *) t->kstack);

        ipl = interrupts_disable();
        t->saved_context.ipl = interrupts_read();
        interrupts_restore(ipl);

        memcpy(t->name, name, THREAD_NAME_BUFLEN);

        t->thread_code = func;
        t->thread_arg = arg;
        t->ticks = -1;
        t->cycles = 0;
        t->priority = -1;       /* start in rq[0] */
        t->cpu = NULL;
        t->flags = flags;
        t->state = Entering;
        t->call_me = NULL;
        t->call_me_with = NULL;

        timeout_initialize(&t->sleep_timeout);
        t->sleep_interruptible = false;
        t->sleep_queue = NULL;
        t->timeout_pending = 0;

        t->in_copy_from_uspace = false;
        t->in_copy_to_uspace = false;

        t->interrupted = false;
        t->join_type = None;
        t->detached = false;
        waitq_initialize(&t->join_wq);

        t->rwlock_holder_type = RWLOCK_NONE;

        t->task = task;

        t->fpu_context_exists = 0;
        t->fpu_context_engaged = 0;

        thread_create_arch(t);  /* might depend on previous initialization */

        /*
         * Attach to the containing task.
         */
        ipl = interrupts_disable();
        spinlock_lock(&task->lock);
        if (!task->accept_new_threads) {
                spinlock_unlock(&task->lock);
                slab_free(thread_slab, t);
                interrupts_restore(ipl);
                return NULL;
        }
        list_append(&t->th_link, &task->th_head);
        if (task->refcount++ == 0)
                task->main_thread = t;
        spinlock_unlock(&task->lock);

        /*
         * Register this thread in the system-wide list.
         */
        spinlock_lock(&threads_lock);
        btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
        spinlock_unlock(&threads_lock);

        interrupts_restore(ipl);

        return t;
}
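
/*
 * Illustrative sketch (compiled out): one way a kernel component might use
 * the API above to spawn a worker thread. The worker() function and the
 * "worker" name are assumptions made up for this example.
 */
#if 0
static void worker(void *arg)
{
        /* ... do the work; cushion() calls thread_exit() once we return ... */
}

static void example_spawn(void)
{
        thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
        if (t)
                thread_ready(t);        /* make the new thread runnable */
}
#endif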

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
        ipl_t ipl;

restart:
        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        if (THREAD->timeout_pending) {  /* busy waiting for timeouts in progress */
                spinlock_unlock(&THREAD->lock);
                interrupts_restore(ipl);
                goto restart;
        }
        THREAD->state = Exiting;
        spinlock_unlock(&THREAD->lock);
        scheduler();

        /* Not reached */
        while (1)
                ;
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
        thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t     Thread to join on exit.
 * @param usec  Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
        ipl_t ipl;
        int rc;

        if (t == THREAD)
                return EINVAL;

        /*
         * Since thread join can only be called once on an undetached thread,
         * the thread pointer is guaranteed to be still valid.
         */

        ipl = interrupts_disable();
        spinlock_lock(&t->lock);
        ASSERT(!t->detached);
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);

        rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

        return rc;
}
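
/*
 * Illustrative sketch (compiled out): waiting up to one second for a helper
 * thread to exit. The helper pointer is an assumption for this example, and
 * SYNCH_FLAGS_NONE is assumed to be the no-op flag from synch.h; rc is one
 * of the errno.h/synch.h codes mentioned above.
 */
#if 0
        int rc = thread_join_timeout(helper, 1000000, SYNCH_FLAGS_NONE);
        /* On success, the helper is Undead; detach it to free its resources. */
        thread_detach(helper);
#endif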

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
        ipl_t ipl;

        /*
         * Since the thread is expected not to be detached yet,
         * the pointer to it must still be valid.
         */
        ipl = interrupts_disable();
        spinlock_lock(&t->lock);
        ASSERT(!t->detached);
        if (t->state == Undead) {
                thread_destroy(t);      /* unlocks &t->lock */
                interrupts_restore(ipl);
                return;
        } else {
                t->detached = true;
        }
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
}
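
/*
 * Illustrative sketch (compiled out): a fire-and-forget thread. Detaching
 * right after creation means nobody joins the thread, so its resources are
 * reclaimed automatically once it becomes Undead. background() is an
 * assumption for this example.
 */
#if 0
        thread_t *t = thread_create(background, NULL, TASK, 0, "bg");
        if (t) {
                thread_detach(t);
                thread_ready(t);
        }
#endif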

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
        waitq_t wq;

        waitq_initialize(&wq);

        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}
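
/*
 * Illustrative sketch (compiled out): putting the current thread to sleep.
 */
#if 0
        thread_sleep(1);        /* one second, i.e. thread_usleep(1000000) */
#endif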

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (*call_me)(void *), void *call_me_with)
{
        ipl_t ipl;

        ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        THREAD->call_me = call_me;
        THREAD->call_me_with = call_me_with;
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
}
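
/*
 * Illustrative sketch (compiled out): registering a callback that the
 * scheduler invokes on the next context switch to the current thread.
 * on_resume() is an assumption for this example.
 */
#if 0
        static void on_resume(void *arg)
        {
                /* executed out of context, right before the thread resumes */
        }

        thread_register_call_me(on_resume, NULL);
#endif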

/** Print list of threads debug info */
void thread_print_list(void)
{
        link_t *cur;
        ipl_t ipl;

        /* Messing with thread structures, avoid deadlock */
        ipl = interrupts_disable();
        spinlock_lock(&threads_lock);

        printf("tid    name       address    state    task       ctx code       stack      cycles     cpu  kst        wq\n");
        printf("------ ---------- ---------- -------- ---------- --- ---------- ---------- ---------- ---- ---------- ----------\n");

        for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
                btree_node_t *node;
                int i;

                node = list_get_instance(cur, btree_node_t, leaf_link);
                for (i = 0; i < node->keys; i++) {
                        thread_t *t;

                        t = (thread_t *) node->value[i];

                        uint64_t cycles;
                        char suffix;

                        if (t->cycles > 1000000000000000000LL) {
                                cycles = t->cycles / 1000000000000000000LL;
                                suffix = 'E';
                        } else if (t->cycles > 1000000000000LL) {
                                cycles = t->cycles / 1000000000000LL;
                                suffix = 'T';
                        } else if (t->cycles > 1000000LL) {
                                cycles = t->cycles / 1000000LL;
                                suffix = 'M';
                        } else {
                                cycles = t->cycles;
                                suffix = ' ';
                        }

                        printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ", t->tid, t->name, t, thread_states[t->state], t->task, t->task->context, t->thread_code, t->kstack, cycles, suffix);

                        if (t->cpu)
                                printf("%-4zd", t->cpu->id);
                        else
                                printf("none");

                        if (t->state == Sleeping)
                                printf(" %#10zx %#10zx", t->kstack, t->sleep_queue);

                        printf("\n");
                }
        }

        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
        btree_node_t *leaf;

        return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
}
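
/*
 * Illustrative sketch (compiled out): the locking discipline thread_exists()
 * expects. The caller disables interrupts and holds threads_lock for as long
 * as it dereferences the looked-up pointer t.
 */
#if 0
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&threads_lock);
        if (thread_exists(t)) {
                /* t is guaranteed valid while threads_lock is held */
        }
        spinlock_unlock(&threads_lock);
        interrupts_restore(ipl);
#endif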


/** Update accounting of current thread.
 *
 * Note that THREAD->lock must be already held and
 * interrupts must be already disabled.
 *
 */
void thread_update_accounting(void)
{
        uint64_t time = get_cycle();
        THREAD->cycles += time - THREAD->last_cycle;
        THREAD->last_cycle = time;
}
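
/*
 * Illustrative sketch (compiled out): charging the cycles consumed so far to
 * THREAD before switching away, under the locking stated in the note above.
 */
#if 0
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&THREAD->lock);
        thread_update_accounting();     /* cycles += get_cycle() - last_cycle */
        spinlock_unlock(&THREAD->lock);
        interrupts_restore(ipl);
#endif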

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
        thread_t *t;
        char namebuf[THREAD_NAME_BUFLEN];
        uspace_arg_t *kernel_uarg;
        uint32_t tid;
        int rc;

        rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
        if (rc != 0)
                return (unative_t) rc;

        kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
        rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
        if (rc != 0) {
                free(kernel_uarg);
                return (unative_t) rc;
        }

        if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
                tid = t->tid;
                thread_ready(t);
                return (unative_t) tid;
        } else {
                free(kernel_uarg);
        }

        return (unative_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
        thread_exit();
        /* Unreachable */
        return 0;
}

/** @}
 */