/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */

/**
 * @file
 * @brief Thread management functions.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch/cycle.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>


/** Thread states */
char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Undead"
};

/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
SPINLOCK_INITIALIZE(threads_lock);

/** B+tree of all threads.
 *
 * When a thread is found in the threads_btree B+tree, it is guaranteed to
 * exist as long as the threads_lock is held.
 */
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
uint32_t last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Interrupts are assumed to be disabled on entry.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;
	THREAD->last_cycle = get_cycle();

	/* This is where each thread wakes up after its creation */
	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);
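
	/*
	 * The thread structure, and with it THREAD->cycles, is freed when the
	 * thread is eventually destroyed, so the cycles accumulated so far are
	 * flushed into the owning task below before the thread exits.
	 */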

	/* Accumulate accounting to the task */
	ipl_t ipl = interrupts_disable();

	spinlock_lock(&THREAD->lock);
	thread_update_accounting();
	uint64_t cycles = THREAD->cycles;
	THREAD->cycles = 0;
	spinlock_unlock(&THREAD->lock);

	spinlock_lock(&TASK->lock);
	TASK->cycles += cycles;
	spinlock_unlock(&TASK->lock);

	interrupts_restore(ipl);

	thread_exit();
	/* not reached */
}

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *) obj;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);

	/* call the architecture-specific part of the constructor */
	thr_constructor_arch(t);

#ifdef ARCH_HAS_FPU
#  ifdef CONFIG_FPU_LAZY
	t->saved_fpu_context = NULL;
#  else
	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
	if (!t->saved_fpu_context)
		return -1;
#  endif
#endif

	t->kstack = frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
	if (!t->kstack) {
#ifdef ARCH_HAS_FPU
		if (t->saved_fpu_context)
			slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
		return -1;
	}

	return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
	thread_t *t = (thread_t *) obj;

	/* call the architecture-specific part of the destructor */
	thr_destructor_arch(t);

	frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
	if (t->saved_fpu_context)
		slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
	return 1;	/* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy, 0);
	thread_slab = slab_cache_create("thread_slab",
		sizeof(thread_t), 0,
		thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
	fpu_context_slab = slab_cache_create("fpu_slab",
		sizeof(fpu_context_t),
		FPU_CONTEXT_ALIGN,
		NULL, NULL, 0);
#endif

	btree_create(&threads_btree);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
	cpu_t *cpu;
	runq_t *r;
	ipl_t ipl;
	int i, avg;

	ipl = interrupts_disable();

	spinlock_lock(&t->lock);

	ASSERT(!(t->state == Ready));

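	/*
	 * Each call bumps the thread's priority number by one, saturating at
	 * RQ_COUNT - 1, which moves the thread to a less favoured run queue.
	 * A freshly created thread has priority -1, so its first
	 * thread_ready() places it in rq[0].
	 */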
	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

	cpu = CPU;
	if (t->flags & THREAD_FLAG_WIRED) {
		cpu = t->cpu;
	}
	t->state = Ready;
	spinlock_unlock(&t->lock);

	/*
	 * Append t to respective ready queue on respective processor.
	 */
	r = &cpu->rq[i];
	spinlock_lock(&r->lock);
	list_append(&t->rq_link, &r->rq_head);
	r->n++;
	spinlock_unlock(&r->lock);

	atomic_inc(&nrdy);
	avg = atomic_get(&nrdy) / config.cpu_active;
	atomic_inc(&cpu->nrdy);

	interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!!
 */
void thread_destroy(thread_t *t)
{
	bool destroy_task = false;

	ASSERT(t->state == Exiting || t->state == Undead);
	ASSERT(t->task);
	ASSERT(t->cpu);

	spinlock_lock(&t->cpu->lock);
	if (t->cpu->fpu_owner == t)
		t->cpu->fpu_owner = NULL;
	spinlock_unlock(&t->cpu->lock);

	spinlock_unlock(&t->lock);

	spinlock_lock(&threads_lock);
	btree_remove(&threads_btree, (btree_key_t) ((uintptr_t) t), NULL);
	spinlock_unlock(&threads_lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	if (--t->task->refcount == 0) {
		t->task->accept_new_threads = false;
		destroy_task = true;
	}
	spinlock_unlock(&t->task->lock);

	if (destroy_task)
		task_destroy(t->task);

	slab_free(thread_slab, t);
}

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	/* Not needed, but good for debugging */
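	/*
	 * (The size expression below parses as THREAD_STACK_SIZE <<
	 * STACK_FRAMES, since '*' binds tighter than '<<', matching the
	 * frame_alloc(STACK_FRAMES, ...) allocation in thr_constructor().)
	 */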
	memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	memcpy(t->name, name, THREAD_NAME_BUFLEN);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->cycles = 0;
	t->priority = -1;	/* start in rq[0] */
	t->cpu = NULL;
	t->flags = flags;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_interruptible = false;
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->in_copy_from_uspace = false;
	t->in_copy_to_uspace = false;

	t->interrupted = false;
	t->join_type = None;
	t->detached = false;
	waitq_initialize(&t->join_wq);

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	thread_create_arch(t);	/* might depend on previous initialization */

	/*
	 * Attach to the containing task.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&task->lock);
	if (!task->accept_new_threads) {
		spinlock_unlock(&task->lock);
		slab_free(thread_slab, t);
		interrupts_restore(ipl);
		return NULL;
	}
	list_append(&t->th_link, &task->th_head);
	if (task->refcount++ == 0)
		task->main_thread = t;
	spinlock_unlock(&task->lock);

	/*
	 * Register this thread in the system-wide list.
	 */
	spinlock_lock(&threads_lock);
	btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
	spinlock_unlock(&threads_lock);

	interrupts_restore(ipl);

	return t;
}
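
/*
 * A minimal usage sketch (hypothetical caller; worker() is not part of this
 * file): create a kernel thread in the current task and make it runnable.
 * sys_thread_create() below follows the same pattern for userspace threads,
 * with uinit() as the implementing function:
 *
 *	static void worker(void *arg)
 *	{
 *		...
 *	}
 *
 *	thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *	if (t)
 *		thread_ready(t);
 */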

/** Terminate thread.
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {
		/* busy waiting for timeouts in progress */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();

	/* Not reached */
	while (1)
		;
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(uint32_t sec)
{
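	/*
	 * The multiplication below is carried out in 32 bits, so sec values
	 * above 4294 (roughly 71 minutes) would overflow.
	 */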
	thread_usleep(sec * 1000000);
}

/** Wait for another thread to exit.
 *
 * @param t Thread to join on exit.
 * @param usec Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */
int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
{
	ipl_t ipl;
	int rc;

	if (t == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */

	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

	return rc;
}

/** Detach thread.
 *
 * Mark the thread as detached. If the thread is already in the Undead state,
 * deallocate its resources.
 *
 * @param t Thread to be detached.
 */
void thread_detach(thread_t *t)
{
	ipl_t ipl;

	/*
	 * Since the thread is expected not to be detached yet, the pointer to
	 * it must still be valid.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	if (t->state == Undead) {
		thread_destroy(t);	/* unlocks &t->lock */
		interrupts_restore(ipl);
		return;
	} else {
		t->detached = true;
	}
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
}
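
/*
 * Lifecycle sketch (hypothetical caller): an undetached thread stays Undead
 * after it exits until somebody reaps it, so the creator typically joins the
 * thread and then detaches it. The timeout and flag constants are the ones
 * declared in synch.h:
 *
 *	thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *	thread_ready(t);
 *	(void) thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 *	thread_detach(t);	(this reaps the Undead thread)
 */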

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(uint32_t usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

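	/*
	 * Nothing ever performs a wakeup on this private wait queue, so the
	 * sleep can only finish by letting the usec timeout expire.
	 */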
	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->call_me = call_me;
	THREAD->call_me_with = call_me_with;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);
}

/** Print list of threads debug info */
void thread_print_list(void)
{
	link_t *cur;
	ipl_t ipl;

	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	printf("tid    name       address    state    task       ctx code       stack          cycles cpu  kstack     waitqueue\n");
	printf("------ ---------- ---------- -------- ---------- --- ---------- ---------- ---------- ---- ---------- ----------\n");

	for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			thread_t *t;

			t = (thread_t *) node->value[i];

			uint64_t cycles;
			char suffix;

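			/*
			 * Scale the cycle count down to at most a few digits
			 * and tag it with a suffix: M = 10^6, T = 10^12,
			 * E = 10^18.
			 */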
			if (t->cycles > 1000000000000000000LL) {
				cycles = t->cycles / 1000000000000000000LL;
				suffix = 'E';
			} else if (t->cycles > 1000000000000LL) {
				cycles = t->cycles / 1000000000000LL;
				suffix = 'T';
			} else if (t->cycles > 1000000LL) {
				cycles = t->cycles / 1000000LL;
				suffix = 'M';
			} else {
				cycles = t->cycles;
				suffix = ' ';
			}

			printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ", t->tid, t->name, t, thread_states[t->state], t->task, t->task->context, t->thread_code, t->kstack, cycles, suffix);

			if (t->cpu)
				printf("%-4zd", t->cpu->id);
			else
				printf("none");

			if (t->state == Sleeping)
				printf(" %#10zx %#10zx", t->kstack, t->sleep_queue);

			printf("\n");
		}
	}

	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}

/** Check whether thread exists.
 *
 * Note that threads_lock must be already held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */
bool thread_exists(thread_t *t)
{
	btree_node_t *leaf;

	return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
}


/** Update accounting of current thread.
 *
 * Note that THREAD->lock must be already held and
 * interrupts must be already disabled.
 *
 */
void thread_update_accounting(void)
{
	uint64_t time = get_cycle();
	THREAD->cycles += time - THREAD->last_cycle;
	THREAD->last_cycle = time;
}

/** Process syscall to create new thread.
 *
 */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
	thread_t *t;
	char namebuf[THREAD_NAME_BUFLEN];
	uspace_arg_t *kernel_uarg;
	uint32_t tid;
	int rc;

	rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
	if (rc != 0)
		return (unative_t) rc;

	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != 0) {
		free(kernel_uarg);
		return (unative_t) rc;
	}

	if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
		tid = t->tid;
		thread_ready(t);
		return (unative_t) tid;
	} else {
		free(kernel_uarg);
	}

	return (unative_t) ENOMEM;
}

/** Process syscall to terminate thread.
 *
 */
unative_t sys_thread_exit(int uspace_status)
{
	thread_exit();
	/* Unreachable */
	return 0;
}

/** @}
 */