/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};	/**< Thread states */

SPINLOCK_INITIALIZE(threads_lock);	/**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
LIST_INITIALIZE(threads_head);		/**< List of all threads. */

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif


/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * Interrupts are assumed to be disabled on entry
 * (i.e. interrupts_disable() has already been called).
 *
 */
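/*
 * Note: the scheduler is expected to switch to a newly created thread
 * with the thread's lock held and interrupts disabled, which is why
 * cushion() releases THREAD->lock and re-enables interrupts itself.
 */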
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* this is where each thread wakes up after its creation */
	before_thread_runs();

	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);
	thread_exit();
	/* not reached */
}

/** Initialization and allocation for thread_t structure */
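/*
 * Note: with CONFIG_FPU_LAZY the saved FPU context is left unallocated
 * here and is presumably set up on first FPU use; otherwise it has to be
 * preallocated, since the thread may need it as soon as it runs.
 */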
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *)obj;
	pfn_t pfn;
	int status;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);
	link_initialize(&t->threads_link);

#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
	t->saved_fpu_context = NULL;
# else
	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
	if (!t->saved_fpu_context)
		return -1;
# endif
#endif

	pfn = frame_alloc_rc(ONE_FRAME, FRAME_KA | kmflags, &status);
	if (status) {
#ifdef ARCH_HAS_FPU
		if (t->saved_fpu_context)
			slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
		return -1;
	}
	t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));

	return 0;
}

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
	thread_t *t = (thread_t *)obj;

	frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
	if (t->saved_fpu_context)
		slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
	return 1;	/* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy, 0);
	thread_slab = slab_cache_create("thread_slab",
					sizeof(thread_t), 0,
					thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
	fpu_context_slab = slab_cache_create("fpu_slab",
					     sizeof(fpu_context_t),
					     FPU_CONTEXT_ALIGN,
					     NULL, NULL, 0);
#endif
}


/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
	cpu_t *cpu;
	runq_t *r;
	ipl_t ipl;
	int i, avg;

	ipl = interrupts_disable();

	spinlock_lock(&t->lock);

	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

	cpu = CPU;
	if (t->flags & X_WIRED) {
		cpu = t->cpu;
	}
	spinlock_unlock(&t->lock);

	/*
	 * Append t to respective ready queue on respective processor.
	 */
	r = &cpu->rq[i];
	spinlock_lock(&r->lock);
	list_append(&t->rq_link, &r->rq_head);
	r->n++;
	spinlock_unlock(&r->lock);

	atomic_inc(&nrdy);
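	/*
	 * avg is the average number of ready threads per active CPU;
	 * it is computed here but not used further in this function,
	 * presumably a hint left over for the load balancer.
	 */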
	avg = atomic_get(&nrdy) / config.cpu_active;
	atomic_inc(&cpu->nrdy);

	interrupts_restore(ipl);
}


/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assumes thread->lock is held by the caller!
 */
void thread_destroy(thread_t *t)
{
	ASSERT(t->state == Exiting);
	ASSERT(t->task);
	ASSERT(t->cpu);

	spinlock_lock(&t->cpu->lock);
	if (t->cpu->fpu_owner == t)
		t->cpu->fpu_owner = NULL;
	spinlock_unlock(&t->cpu->lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	spinlock_unlock(&t->task->lock);

	spinlock_unlock(&t->lock);

	spinlock_lock(&threads_lock);
	list_remove(&t->threads_link);
	spinlock_unlock(&threads_lock);

	slab_free(thread_slab, t);
}


/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	/* Not needed, but good for debugging */
	memsetb((__address) t->kstack, THREAD_STACK_SIZE, 0);

	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->priority = -1;	/* start in rq[0] */
	t->cpu = NULL;
	t->flags = 0;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	/*
	 * Register this thread in the system-wide list.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	list_append(&t->threads_link, &threads_head);
	spinlock_unlock(&threads_lock);

	/*
	 * Attach to the containing task.
	 */
	spinlock_lock(&task->lock);
	list_append(&t->th_link, &task->th_head);
	spinlock_unlock(&task->lock);

	interrupts_restore(ipl);

	return t;
}
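
/*
 * Usage sketch (illustrative only; not part of the original file): one
 * way a caller might create and dispatch a kernel thread with this API.
 * The names demo_code and demo_spawn are hypothetical; TASK is assumed
 * to refer to the current task, as with the THREAD and CPU macros.
 */
static void demo_code(void *arg)
{
	printf("demo thread %d says: %s\n", THREAD->tid, (char *) arg);
	/* returning lets cushion() call thread_exit() on our behalf */
}

static void demo_spawn(void)
{
	thread_t *t;

	t = thread_create(demo_code, "hello", TASK, 0);
	if (t)
		thread_ready(t);	/* enqueue on a ready queue */
	else
		printf("demo: thread_create() failed\n");
}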


/** Make thread exiting
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();
}


/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
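/*
 * Note: sec is converted to microseconds in a __u32, so values above
 * roughly 4294 seconds would overflow; callers are assumed to pass
 * small intervals.
 */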
void thread_sleep(__u32 sec)
{
	thread_usleep(sec * 1000000);
}


/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
	waitq_t wq;

	waitq_initialize(&wq);
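
	/*
	 * Nobody else knows about the local wait queue, so no wakeup can
	 * arrive; the sleep ends only when the timeout expires.
	 */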
	(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}


/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->call_me = call_me;
	THREAD->call_me_with = call_me_with;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);
}

/** Print debug info about all threads */
void thread_print_list(void)
{
	link_t *cur;
	thread_t *t;
	ipl_t ipl;

	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	for (cur = threads_head.next; cur != &threads_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, threads_link);
		printf("Thr: %d(%s) ", t->tid, thread_states[t->state]);
		if (t->cpu)
			printf("cpu%d ", t->cpu->id);

		printf("\n");
	}

	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}