/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>

char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};	/**< Thread states */

SPINLOCK_INITIALIZE(threads_lock);	/**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
LIST_INITIALIZE(threads_head);		/**< List of all threads. */

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;

/** Thread wrapper
 *
 * This wrapper is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing
 * function returns.
 *
 * It is assumed that interrupts_disable() has been called
 * prior to entry.
 *
 */
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* this is where each thread wakes up after its creation */
	before_thread_runs();

	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);
	thread_exit();
	/* not reached */
}
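
/*
 * Illustrative sketch (the names example_thread() and do_work() are
 * hypothetical): thanks to cushion(), a thread's implementing function
 * may simply return; an explicit thread_exit() call is not required.
 *
 *	static void example_thread(void *arg)
 *	{
 *		do_work(arg);
 *		// returning here is safe: cushion() calls thread_exit()
 *	}
 */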

/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *)obj;
	pfn_t pfn;
	int status;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);
	link_initialize(&t->threads_link);

	pfn = frame_alloc_rc(ONE_FRAME, FRAME_KA | kmflags, &status);
	if (status)
		return -1;
	t->kstack = (__u8 *)PA2KA(PFN2ADDR(pfn));

	return 0;
}
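
/*
 * A minimal sketch of the allocation convention the constructor above
 * relies on (an assumption drawn from its error path, not a definitive
 * description of the frame allocator): frame_alloc_rc() stores zero in
 * *status on success and nonzero on failure, and only on success does
 * the returned pfn denote a usable frame.
 *
 *	int status;
 *	pfn_t pfn = frame_alloc_rc(ONE_FRAME, FRAME_KA | kmflags, &status);
 *	if (status)
 *		return -1;	// nothing to free on failure
 *	// PFN2ADDR(pfn) yields the physical address, PA2KA() maps it
 *	// into the kernel address space
 */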

/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
	thread_t *t = (thread_t *)obj;

	frame_free(ADDR2PFN(KA2PA(t->kstack)));
	return 1;	/* One page freed */
}

/** Initialize threads
 *
 * Initialize kernel threads support.
 *
 */
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy, 0);
	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
					thr_constructor, thr_destructor, 0);
}

/** Make thread ready
 *
 * Switch thread t to the ready state.
 *
 * @param t Thread to make ready.
 *
 */
void thread_ready(thread_t *t)
{
	cpu_t *cpu;
	runq_t *r;
	ipl_t ipl;
	int i, avg;

	ipl = interrupts_disable();

	spinlock_lock(&t->lock);

	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

	cpu = CPU;
	if (t->flags & X_WIRED) {
		cpu = t->cpu;
	}
	spinlock_unlock(&t->lock);

	/*
	 * Append t to respective ready queue on respective processor.
	 */
	r = &cpu->rq[i];
	spinlock_lock(&r->lock);
	list_append(&t->rq_link, &r->rq_head);
	r->n++;
	spinlock_unlock(&r->lock);

	atomic_inc(&nrdy);
	avg = atomic_get(&nrdy) / config.cpu_active;
	atomic_inc(&cpu->nrdy);

	interrupts_restore(ipl);
}

/** Destroy thread memory structure
 *
 * Detach thread from all queues, cpus etc. and destroy it.
 *
 * Assume thread->lock is held!!
 */
void thread_destroy(thread_t *t)
{
	ASSERT(t->state == Exiting);
	ASSERT(t->task);
	ASSERT(t->cpu);

	spinlock_lock(&t->cpu->lock);
	if (t->cpu->fpu_owner == t)
		t->cpu->fpu_owner = NULL;
	spinlock_unlock(&t->cpu->lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	spinlock_unlock(&t->task->lock);

	spinlock_unlock(&t->lock);

	spinlock_lock(&threads_lock);
	list_remove(&t->threads_link);
	spinlock_unlock(&threads_lock);

	slab_free(thread_slab, t);
}
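
/*
 * Caller-side sketch: as the doc comment above demands, thread_destroy()
 * must be entered with t->lock held. Note that the function releases
 * t->lock itself and frees t, so the caller must not touch either
 * afterwards.
 *
 *	spinlock_lock(&t->lock);
 *	// ... t->state must already be Exiting ...
 *	thread_destroy(t);	// releases t->lock and frees t
 */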

/** Create new thread
 *
 * Create a new thread.
 *
 * @param func Thread's implementing function.
 * @param arg Thread's implementing function argument.
 * @param task Task to which the thread belongs.
 * @param flags Thread flags.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 */
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	/* Not needed, but good for debugging */
	memsetb((__address) t->kstack, THREAD_STACK_SIZE, 0);

	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->priority = -1;	/* start in rq[0] */
	t->cpu = NULL;
	t->flags = 0;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	/*
	 * Register this thread in the system-wide list.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	list_append(&t->threads_link, &threads_head);
	spinlock_unlock(&threads_lock);

	/*
	 * Attach to the containing task.
	 */
	spinlock_lock(&task->lock);
	list_append(&t->th_link, &task->th_head);
	spinlock_unlock(&task->lock);

	interrupts_restore(ipl);

	return t;
}
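
/*
 * Usage sketch (worker() and the task pointer are placeholders): a
 * newly created thread starts in the Entering state and does not run
 * until it is passed to thread_ready().
 *
 *	thread_t *t = thread_create(worker, NULL, TASK, 0);
 *	if (t)
 *		thread_ready(t);
 */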

/** Make thread exiting
 *
 * End current thread execution and switch it to the exiting
 * state. All pending timeouts are executed.
 *
 */
void thread_exit(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {	/* busy waiting for timeouts in progress */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();
}

/** Thread sleep
 *
 * Suspend execution of the current thread.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
	thread_usleep(sec * 1000000);
}

/** Thread usleep
 *
 * Suspend execution of the current thread.
 *
 * @param usec Number of microseconds to sleep.
 *
 */
void thread_usleep(__u32 usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}
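
/*
 * Usage sketch: both calls below suspend the current thread for one
 * second; thread_sleep() is just a convenience wrapper that converts
 * seconds to microseconds for thread_usleep().
 *
 *	thread_sleep(1);
 *	thread_usleep(1000000);
 */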

/** Register thread out-of-context invocation
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 *
 */
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->call_me = call_me;
	THREAD->call_me_with = call_me_with;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);
}
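
/*
 * Usage sketch (my_callback() and data are placeholders): the
 * registered function runs on the next context switch to this thread;
 * given the stored-pointer semantics above, registering NULL
 * presumably clears a pending invocation.
 *
 *	thread_register_call_me(my_callback, data);
 *	...
 *	thread_register_call_me(NULL, NULL);
 */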

/** Print list of threads debug info */
void thread_print_list(void)
{
	link_t *cur;
	thread_t *t;
	ipl_t ipl;

	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	for (cur = threads_head.next; cur != &threads_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, threads_link);
		printf("Thr: %d(%s) ", t->tid, thread_states[t->state]);
		if (t->cpu)
			printf("cpu%d ", t->cpu->id);

		printf("\n");
	}

	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}