Rev 118 → Rev 125. The only change in this range: Rev 125 drops one of the two blank lines that preceded the free(THREAD) call (old line 309), shifting all later line numbers down by one.
Line 100...

	/*
	 * If the load balancing thread is not running, wake it up and
	 * set CPU-private flag that the kcpulb has been started.
	 */
	if (test_and_set(&CPU->kcpulbstarted) == 0) {
		waitq_wakeup(&CPU->kcpulb_wq, 0);
		goto loop;
	}
#endif /* __SMP__ */

	/*
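The wakeup above hinges on test_and_set() being an atomic read-modify-write: of all CPUs racing here, exactly one observes the old value 0 and performs the wakeup; the rest see 1 and skip it, so kcpulb is woken at most once per balancing round. A minimal sketch of that semantic in portable C11 (the kernel's real test_and_set() is architecture-specific; atomic_flag here is only an illustrative stand-in):

#include <stdatomic.h>

/* Returns the previous value of the flag: 0 for the single winner, 1 for everyone else. */
static inline int test_and_set(atomic_flag *flag)
{
	/* atomic_flag_test_and_set is a full read-modify-write; only one
	 * concurrent caller can ever observe the flag clear. */
	return atomic_flag_test_and_set(flag) ? 1 : 0;
}

/* Usage mirroring the scheduler: wake kcpulb only if nobody has already done so. */
void maybe_wake_balancer(atomic_flag *kcpulbstarted)
{
	if (test_and_set(kcpulbstarted) == 0) {
		/* we won the race: this CPU is responsible for the wakeup,
		 * e.g. waitq_wakeup(&CPU->kcpulb_wq, 0); */
	}
}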
Line 236...

	if (!context_save(&THREAD->saved_context)) {
		/*
		 * This is the place where threads leave scheduler();
		 */
		before_thread_runs();
		spinlock_unlock(&THREAD->lock);
		cpu_priority_restore(THREAD->saved_context.pri);
		return;
	}
	THREAD->saved_context.pri = pri;
}
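context_save() is the setjmp()-style half of a context switch and returns twice: nonzero on the direct path right after saving, and zero again later when another scheduler pass restores the saved context, so the if body above is exactly where a thread resumes. A userspace sketch of the same two-return idiom using setjmp/longjmp (an analogy only; note that setjmp's return sense is inverted relative to context_save(), whose direct return is the nonzero one):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf saved_context;

static void restore_later(void)
{
	/* Jump back into the saved context; the paired setjmp()
	 * then "returns" a second time with a nonzero value. */
	longjmp(saved_context, 1);
}

int main(void)
{
	if (setjmp(saved_context) != 0) {
		/* Second return: the resumed path continues here, analogous
		 * to the body of if (!context_save(...)) in the scheduler. */
		puts("woken up in the restored context");
		return 0;
	}
	/* First return: context is saved; hand off control. */
	restore_later();
}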
Line 276...

	int priority;

	if (THREAD) {
		switch (THREAD->state) {
		case Running:
			THREAD->state = Ready;
			spinlock_unlock(&THREAD->lock);
			thread_ready(THREAD);
			break;

		case Exiting:
			frame_free((__address) THREAD->kstack);
			if (THREAD->ustack) {
				frame_free((__address) THREAD->ustack);
			}

			/*
			 * Detach from the containing task.
			 */
			spinlock_lock(&TASK->lock);
			list_remove(&THREAD->th_link);
			spinlock_unlock(&TASK->lock);

			spinlock_unlock(&THREAD->lock);

			spinlock_lock(&threads_lock);
			list_remove(&THREAD->threads_link);
			spinlock_unlock(&threads_lock);

			/*
			 * Drop the FPU ownership record if this thread held it.
			 */
			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock, which we locked in
			 * waitq_sleep(). The wait queue's address is kept in
			 * THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * Entering scheduler() with a thread in this state is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);
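The call_me/call_me_with pair in the Sleeping branch is a one-shot deferred callback: another subsystem can arrange for a function to run on the thread's behalf the next time the scheduler handles it, and the pointers are cleared so it fires at most once. A self-contained sketch of the pattern (struct and function names here are illustrative, not the kernel's):

#include <stddef.h>

typedef void (*deferred_fn_t)(void *);

struct worker {
	deferred_fn_t call_me;	/* one-shot callback, NULL when unarmed */
	void *call_me_with;	/* opaque argument for the callback */
};

/* Arm the callback; the kernel would do this under the thread's lock. */
static void request_call(struct worker *w, deferred_fn_t fn, void *arg)
{
	w->call_me = fn;
	w->call_me_with = arg;
}

/* Fire-and-clear, mirroring the Sleeping branch above. */
static void run_pending_call(struct worker *w)
{
	if (w->call_me) {
		w->call_me(w->call_me_with);
		w->call_me = NULL;	/* ensure the request fires only once */
		w->call_me_with = NULL;
	}
}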
Line 467...

	t = NULL;
	l = r->rq_head.prev;	/* search rq from the back */
	while (l != &r->rq_head) {
		t = list_get_instance(l, thread_t, rq_link);
		/*
		 * We don't want to steal CPU-wired threads, nor threads that have
		 * already been stolen; the latter prevents threads from migrating
		 * between CPUs without ever being run. We also don't want to steal
		 * threads whose FPU context is still in the CPU.
		 */
		spinlock_lock(&t->lock);
		if (!(t->flags & (X_WIRED | X_STOLEN)) && !t->fpu_context_engaged) {

			/*
495 | cpu->nrdy--; |
494 | cpu->nrdy--; |
496 | spinlock_unlock(&cpu->lock); |
495 | spinlock_unlock(&cpu->lock); |
497 | 496 | ||
498 | atomic_dec(&nrdy); |
497 | atomic_dec(&nrdy); |
499 | 498 | ||
500 | r->n--; |
499 | r->n--; |
501 | list_remove(&t->rq_link); |
500 | list_remove(&t->rq_link); |
502 | 501 | ||
503 | break; |
502 | break; |
504 | } |
503 | } |
505 | spinlock_unlock(&t->lock); |
504 | spinlock_unlock(&t->lock); |
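Note the two-level accounting when a thread is stolen: the per-CPU ready counter is modified under that CPU's spinlock, while the system-wide counter is adjusted with a single lock-free atomic_dec(), so no global lock is ever taken. A small C11 sketch of the same split (the types here are assumptions for illustration):

#include <stdatomic.h>

static atomic_int nrdy;		/* system-wide count of ready threads */

struct cpu {
	int nrdy;		/* ready threads on this CPU's run queues;
				 * the kernel guards this with the CPU's spinlock */
};

/* Account for one thread removed from cpu's run queues. */
void unaccount_ready(struct cpu *c)
{
	/* per-CPU counter: caller holds the CPU's lock */
	c->nrdy--;
	/* global counter: a single atomic RMW, no global lock required */
	atomic_fetch_sub(&nrdy, 1);
}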
Line 525...

		if (--count == 0)
			goto satisfied;

		/*
		 * We are not satisfied yet; focus on another CPU next time.
		 */
		k++;

		continue;
	}
Line 550...

		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
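Clearing CPU->kcpulbstarted re-arms the handshake from the first hunk: the next CPU that notices an imbalance will again win the test_and_set() and wake the balancer exactly once. A compressed userspace sketch of the whole wake/sleep protocol, with a condition variable standing in for the kernel's wait queue (purely illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag kcpulbstarted = ATOMIC_FLAG_INIT;
static pthread_mutex_t wq_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq_cond = PTHREAD_COND_INITIALIZER;
static bool wake_pending;

/* Caller side (find_best_thread analogue): wake the balancer at most once. */
void kick_balancer(void)
{
	if (!atomic_flag_test_and_set(&kcpulbstarted)) {
		pthread_mutex_lock(&wq_mtx);
		wake_pending = true;
		pthread_cond_signal(&wq_cond);	/* waitq_wakeup() analogue */
		pthread_mutex_unlock(&wq_mtx);
	}
}

/* Balancer side (kcpulb analogue): sleep, balance, then re-arm the flag. */
void balancer_iteration(void)
{
	pthread_mutex_lock(&wq_mtx);
	while (!wake_pending)
		pthread_cond_wait(&wq_cond, &wq_mtx);	/* waitq_sleep() analogue */
	wake_pending = false;
	pthread_mutex_unlock(&wq_mtx);

	/* ... migrate threads until satisfied ... */

	atomic_flag_clear(&kcpulbstarted);	/* allow the next wakeup */
}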