Diff: Rev 114 → Rev 115
@@ -129,11 +129,11 @@
  */
 spinlock_unlock(&r->lock);
 continue;
 }
 
-/* avoid deadlock with relink_rq */
+/* avoid deadlock with relink_rq() */
 if (!spinlock_trylock(&CPU->lock)) {
 	/*
 	 * Unlock r and try again.
 	 */
 	spinlock_unlock(&r->lock);
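
Note: the hunk above sits inside the classic trylock back-off that avoids an ABBA deadlock with relink_rq(): the scheduler already holds r->lock and must not block on CPU->lock, presumably because relink_rq() takes the same two locks in the opposite order. A minimal userspace sketch of the same pattern, using POSIX mutexes instead of the kernel's spinlock API (lock_a, lock_b, and take_both() are hypothetical names):

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of r->lock   */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of CPU->lock */

/*
 * Acquire lock_a, then *try* lock_b. If lock_b is contended, drop
 * lock_a and retry rather than block: the other party may hold
 * lock_b while waiting for lock_a (the ABBA deadlock).
 */
static void take_both(void)
{
	for (;;) {
		pthread_mutex_lock(&lock_a);
		if (pthread_mutex_trylock(&lock_b) == 0)
			return;	/* success: caller releases both locks */
		pthread_mutex_unlock(&lock_a);
		sched_yield();	/* back off before retrying */
	}
}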
@@ -444,20 +444,20 @@
 link_t *l;
 runq_t *r;
 cpu_t *cpu;
 
 cpu = &cpus[(i + k) % config.cpu_active];
-r = &cpu->rq[j];
 
 /*
  * Not interested in ourselves.
  * Doesn't require interrupt disabling, because kcpulb is X_WIRED.
  */
 if (CPU == cpu)
 	continue;
 
 restart: pri = cpu_priority_high();
+r = &cpu->rq[j];
 spinlock_lock(&r->lock);
 if (r->n == 0) {
 	spinlock_unlock(&r->lock);
 	cpu_priority_restore(pri);
 	continue;
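
Note: the change above moves the computation of r behind the restart: label, so every piece of state the retried region uses, including the runqueue pointer, is re-established after a goto restart and nothing from a failed attempt leaks into the retry. A compilable toy analog of that shape, with hypothetical miniature types standing in for the kernel's runq_t/cpu_t and POSIX mutexes standing in for spinlocks (the kernel additionally brackets the locked region with cpu_priority_high()/cpu_priority_restore(pri), which has no direct userspace analog here):

#include <pthread.h>
#include <stdbool.h>

enum { RQ_COUNT = 4 };

typedef struct {			/* hypothetical stand-in for runq_t */
	pthread_mutex_t lock;
	int n;				/* number of queued threads */
} runq_t;

typedef struct {			/* hypothetical stand-in for cpu_t */
	pthread_mutex_t lock;
	runq_t rq[RQ_COUNT];
} cpu_t;

/* Try to take one unit of work from cpu's j-th run queue. */
static bool steal_one(cpu_t *cpu, int j)
{
	runq_t *r;

restart:
	r = &cpu->rq[j];		/* re-derived on every attempt */
	pthread_mutex_lock(&r->lock);
	if (r->n == 0) {
		pthread_mutex_unlock(&r->lock);
		return false;		/* queue empty, give up */
	}
	/* same trylock back-off as in the first hunk */
	if (pthread_mutex_trylock(&cpu->lock) != 0) {
		pthread_mutex_unlock(&r->lock);
		goto restart;
	}
	r->n--;				/* claim one thread */
	pthread_mutex_unlock(&cpu->lock);
	pthread_mutex_unlock(&r->lock);
	return true;
}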
@@ -468,14 +468,15 @@
 while (l != &r->rq_head) {
 	t = list_get_instance(l, thread_t, rq_link);
 	/*
 	 * We don't want to steal CPU-wired threads, nor threads that have already been stolen.
 	 * The latter prevents threads from migrating between CPUs without ever being run.
-	 * We don't want to steal threads whose FPU context is still in CPU
+	 * We don't want to steal threads whose FPU context is still in CPU.
 	 */
 	spinlock_lock(&t->lock);
 	if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
+
 		/*
 		 * Remove t from r.
 		 */
 
 		spinlock_unlock(&t->lock);
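
Note: the condition in the last hunk decides whether a thread is eligible for stealing: it must not be wired to its CPU (X_WIRED), must not have been stolen before (X_STOLEN, which per the comment keeps threads from bouncing between CPUs without ever running), and must not still have live FPU state on the remote CPU. Isolated into a predicate (the flag values and the miniature thread_t are hypothetical; only the field names follow the code above):

#include <stdbool.h>

#define X_WIRED		(1 << 0)	/* hypothetical value; thread pinned to its CPU */
#define X_STOLEN	(1 << 1)	/* hypothetical value; thread already migrated once */

typedef struct {			/* miniature stand-in for the kernel's thread_t */
	unsigned flags;
	bool fpu_context_engaged;	/* FPU state still held by the thread's CPU */
} thread_t;

/* A thread may be stolen only if none of the three conditions hold. */
static bool can_steal(const thread_t *t)
{
	return !(t->flags & (X_WIRED | X_STOLEN)) &&
	    !t->fpu_context_engaged;
}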