/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <cpu.h>
#include <mm/vm.h>
#include <config.h>
#include <context.h>
#include <func.h>
#include <arch.h>
#include <arch/asm.h>
#include <list.h>
#include <panic.h>
#include <typedefs.h>
#include <mm/page.h>
#include <synch/spinlock.h>
#include <arch/faddr.h>
#include <arch/atomic.h>

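/*
 * Number of ready threads in the whole system. It is decremented in this
 * file with atomic_dec() and incremented elsewhere (e.g. when a thread is
 * made ready in thread_ready()).
 */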
volatile int nrdy;


/** Take actions before new thread runs
 *
 * Perform actions that need to be
 * taken before the newly selected
 * thread is passed control.
 *
 */
void before_thread_runs(void)
{
	before_thread_runs_arch();
	fpu_context_restore(&(THREAD->saved_fpu_context));
}
62 | |||
63 | |||
107 | decky | 64 | /** Initialize scheduler |
65 | * |
||
66 | * Initialize kernel scheduler. |
||
67 | * |
||
68 | */ |
||
1 | jermar | 69 | void scheduler_init(void) |
70 | { |
||
71 | } |
||
72 | |||
107 | decky | 73 | |
74 | /** Get thread to be scheduled |
||
75 | * |
||
76 | * Get the optimal thread to be scheduled |
||
109 | jermar | 77 | * according to thread accounting and scheduler |
107 | decky | 78 | * policy. |
79 | * |
||
80 | * @return Thread to be scheduled. |
||
81 | * |
||
82 | */ |
||
1 | jermar | 83 | struct thread *find_best_thread(void) |
84 | { |
||
85 | thread_t *t; |
||
86 | runq_t *r; |
||
87 | int i, n; |
||
88 | |||
89 | loop: |
||
90 | cpu_priority_high(); |
||
91 | |||
15 | jermar | 92 | spinlock_lock(&CPU->lock); |
93 | n = CPU->nrdy; |
||
94 | spinlock_unlock(&CPU->lock); |
||
1 | jermar | 95 | |
96 | cpu_priority_low(); |
||
97 | |||
98 | if (n == 0) { |
||
99 | #ifdef __SMP__ |
||
100 | /* |
||
101 | * If the load balancing thread is not running, wake it up and |
||
102 | * set CPU-private flag that the kcpulb has been started. |
||
103 | */ |
||
15 | jermar | 104 | if (test_and_set(&CPU->kcpulbstarted) == 0) { |
125 | jermar | 105 | waitq_wakeup(&CPU->kcpulb_wq, 0); |
1 | jermar | 106 | goto loop; |
107 | } |
||
108 | #endif /* __SMP__ */ |
||
109 | |||
110 | /* |
||
111 | * For there was nothing to run, the CPU goes to sleep |
||
112 | * until a hardware interrupt or an IPI comes. |
||
113 | * This improves energy saving and hyperthreading. |
||
114 | * On the other hand, several hardware interrupts can be ignored. |
||
115 | */ |
||
116 | cpu_sleep(); |
||
117 | goto loop; |
||
118 | } |
||
119 | |||
120 | cpu_priority_high(); |
||
114 | jermar | 121 | |
122 | i = 0; |
||
123 | retry: |
||
124 | for (; i<RQ_COUNT; i++) { |
||
15 | jermar | 125 | r = &CPU->rq[i]; |
1 | jermar | 126 | spinlock_lock(&r->lock); |
127 | if (r->n == 0) { |
||
128 | /* |
||
129 | * If this queue is empty, try a lower-priority queue. |
||
130 | */ |
||
131 | spinlock_unlock(&r->lock); |
||
132 | continue; |
||
133 | } |
||
134 | |||
		/*
		 * Avoid deadlock with relink_rq(): it takes CPU->lock first and
		 * the run queue lock second, while we already hold r->lock here,
		 * so we may only attempt CPU->lock without blocking.
		 */
		if (!spinlock_trylock(&CPU->lock)) {
			/*
			 * Unlock r and try again.
			 */
			spinlock_unlock(&r->lock);
			goto retry;
		}
		CPU->nrdy--;
		spinlock_unlock(&CPU->lock);

		atomic_dec(&nrdy);
		r->n--;

		/*
		 * Take the first thread from the queue.
		 */
		t = list_get_instance(r->rq_head.next, thread_t, rq_link);
		list_remove(&t->rq_link);

		spinlock_unlock(&r->lock);

		spinlock_lock(&t->lock);
		t->cpu = CPU;

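		/*
		 * The quantum scales with the run queue index: a thread taken
		 * from rq[i] receives (i + 1) * 10 ms worth of ticks, so
		 * lower-priority threads run less often but for longer.
		 */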
		t->ticks = us2ticks((i + 1) * 10000);
		t->pri = i;	/* update pri to the rq index t was taken from */

		/*
		 * Clear the X_STOLEN flag so that t can be migrated
		 * when the need for load balancing arises again.
		 */
		t->flags &= ~X_STOLEN;
		spinlock_unlock(&t->lock);

		return t;
	}
	goto loop;

}


/** Prevent rq starvation
 *
 * Prevent low priority threads from starving in rq's.
 *
 * When the function decides to relink rq's, it reconnects
 * respective pointers so that, as a result, threads with 'pri'
 * greater than or equal to 'start' are moved to a higher-priority queue.
 *
 * @param start Threshold priority.
 *
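 * Example: with start == 0 and RQ_COUNT == 4, the loop below moves the
 * contents of rq[1] into rq[0], rq[2] into rq[1], and rq[3] into rq[2],
 * promoting every waiting thread by one priority level.
 *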
 */
void relink_rq(int start)
{
	link_t head;
	runq_t *r;
	int i, n;

	list_initialize(&head);
	spinlock_lock(&CPU->lock);
	if (CPU->needs_relink > NEEDS_RELINK_MAX) {
		for (i = start; i < RQ_COUNT - 1; i++) {
			/* remember and empty rq[i + 1] */
			r = &CPU->rq[i + 1];
			spinlock_lock(&r->lock);
			list_concat(&head, &r->rq_head);
			n = r->n;
			r->n = 0;
			spinlock_unlock(&r->lock);

			/* append rq[i + 1] to rq[i] */
			r = &CPU->rq[i];
			spinlock_lock(&r->lock);
			list_concat(&r->rq_head, &head);
			r->n += n;
			spinlock_unlock(&r->lock);
		}
		CPU->needs_relink = 0;
	}
	spinlock_unlock(&CPU->lock);

}
||
217 | |||
107 | decky | 218 | |
219 | /** The scheduler |
||
220 | * |
||
221 | * The thread scheduling procedure. |
||
222 | * |
||
1 | jermar | 223 | */ |
224 | void scheduler(void) |
||
225 | { |
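	/*
	 * pri must be volatile: context_save() below returns twice, much like
	 * setjmp(), so the value may not be cached in a register across it.
	 */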
	volatile pri_t pri;

	pri = cpu_priority_high();

	if (haltstate)
		halt();

	if (THREAD) {
		spinlock_lock(&THREAD->lock);
		fpu_context_save(&(THREAD->saved_fpu_context));
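		/*
		 * context_save() behaves setjmp()-like: it returns nonzero on
		 * the direct call and zero on the second return, when the saved
		 * context is resumed via context_restore(). The branch below is
		 * therefore executed by the thread being switched back in.
		 */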
		if (!context_save(&THREAD->saved_context)) {
			/*
			 * This is the place where threads leave scheduler();
			 */
			before_thread_runs();
			spinlock_unlock(&THREAD->lock);
			cpu_priority_restore(THREAD->saved_context.pri);
			return;
		}

		/*
		 * The CPU priority of the preempted thread is recorded here
		 * to facilitate scheduler() invocations from
		 * cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
		 */
		THREAD->saved_context.pri = pri;
	}
253 | |||
254 | /* |
||
255 | * We may not keep the old stack. |
||
256 | * Reason: If we kept the old stack and got blocked, for instance, in |
||
257 | * find_best_thread(), the old thread could get rescheduled by another |
||
258 | * CPU and overwrite the part of its own stack that was also used by |
||
259 | * the scheduler on this CPU. |
||
260 | * |
||
261 | * Moreover, we have to bypass the compiler-generated POP sequence |
||
262 | * which is fooled by SP being set to the very top of the stack. |
||
263 | * Therefore the scheduler() function continues in |
||
264 | * scheduler_separated_stack(). |
||
265 | */ |
||
15 | jermar | 266 | context_save(&CPU->saved_context); |
97 | jermar | 267 | context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE); |
15 | jermar | 268 | context_restore(&CPU->saved_context); |
1 | jermar | 269 | /* not reached */ |
270 | } |
||
271 | |||
107 | decky | 272 | |
273 | /** Scheduler stack switch wrapper |
||
274 | * |
||
275 | * Second part of the scheduler() function |
||
276 | * using new stack. Handling the actual context |
||
277 | * switch to a new thread. |
||
278 | * |
||
279 | */ |
||
1 | jermar | 280 | void scheduler_separated_stack(void) |
281 | { |
||
282 | int priority; |
||
283 | |||
15 | jermar | 284 | if (THREAD) { |
285 | switch (THREAD->state) { |
||
1 | jermar | 286 | case Running: |
125 | jermar | 287 | THREAD->state = Ready; |
288 | spinlock_unlock(&THREAD->lock); |
||
289 | thread_ready(THREAD); |
||
290 | break; |
||
1 | jermar | 291 | |
292 | case Exiting: |
||
125 | jermar | 293 | frame_free((__address) THREAD->kstack); |
294 | if (THREAD->ustack) { |
||
295 | frame_free((__address) THREAD->ustack); |
||
296 | } |
||
1 | jermar | 297 | |
125 | jermar | 298 | /* |
299 | * Detach from the containing task. |
||
300 | */ |
||
301 | spinlock_lock(&TASK->lock); |
||
302 | list_remove(&THREAD->th_link); |
||
303 | spinlock_unlock(&TASK->lock); |
||
73 | vana | 304 | |
125 | jermar | 305 | spinlock_unlock(&THREAD->lock); |
306 | |||
307 | spinlock_lock(&threads_lock); |
||
308 | list_remove(&THREAD->threads_link); |
||
309 | spinlock_unlock(&threads_lock); |
||
73 | vana | 310 | |
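			/*
			 * If this CPU lazily holds the exiting thread's FPU
			 * context, forget the ownership.
			 */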
			spinlock_lock(&CPU->lock);
			if (CPU->fpu_owner == THREAD)
				CPU->fpu_owner = NULL;
			spinlock_unlock(&CPU->lock);

			free(THREAD);

			break;

		case Sleeping:
			/*
			 * Prefer the thread after it's woken up.
			 */
			THREAD->pri = -1;

			/*
			 * We need to release wq->lock which we locked in waitq_sleep().
			 * Address of wq->lock is kept in THREAD->sleep_queue.
			 */
			spinlock_unlock(&THREAD->sleep_queue->lock);

			/*
			 * Check for possible requests for out-of-context invocation.
			 */
			if (THREAD->call_me) {
				THREAD->call_me(THREAD->call_me_with);
				THREAD->call_me = NULL;
				THREAD->call_me_with = NULL;
			}

			spinlock_unlock(&THREAD->lock);

			break;

		default:
			/*
			 * The state being entered is unexpected.
			 */
			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
			break;
		}
		THREAD = NULL;
	}

	THREAD = find_best_thread();

	spinlock_lock(&THREAD->lock);
	priority = THREAD->pri;
	spinlock_unlock(&THREAD->lock);

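	/*
	 * Occasionally promote starving lower-priority threads, using the
	 * priority of the thread just picked as the threshold.
	 */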
	relink_rq(priority);

	spinlock_lock(&THREAD->lock);

	/*
	 * If both the old and the new task are the same, lots of work is avoided.
	 */
	if (TASK != THREAD->task) {
		vm_t *m1 = NULL;
		vm_t *m2;

		if (TASK) {
			spinlock_lock(&TASK->lock);
			m1 = TASK->vm;
			spinlock_unlock(&TASK->lock);
		}

		spinlock_lock(&THREAD->task->lock);
		m2 = THREAD->task->vm;
		spinlock_unlock(&THREAD->task->lock);

		/*
		 * Note that it is possible for two tasks to share one vm mapping.
		 */
		if (m1 != m2) {
			/*
			 * Both tasks and vm mappings are different.
			 * Replace the old one with the new one.
			 */
			vm_install(m2);
		}
		TASK = THREAD->task;
	}

	THREAD->state = Running;

#ifdef SCHEDULER_VERBOSE
	printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
#endif

	context_restore(&THREAD->saved_context);
	/* not reached */
}


#ifdef __SMP__
/** Load balancing thread
 *
 * SMP load balancing thread. It supervises the supply
 * of threads for the CPU it's wired to.
 *
 * @param arg Generic thread argument (unused).
 *
 */
void kcpulb(void *arg)
{
	thread_t *t;
	int count, i, j, k = 0;
	pri_t pri;

loop:
	/*
	 * Sleep until there's some work to do.
	 */
	waitq_sleep(&CPU->kcpulb_wq);

not_satisfied:
	/*
	 * Calculate the number of threads that will be migrated/stolen from
	 * other CPU's. Note that the situation may have changed between two
	 * passes, so get the most up-to-date counts each time.
	 */
	pri = cpu_priority_high();
	spinlock_lock(&CPU->lock);
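	/*
	 * Steal just enough threads to bring this CPU up to the system-wide
	 * average: count = average ready threads per CPU - our own count.
	 */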
	count = nrdy / config.cpu_active;
	count -= CPU->nrdy;
	spinlock_unlock(&CPU->lock);
	cpu_priority_restore(pri);

	if (count <= 0)
		goto satisfied;

	/*
	 * Search the lowest-priority queues on all CPU's first and the
	 * highest-priority queues on all CPU's last.
	 */
	for (j = RQ_COUNT - 1; j >= 0; j--) {
		for (i = 0; i < config.cpu_active; i++) {
			link_t *l;
			runq_t *r;
			cpu_t *cpu;

			cpu = &cpus[(i + k) % config.cpu_active];

			/*
			 * Not interested in ourselves.
			 * No interrupt disabling is needed here, for kcpulb is X_WIRED.
			 */
			if (CPU == cpu)
				continue;

restart:
			pri = cpu_priority_high();
			r = &cpu->rq[j];
			spinlock_lock(&r->lock);
			if (r->n == 0) {
				spinlock_unlock(&r->lock);
				cpu_priority_restore(pri);
				continue;
			}

			t = NULL;
			l = r->rq_head.prev;	/* search rq from the back */
			while (l != &r->rq_head) {
				t = list_get_instance(l, thread_t, rq_link);
				/*
				 * We don't want to steal CPU-wired threads, nor threads
				 * that have already been stolen. The latter prevents
				 * threads from migrating between CPU's without ever
				 * being run. We also don't want to steal threads whose
				 * FPU context is still in the CPU.
				 */
				spinlock_lock(&t->lock);
				if (!(t->flags & (X_WIRED | X_STOLEN)) && !(t->fpu_context_engaged)) {
					/*
					 * Remove t from r.
					 */
					spinlock_unlock(&t->lock);

					/*
					 * Here we have to avoid deadlock with relink_rq(),
					 * because it locks cpu and r in a different order than we do.
					 */
					if (!spinlock_trylock(&cpu->lock)) {
						/* Release all locks and try again. */
						spinlock_unlock(&r->lock);
						cpu_priority_restore(pri);
						goto restart;
					}
					cpu->nrdy--;
					spinlock_unlock(&cpu->lock);

					atomic_dec(&nrdy);

					r->n--;
					list_remove(&t->rq_link);

					break;
				}
				spinlock_unlock(&t->lock);
				l = l->prev;
				t = NULL;
			}
			spinlock_unlock(&r->lock);

			if (t) {
				/*
				 * Ready t on local CPU
				 */
				spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
				printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
#endif
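				/*
				 * X_STOLEN prevents the thread from being stolen
				 * again before it gets to run; find_best_thread()
				 * clears the flag once it picks the thread.
				 */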
				t->flags |= X_STOLEN;
				spinlock_unlock(&t->lock);

				thread_ready(t);

				cpu_priority_restore(pri);

				if (--count == 0)
					goto satisfied;

				/*
				 * We are not satisfied yet, focus on another CPU next time.
				 */
				k++;

				continue;
			}
			cpu_priority_restore(pri);
		}
	}

	if (CPU->nrdy) {
		/*
		 * Be a little bit light-weight and let migrated threads run.
		 */
		scheduler();
	} else {
		/*
		 * We failed to migrate a single thread.
		 * Something more sophisticated should be done.
		 */
		scheduler();
	}

	goto not_satisfied;

satisfied:
	/*
	 * Tell find_best_thread() to wake us up later again.
	 */
	CPU->kcpulbstarted = 0;
	goto loop;
}

#endif /* __SMP__ */