--- Rev 827
+++ Rev 897
@@ -47,11 +47,11 @@
 #include <print.h>
 #include <debug.h>
 
 atomic_t nrdy;
 
-/** Take actions before new thread runs
+/** Take actions before new thread runs.
  *
  * Perform actions that need to be
  * taken before the newly selected
  * thread is passed control.
  *
@@ -75,10 +75,24 @@
 		THREAD->fpu_context_exists=1;
 	}
 #endif
 }
 
+/** Take actions after old thread ran.
+ *
+ * Perform actions that need to be
+ * taken after the running thread
+ * was preempted by the scheduler.
+ *
+ * THREAD->lock is locked on entry
+ *
+ */
+void after_thread_ran(void)
+{
+	after_thread_ran_arch();
+}
+
 #ifdef CONFIG_FPU_LAZY
 void scheduler_fpu_lazy_request(void)
 {
 	fpu_enable();
 	spinlock_lock(&CPU->lock);
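The after_thread_ran() hook added here mirrors before_thread_runs(): the generic scheduler delegates to a per-architecture callback and stays architecture-neutral itself. A minimal compilable sketch of that pattern follows; the printf body is an illustrative stand-in for whatever cleanup a port performs in its real after_thread_ran_arch():

#include <stdio.h>

/* Illustrative stand-in for the per-architecture hook; each port
 * supplies its own implementation. */
static void after_thread_ran_arch(void)
{
	printf("arch hook: clean up after the preempted thread\n");
}

/* Generic wrapper, as added in rev 897; in the kernel it is called
 * with THREAD->lock held. */
void after_thread_ran(void)
{
	after_thread_ran_arch();
}

int main(void)
{
	after_thread_ran();
	return 0;
}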
@@ -255,10 +269,13 @@
 	int priority;
 
 	ASSERT(CPU != NULL);
 
 	if (THREAD) {
+		/* must be run after switch to scheduler stack */
+		after_thread_ran();
+
 		switch (THREAD->state) {
 		case Running:
 			THREAD->state = Ready;
 			spinlock_unlock(&THREAD->lock);
 			thread_ready(THREAD);
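For orientation, the switch that now follows the after_thread_ran() call puts the outgoing thread away according to its state; a thread that was still Running is simply handed back to the run queues. A reduced, compilable sketch of that Running to Ready transition (the enum, the struct, and the thread_ready() body are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

typedef enum { Running, Ready, Sleeping } state_t;
typedef struct { int tid; state_t state; } thread_t;

/* Stand-in for thread_ready(): requeue the thread on a run queue. */
static void thread_ready(thread_t *t)
{
	printf("tid%d back on the run queue\n", t->tid);
}

static void put_thread_away(thread_t *t)
{
	switch (t->state) {
	case Running:
		/* Preempted while runnable: mark Ready and requeue. */
		t->state = Ready;
		thread_ready(t);
		break;
	default:
		/* Sleeping, exiting etc. are handled by other cases. */
		break;
	}
}

int main(void)
{
	thread_t t = { .tid = 1, .state = Running };
	put_thread_away(&t);
	return 0;
}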
@@ -298,10 +315,11 @@
 			 * Entering state is unexpected.
 			 */
 			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
 			break;
 		}
+
 		THREAD = NULL;
 	}
 
 
 	THREAD = find_best_thread();
@@ -349,10 +367,20 @@
 #ifdef SCHEDULER_VERBOSE
 	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
 	/*
+	 * Some architectures provide late kernel PA2KA(identity)
+	 * mapping in a page fault handler. However, the page fault
+	 * handler uses the kernel stack of the running thread and
+	 * therefore cannot be used to map it. The kernel stack, if
+	 * necessary, is to be mapped in before_thread_runs(). This
+	 * function must be executed before the switch to the new stack.
+	 */
+	before_thread_runs();
+
+	/*
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
 	 */
 	the_copy(THE, (the_t *) THREAD->kstack);
 
 	context_restore(&THREAD->saved_context);
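The comment above explains why before_thread_runs() moved here from the context_save() branch in the last hunk: once the CPU is running on the new thread's kernel stack, a page fault on that stack can no longer be serviced, so any lazy mapping has to be established while still on the old stack. A hypothetical sketch of the idea; page_mapped() and map_kernel_page() are invented stand-ins for an architecture's mapping layer, not HelenOS APIs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for an architecture's page-table helpers. */
static bool page_mapped(uintptr_t va) { (void) va; return false; }
static void map_kernel_page(uintptr_t va)
{
	printf("mapping kernel page at %#lx\n", (unsigned long) va);
}

/* Must run while still on the scheduler's stack: mapping the new
 * thread's kernel stack here guarantees the stack switch itself
 * cannot fault. */
static void ensure_kstack_mapped(uintptr_t kstack)
{
	if (!page_mapped(kstack))
		map_kernel_page(kstack);
}

int main(void)
{
	ensure_kstack_mapped(0x80010000UL);
	return 0;
}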
@@ -385,11 +413,10 @@
 #endif
 	if (!context_save(&THREAD->saved_context)) {
 		/*
 		 * This is the place where threads leave scheduler();
 		 */
-		before_thread_runs();
 		spinlock_unlock(&THREAD->lock);
 		interrupts_restore(THREAD->saved_context.ipl);
 		return;
 	}
 
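The if (!context_save(...)) guard in this hunk behaves like setjmp()/longjmp(): context_save() returns twice, once on the direct call and once more when context_restore() resumes the saved context, and the return value tells the two paths apart. Judging by the guard, the restore path sees 0 (setjmp() uses the opposite convention, returning 0 on the direct call). A small standard-C analogue:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf ctx;

int main(void)
{
	if (setjmp(ctx) != 0) {
		/* Analogue of the !context_save() branch above: control
		 * came back through the restore, i.e. the place where
		 * threads leave scheduler(). */
		printf("resumed\n");
		return 0;
	}
	printf("context saved, switching away\n");
	longjmp(ctx, 1);	/* analogue of context_restore() */
}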