--- Rev 1787
+++ Rev 1855
@@ -33,51 +33,98 @@
  */
 
 #include <proc/scheduler.h>
 #include <proc/thread.h>
 #include <arch.h>
+#include <arch/asm.h>
 #include <arch/mm/tlb.h>
 #include <arch/mm/page.h>
 #include <config.h>
 #include <align.h>
+#include <macros.h>
 
 /** Perform sparc64 specific tasks needed before the new task is run. */
 void before_task_runs_arch(void)
 {
 }
 
-/** Ensure that thread's kernel stack is locked in TLB. */
+/** Perform sparc64 specific steps before scheduling a thread.
+ *
+ * Ensure that thread's kernel stack, as well as userspace window
+ * buffer for userspace threads, are locked in DTLB.
+ */
 void before_thread_runs_arch(void)
 {
 	uintptr_t base;
 
 	base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
-	if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
+	if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
 		/*
 		 * Kernel stack of this thread is not locked in DTLB.
 		 * First, make sure it is not mapped already.
 		 * If not, create a locked mapping for it.
 		 */
 		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
 		dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
+	}
+
+	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
+		/*
+		 * If this thread executes also in userspace, we have to lock
+		 * its userspace window buffer into DTLB.
+		 */
+		ASSERT(THREAD->arch.uspace_window_buffer);
+		uintptr_t uw_buf = (uintptr_t) THREAD->arch.uspace_window_buffer;
+		if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
+			/*
+			 * The buffer is not covered by the 4M locked kernel DTLB entry.
+			 */
+			dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) uw_buf);
+			dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
+		}
 	}
 }
 
-/** Unlock thread's stack from TLB, if necessary. */
+/** Perform sparc64 specific steps before a thread stops running.
+ *
+ * Demap any locked DTLB entries installed by the thread (i.e. kernel stack
+ * and userspace window buffer).
+ */
 void after_thread_ran_arch(void)
 {
 	uintptr_t base;
 
 	base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
-	if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
+	if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
 		/*
 		 * Kernel stack of this thread is locked in DTLB.
 		 * Destroy the mapping.
 		 */
 		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
 	}
+
+	if ((THREAD->flags & THREAD_FLAG_USPACE)) {
+		/*
+		 * If this thread executes also in userspace, we have to force all
+		 * its still-active userspace windows into the userspace window buffer
+		 * and demap the buffer from DTLB.
+		 */
+		ASSERT(THREAD->arch.uspace_window_buffer);
+
+		flushw();	/* force all userspace windows into memory */
+
+		uintptr_t uw_buf = (uintptr_t) THREAD->arch.uspace_window_buffer;
+		if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
+			/*
+			 * The buffer is not covered by the 4M locked kernel DTLB entry
+			 * and therefore it was given a dedicated locked DTLB entry.
+			 * Demap it.
+			 */
+			dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) uw_buf);
+		}
+	}
 }
 
 /** @}
  */