Rev 2089 | Rev 2141
Line 32... | Line 32...

 /** @file
  */
 
 #include <arch/mm/tsb.h>
 #include <arch/mm/tlb.h>
+#include <arch/mm/page.h>
 #include <arch/barrier.h>
 #include <mm/as.h>
 #include <arch/types.h>
 #include <macros.h>
 #include <debug.h>
 
-#define TSB_INDEX_MASK    ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
+#define TSB_INDEX_MASK    ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
 
 /** Invalidate portion of TSB.
  *
  * We assume that the address space is already locked. Note that respective
  * portions of both TSBs are invalidated at a time.
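
A worked illustration of the index arithmetic may help here. The sketch below is standalone and hedged: TSB_SIZE == 0 and MMU_PAGE_WIDTH == 13 (8K MMU pages) are assumed values chosen only to make the numbers concrete, not taken from this diff; the real definitions presumably live in arch/mm/tsb.h and arch/mm/page.h.

    /* Standalone sketch; the two constants below are assumptions. */
    #include <stdio.h>
    #include <stdint.h>

    #define MMU_PAGE_WIDTH    13    /* assumed: 8K MMU page */
    #define TSB_SIZE          0     /* assumed TSB size encoding */
    #define TSB_INDEX_MASK    ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)

    int main(void)
    {
        uint64_t page = 0x40a000;    /* some MMU-page-aligned virtual address */
        uint64_t index = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;

        /* 0x40a000 >> 13 == 0x205, and 0x205 & 0x1ff == 0x5 */
        printf("TSB index: 0x%llx\n", (unsigned long long) index);
        return 0;
    }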
Line 57... | Line 58...

     index_t i0, i;
     count_t cnt;
 
     ASSERT(as->arch.itsb && as->arch.dtsb);
 
-    i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
-    cnt = min(pages, ITSB_ENTRY_COUNT);
+    i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+    cnt = min(pages * MMU_PAGES_PER_PAGE, ITSB_ENTRY_COUNT);
 
     for (i = 0; i < cnt; i++) {
         as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
             true;
         as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
             true;
     }
 }
 
 /** Copy software PTE to ITSB.
  *
  * @param t Software PTE.
+ * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
  */
-void itsb_pte_copy(pte_t *t)
+void itsb_pte_copy(pte_t *t, index_t index)
 {
     as_t *as;
     tsb_entry_t *tsb;
+    index_t entry;
 
     as = t->as;
-    tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
+    entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
+    tsb = &as->arch.itsb[entry];
 
     /*
      * We use write barriers to make sure that the TSB load
      * won't use inconsistent data or that the fault will
      * be repeated.
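
The new cnt computation in tsb_invalidate() scales kernel pages to the 8K MMU pages they are made of. A small hedged sketch of that relationship follows; PAGE_WIDTH == 14 and the exact shape of MMU_PAGES_PER_PAGE are assumptions chosen to match the two 8K subpages per page implied by the new index parameter, not values copied from arch/mm/page.h.

    #include <stdio.h>

    /* Assumed values and definition shape, for illustration only. */
    #define MMU_PAGE_WIDTH        13    /* 8K MMU page */
    #define PAGE_WIDTH            14    /* assumed 16K kernel page */
    #define MMU_PAGES_PER_PAGE    (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))

    int main(void)
    {
        unsigned pages = 3;    /* kernel pages to invalidate */

        /* tsb_invalidate() walks this many consecutive 8K TSB entries,
         * before the min() cap at ITSB_ENTRY_COUNT. */
        printf("%u kernel pages -> %u MMU pages\n",
            pages, pages * MMU_PAGES_PER_PAGE);
        return 0;
    }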
Line 93... | Line 97...

      * set to 0) */
 
     write_barrier();
 
     tsb->tag.context = as->asid;
-    tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+    tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
+        VA_TAG_PAGE_SHIFT;
     tsb->data.value = 0;
     tsb->data.size = PAGESIZE_8K;
-    tsb->data.pfn = t->frame >> FRAME_WIDTH;
+    tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
     tsb->data.cp = t->c;
     tsb->data.p = t->k;    /* p as privileged */
     tsb->data.v = t->p;
 
     write_barrier();
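
The pair of write_barrier() calls makes a concurrent TSB lookup see either a still-invalid entry or a completely written one. Below is a minimal, self-contained sketch of that invalidate, fill, revalidate ordering; the toy entry type and the __sync_synchronize() stand-in for write_barrier() are assumptions, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Toy stand-ins; the kernel's tsb_entry_t and write_barrier() differ. */
    typedef struct {
        volatile bool invalid;
        uint64_t tag;
        uint64_t data;
    } toy_tsb_entry_t;

    #define toy_write_barrier()    __sync_synchronize()

    /* Update an entry so a concurrent reader never consumes a half-written
     * translation: it either sees invalid == true or the complete new entry. */
    static void toy_entry_update(toy_tsb_entry_t *e, uint64_t tag, uint64_t data)
    {
        e->invalid = true;      /* take the entry out of service */
        toy_write_barrier();    /* invalidation visible before new contents */
        e->tag = tag;
        e->data = data;
        toy_write_barrier();    /* new contents visible before revalidation */
        e->invalid = false;     /* entry may be used again */
    }

    int main(void)
    {
        toy_tsb_entry_t e = { .invalid = true };

        toy_entry_update(&e, 0x1234, 0x5678);
        return e.invalid;    /* 0 after a completed update */
    }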
Line 108... | Line 113...

     tsb->tag.invalid = false;    /* mark the entry as valid */
 }
 
 /** Copy software PTE to DTSB.
  *
  * @param t Software PTE.
+ * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
  * @param ro If true, the mapping is copied read-only.
  */
-void dtsb_pte_copy(pte_t *t, bool ro)
+void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
 {
     as_t *as;
     tsb_entry_t *tsb;
+    index_t entry;
 
     as = t->as;
-    tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
+    entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
+    tsb = &as->arch.dtsb[entry];
 
     /*
      * We use write barriers to make sure that the TSB load
      * won't use inconsistent data or that the fault will
      * be repeated.
Line 132... | Line 140...

      * set to 0) */
 
     write_barrier();
 
     tsb->tag.context = as->asid;
-    tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+    tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
+        VA_TAG_PAGE_SHIFT;
     tsb->data.value = 0;
     tsb->data.size = PAGESIZE_8K;
-    tsb->data.pfn = t->frame >> FRAME_WIDTH;
+    tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
     tsb->data.cp = t->c;
 #ifdef CONFIG_VIRT_IDX_DCACHE
     tsb->data.cv = t->c;
 #endif /* CONFIG_VIRT_IDX_DCACHE */
     tsb->data.p = t->k;    /* p as privileged */
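
Overall, the right-hand revision swaps the kernel-page constants (PAGE_WIDTH, FRAME_WIDTH) for their MMU counterparts and threads an 8K-subpage index through the copy routines, apparently so that a software PTE describing a kernel page larger than the 8K MMU page can be installed one subpage at a time. Below is a hedged sketch of how a caller might drive the new signatures; example_tsb_refill() is a hypothetical helper, not part of this diff, and it assumes the same kernel headers as the file above.

    /* Hypothetical caller sketch, assuming the kernel's own headers. */
    #include <arch/mm/tsb.h>
    #include <arch/mm/page.h>
    #include <arch/types.h>
    #include <mm/as.h>

    static void example_tsb_refill(pte_t *t, bool ro)
    {
        index_t i;

        /* Install every 8K MMU subpage of the (possibly larger) kernel
         * page described by the software PTE t. */
        for (i = 0; i < MMU_PAGES_PER_PAGE; i++) {
            itsb_pte_copy(t, i);
            dtsb_pte_copy(t, i, ro);
        }
    }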