--- Rev 3862
+++ Rev 4129
@@ -70,9 +70,9 @@
 	else
 		cnt = pages;
 
 	for (i = 0; i < cnt; i++) {
 		((tsb_entry_t *) as->arch.tsb_description.tsb_base)[
-		    (i0 + i) & (TSB_ENTRY_COUNT - 1)].tag.invalid = true;
+		    (i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false;
 	}
 }
 
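Review note: the hunk above switches tsb_invalidate() from setting tag.invalid to clearing the valid bit in the data word (data.v), and it relies on TSB_ENTRY_COUNT being a power of two so that masking with (TSB_ENTRY_COUNT - 1) wraps the slot index. A minimal, self-contained sketch of that wrap-around invalidation follows; the clamping of cnt is not visible in this hunk, so the condition used here, the 512-entry size and every demo_* name are assumptions made only for the illustration.

#include <stdbool.h>
#include <stddef.h>

#define TSB_ENTRY_COUNT 512	/* assumed size; any power of two behaves the same */

typedef struct {
	struct {
		bool v;		/* valid bit, standing in for data.v above */
	} data;
} demo_tsb_entry_t;		/* simplified, hypothetical entry layout */

/* Clear the valid bit of 'pages' consecutive slots starting at slot i0,
 * wrapping around the table with the power-of-two mask. */
static void demo_tsb_invalidate(demo_tsb_entry_t *tsb, size_t i0, size_t pages)
{
	size_t cnt = (pages > TSB_ENTRY_COUNT) ? TSB_ENTRY_COUNT : pages;
	size_t i;

	for (i = 0; i < cnt; i++)
		tsb[(i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false;
}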
@@ -79,47 +79,45 @@
 /** Copy software PTE to ITSB.
  *
  * @param t Software PTE.
- * @param index Zero if lower 8K-subpage, one if higher 8K subpage.
  */
-void itsb_pte_copy(pte_t *t, index_t index)
+void itsb_pte_copy(pte_t *t)
 {
-#if 0
 	as_t *as;
 	tsb_entry_t *tsb;
 	index_t entry;
 
-	ASSERT(index <= 1);
-
 	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
-	ASSERT(entry < ITSB_ENTRY_COUNT);
-	tsb = &as->arch.itsb[entry];
+	entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+	ASSERT(entry < TSB_ENTRY_COUNT);
+	tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
 
 	/*
 	 * We use write barriers to make sure that the TSB load
 	 * won't use inconsistent data or that the fault will
 	 * be repeated.
 	 */
 
-	tsb->tag.invalid = true;	/* invalidate the entry
-					 * (tag target has this
-					 * set to 0) */
+	tsb->data.v = false;
 
 	write_barrier();
 
 	tsb->tag.context = as->asid;
-	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
 	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+
 	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
+	tsb->data.nfo = false;
+	tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
+	tsb->data.ie = false;
+	tsb->data.e = false;
 	tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */
+	tsb->data.cv = false;
 	tsb->data.p = t->k; /* p as privileged, k as kernel */
-	tsb->data.v = t->p; /* v as valid, p as present */
+	tsb->data.x = true;
+	tsb->data.w = false;
+	tsb->data.size = PAGESIZE_8K;
 
 	write_barrier();
 
-	tsb->tag.invalid = false;	/* mark the entry as valid */
-#endif
+	tsb->data.v = t->p; /* v as valid, p as present */
 }
 
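Review note: the renames above (tag.invalid to data.v, pfn to ra, plus the new nfo/ie/e/cv/x/w fields) appear to track the sun4v TTE layout, but the publication protocol spelled out in the in-code comment is unchanged: clear the valid bit, write-barrier, fill in the tag and data fields, write-barrier again, and only then set data.v. The sketch below shows that ordering in isolation, with C11 fences standing in for the kernel's write_barrier(); the two-word entry and the convention that the valid bit is bit 63 of the data word are assumptions of the sketch, not something this diff states.

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	uint64_t tag;
	uint64_t data;			/* bit 63 plays the role of data.v here */
} demo_entry_t;				/* hypothetical layout for illustration */

#define DEMO_V (UINT64_C(1) << 63)

/* Stand-in for the kernel's write_barrier(). */
#define demo_write_barrier() atomic_thread_fence(memory_order_seq_cst)

/* Publish (tag, data) into *e so a concurrent TSB lookup never sees a
 * valid entry whose tag and data belong to different mappings. */
static void demo_publish(demo_entry_t *e, uint64_t tag, uint64_t data)
{
	e->data &= ~DEMO_V;		/* 1. invalidate the slot */
	demo_write_barrier();
	e->tag = tag;			/* 2. fill in the new contents, still invalid */
	e->data = data & ~DEMO_V;
	demo_write_barrier();
	e->data |= DEMO_V;		/* 3. set the valid bit last */
}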
@@ -126,55 +124,51 @@
 /** Copy software PTE to DTSB.
  *
  * @param t Software PTE.
- * @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
  * @param ro If true, the mapping is copied read-only.
  */
-void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
+void dtsb_pte_copy(pte_t *t, bool ro)
 {
-#if 0
 	as_t *as;
 	tsb_entry_t *tsb;
 	index_t entry;
-
-	ASSERT(index <= 1);
 
 	as = t->as;
-	entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
-	ASSERT(entry < DTSB_ENTRY_COUNT);
-	tsb = &as->arch.dtsb[entry];
+	entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
+	ASSERT(entry < TSB_ENTRY_COUNT);
+	tsb = &((tsb_entry_t *) as->arch.tsb_description.tsb_base)[entry];
 
 	/*
 	 * We use write barriers to make sure that the TSB load
 	 * won't use inconsistent data or that the fault will
 	 * be repeated.
	 */
 
-	tsb->tag.invalid = true;	/* invalidate the entry
-					 * (tag target has this
-					 * set to 0) */
+	tsb->data.v = false;
 
 	write_barrier();
 
 	tsb->tag.context = as->asid;
-	/* the shift is bigger than PAGE_WIDTH, do not bother with index */
 	tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
+
 	tsb->data.value = 0;
-	tsb->data.size = PAGESIZE_8K;
-	tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
-	tsb->data.cp = t->c;
+	tsb->data.nfo = false;
+	tsb->data.ra = t->frame >> MMU_FRAME_WIDTH;
+	tsb->data.ie = false;
+	tsb->data.e = false;
+	tsb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */
 #ifdef CONFIG_VIRT_IDX_DCACHE
 	tsb->data.cv = t->c;
 #endif /* CONFIG_VIRT_IDX_DCACHE */
-	tsb->data.p = t->k; /* p as privileged */
+	tsb->data.p = t->k; /* p as privileged, k as kernel */
+	tsb->data.x = true;
 	tsb->data.w = ro ? false : t->w;
-	tsb->data.v = t->p;
+	tsb->data.size = PAGESIZE_8K;
 
 	write_barrier();
 
-	tsb->tag.invalid = false;	/* mark the entry as valid */
-#endif
+	tsb->data.v = t->p; /* v as valid, p as present */
 }
 
 /** @}
  */
 
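Review note: with the 8K-subpage index parameter gone, callers now pass only the software PTE (plus the ro flag on the data side). A hypothetical caller, shown only to illustrate the revised signatures; the wrapper name, its parameters and the header path are assumptions, and the real TLB-miss handlers live outside this diff.

#include <arch/mm/tsb.h>	/* assumed location of the itsb_pte_copy()/dtsb_pte_copy() prototypes */

/* Hypothetical helper: copy a resolved software PTE into the TSB after a miss. */
static void demo_tsb_refill(pte_t *t, bool itlb_miss, bool read_only)
{
	if (itlb_miss)
		itsb_pte_copy(t);		/* sub-page index argument no longer exists */
	else
		dtsb_pte_copy(t, read_only);	/* ro still decides whether the w bit may be set */
}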