/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
        as_arch_init();
        AS_KERNEL = as_create(FLAG_AS_KERNEL);
        if (!AS_KERNEL)
                panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
        as_t *as;

        as = (as_t *) malloc(sizeof(as_t), 0);
        link_initialize(&as->inactive_as_with_asid_link);
        spinlock_initialize(&as->lock, "as_lock");
        btree_create(&as->as_area_btree);

        if (flags & FLAG_AS_KERNEL)
                as->asid = ASID_KERNEL;
        else
                as->asid = ASID_INVALID;

        as->refcount = 0;
        as->page_table = page_table_create(flags);

        return as;
}

/** Free address space. */
void as_free(as_t *as)
{
        ASSERT(as->refcount == 0);

        /* TODO: free as_areas and other resources held by as */
        /* TODO: free page table */
        free(as);
}

/** Create address space area with common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of the area.
 * @param base Base address of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
        ipl_t ipl;
        as_area_t *a;

        if (base % PAGE_SIZE)
                return NULL;

        /* Writable executable areas are not supported. */
        if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
                return NULL;

        ipl = interrupts_disable();
        spinlock_lock(&as->lock);

        if (!check_area_conflicts(as, base, size, NULL)) {
                spinlock_unlock(&as->lock);
                interrupts_restore(ipl);
                return NULL;
        }

        a = (as_area_t *) malloc(sizeof(as_area_t), 0);

        spinlock_initialize(&a->lock, "as_area_lock");

        a->flags = flags;
        a->pages = SIZE2FRAMES(size);
        a->base = base;

        btree_insert(&as->as_area_btree, base, (void *) a, NULL);

        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);

        return a;
}
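
/*
 * Editor's illustrative sketch (not part of the original file): a possible
 * caller of as_area_create(). The base address and the flag combination are
 * assumptions chosen for the example.
 */
#if 0
static void example_as_area_create(as_t *as)
{
        as_area_t *area;

        /* Request a page-aligned, read/write area spanning 16 pages. */
        area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
                              16 * PAGE_SIZE, 0x10000000);
        if (!area)
                panic("as_area_create failed: misaligned base, bad flags or conflict\n");
}
#endif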

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
        as_area_t *area;
        ipl_t ipl;

        ipl = interrupts_disable();
        page_table_lock(as, true);

        area = find_area_and_lock(as, page);
        if (!area) {
                panic("page not part of any as_area\n");
        }

        page_mapping_insert(as, page, frame, get_area_flags(area));

        spinlock_unlock(&area->lock);
        page_table_unlock(as, true);
        interrupts_restore(ipl);
}
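
/*
 * Editor's illustrative sketch (not part of the original file): eagerly
 * backing one page of an existing area with a zeroed frame, mirroring the
 * frame_alloc()/memsetb() sequence that as_page_fault() below performs
 * lazily. The virtual address is an assumption for the example.
 */
#if 0
static void example_as_set_mapping(as_t *as)
{
        __address frame;

        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
        as_set_mapping(as, 0x10000000, frame);  /* page must lie in some area */
}
#endif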

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be resolved here, 1 on success.
 */
int as_page_fault(__address page)
{
        pte_t *pte;
        as_area_t *area;
        __address frame;

        ASSERT(AS);

        spinlock_lock(&AS->lock);
        area = find_area_and_lock(AS, page);
        if (!area) {
                /*
                 * No area contained mapping for 'page'.
                 * Signal page fault to low-level handler.
                 */
                spinlock_unlock(&AS->lock);
                return 0;
        }

        page_table_lock(AS, false);

        /*
         * To avoid a race condition between two page faults
         * on the same address, we need to make sure
         * the mapping has not already been inserted.
         */
        if ((pte = page_mapping_find(AS, page))) {
                if (PTE_PRESENT(pte)) {
                        page_table_unlock(AS, false);
                        spinlock_unlock(&area->lock);
                        spinlock_unlock(&AS->lock);
                        return 1;
                }
        }

        /*
         * In general, several reasons can have caused this fault.
         *
         * - non-existent mapping: the area is a scratch
         *   area (e.g. stack) and so far has not been
         *   allocated a frame for the faulting page
         *
         * - non-present mapping: another possibility,
         *   currently not implemented, would be frame
         *   reuse; when this becomes a possibility,
         *   do not forget to distinguish between
         *   the different causes
         */
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);

        /*
         * Map 'page' to 'frame'.
         * Note that TLB shootdown is not attempted as only new information is being
         * inserted into page tables.
         */
        page_mapping_insert(AS, page, frame, get_area_flags(area));
        page_table_unlock(AS, false);

        spinlock_unlock(&area->lock);
        spinlock_unlock(&AS->lock);
        return 1;
}
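
/*
 * Editor's illustrative sketch (not part of the original file): how an
 * architecture's low-level fault handler might drive as_page_fault().
 * What happens on failure (panic here) is an assumption; a real handler
 * would signal the fault to the faulting task instead.
 */
#if 0
static void example_low_level_fault(__address badvaddr)
{
        __address page = badvaddr & ~(PAGE_SIZE - 1);   /* align down to page */

        if (!as_page_fault(page))
                panic("unresolved page fault\n");
}
#endif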

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
        ipl_t ipl;
        bool needs_asid = false;

        ipl = interrupts_disable();
        spinlock_lock(&as_lock);

        /*
         * First, take care of the old address space.
         */
        if (old) {
                spinlock_lock(&old->lock);
                ASSERT(old->refcount);
                if ((--old->refcount == 0) && (old != AS_KERNEL)) {
                        /*
                         * The old address space is no longer active on
                         * any processor. It can be appended to the
                         * list of inactive address spaces with assigned
                         * ASID.
                         */
                        ASSERT(old->asid != ASID_INVALID);
                        list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
                }
                spinlock_unlock(&old->lock);
        }

        /*
         * Second, prepare the new address space.
         */
        spinlock_lock(&new->lock);
        if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
                if (new->asid != ASID_INVALID)
                        list_remove(&new->inactive_as_with_asid_link);
                else
                        needs_asid = true;      /* defer call to asid_get() until new->lock is released */
        }
        SET_PTL0_ADDRESS(new->page_table);
        spinlock_unlock(&new->lock);

        if (needs_asid) {
                /*
                 * Allocation of new ASID was deferred
                 * until now in order to avoid deadlock.
                 */
                asid_t asid;

                asid = asid_get();
                spinlock_lock(&new->lock);
                new->asid = asid;
                spinlock_unlock(&new->lock);
        }
        spinlock_unlock(&as_lock);
        interrupts_restore(ipl);

        /*
         * Perform architecture-specific steps.
         * (e.g. write ASID to hardware register etc.)
         */
        as_install_arch(new);

        AS = new;
}
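
/*
 * Editor's illustrative sketch (not part of the original file): the very
 * first activation of an address space, the way a scheduler might call
 * as_switch(). Passing NULL for the outgoing space is taken from the
 * doc comment above; the calling context is an assumption.
 */
#if 0
static void example_first_switch(as_t *new_as)
{
        /*
         * No previous address space exists yet, hence the NULL.
         * new_as gains an active reference and, if it has no valid
         * ASID yet, one is allocated via the deferred asid_get() path.
         */
        as_switch(NULL, new_as);
}
#endif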

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
        int flags;

        flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

        if (a->flags & AS_AREA_READ)
                flags |= PAGE_READ;

        if (a->flags & AS_AREA_WRITE)
                flags |= PAGE_WRITE;

        if (a->flags & AS_AREA_EXEC)
                flags |= PAGE_EXEC;

        return flags;
}

/** Create page table.
 *
 * Depending on architecture, create either an address space
 * private or a global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
        ASSERT(as_operations);
        ASSERT(as_operations->page_table_create);

        return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * The locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
        ASSERT(as_operations);
        ASSERT(as_operations->page_table_lock);

        as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
        ASSERT(as_operations);
        ASSERT(as_operations->page_table_unlock);

        as_operations->page_table_unlock(as, unlock);
}
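
/*
 * Editor's illustrative sketch (not part of the original file): the two
 * page table locking conventions used in this file. Compare with
 * as_set_mapping() above (as->lock not held, so lock = true) and
 * as_page_fault() (as->lock already held, so lock = false).
 */
#if 0
static void example_page_table_locking(as_t *as)
{
        /* Caller does not hold as->lock: let the call take it. */
        page_table_lock(as, true);
        /* ... page_mapping_insert()/page_mapping_find()/page_mapping_remove() ... */
        page_table_unlock(as, true);

        /* Caller already holds as->lock: pass false. */
        spinlock_lock(&as->lock);
        page_table_lock(as, false);
        /* ... */
        page_table_unlock(as, false);
        spinlock_unlock(&as->lock);
}
#endif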

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
        as_area_t *area = NULL;
        ipl_t ipl;
        size_t pages;

        ipl = interrupts_disable();
        spinlock_lock(&as->lock);

        /*
         * Locate the area.
         */
        area = find_area_and_lock(as, address);
        if (!area) {
                spinlock_unlock(&as->lock);
                interrupts_restore(ipl);
                return (__address) -1;
        }

        pages = SIZE2FRAMES((address - area->base) + size);
        if (pages < area->pages) {
                int i;

                /*
                 * Shrinking the area.
                 * No need to check for overlaps.
                 */
                for (i = pages; i < area->pages; i++) {
                        pte_t *pte;

                        /*
                         * Releasing physical memory.
                         * This depends on the fact that the memory was allocated using frame_alloc().
                         */
                        page_table_lock(as, false);
                        pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
                        if (pte && PTE_VALID(pte)) {
                                __address frame;

                                ASSERT(PTE_PRESENT(pte));
                                frame = PTE_GET_FRAME(pte);
                                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                                page_table_unlock(as, false);

                                frame_free(ADDR2PFN(frame));
                        } else {
                                page_table_unlock(as, false);
                        }
                }
                /*
                 * Invalidate TLBs.
                 */
                tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
                tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
                tlb_shootdown_finalize();
        } else {
                /*
                 * Growing the area.
                 * Check for overlaps with other address space areas.
                 */
                if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
                        spinlock_unlock(&area->lock);
                        spinlock_unlock(&as->lock);
                        interrupts_restore(ipl);
                        return (__address) -1;
                }
        }

        area->pages = pages;

        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);

        return address;
}
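
/*
 * Editor's illustrative sketch (not part of the original file): shrinking
 * the area starting at base down to one page via as_remap(). The calling
 * context and error handling are assumptions.
 */
#if 0
static void example_as_remap(as_t *as, __address base)
{
        if (as_remap(as, base, PAGE_SIZE, 0) == (__address) -1) {
                /* No area contains base, or a grown area would have conflicted. */
        }
}
#endif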

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
        as_area_t *a;
        btree_node_t *leaf, *lnode;
        int i;

        a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
        if (a) {
                /* va is the base address of an address space area */
                spinlock_lock(&a->lock);
                return a;
        }

        /*
         * Search the leaf node and the rightmost record of its left sibling
         * to find out whether this is a miss or va belongs to an address
         * space area found there.
         */

        /* First, search the leaf node itself. */
        for (i = 0; i < leaf->keys; i++) {
                a = (as_area_t *) leaf->value[i];
                spinlock_lock(&a->lock);
                if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
                        return a;
                }
                spinlock_unlock(&a->lock);
        }

        /*
         * Second, locate the left sibling and test its last record.
         * Because of its position in the B+tree, it must have base < va.
         */
        if ((lnode = btree_node_left_sibling(&as->as_area_btree, leaf))) {
                a = (as_area_t *) lnode->value[lnode->keys - 1];
                spinlock_lock(&a->lock);
                if (va < a->base + a->pages * PAGE_SIZE) {
                        return a;
                }
                spinlock_unlock(&a->lock);
        }

        return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
        as_area_t *a;
        btree_node_t *leaf, *node;
        int i;

        /*
         * We don't want any area to conflict with the NULL page.
         */
        if (overlaps(va, size, NULL, PAGE_SIZE))
                return false;

        /*
         * The leaf node is found in O(log n), where n is proportional to
         * the number of address space areas belonging to as.
         * The check for conflicts is then attempted on the rightmost
         * record in the left sibling, the leftmost record in the right
         * sibling and all records in the leaf node itself.
         */

        if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
                if (a != avoid_area)
                        return false;
        }

        /* First, check the two border cases. */
        if ((node = btree_node_left_sibling(&as->as_area_btree, leaf))) {
                a = (as_area_t *) node->value[node->keys - 1];
                spinlock_lock(&a->lock);
                if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
                        spinlock_unlock(&a->lock);
                        return false;
                }
                spinlock_unlock(&a->lock);
        }
        if ((node = btree_node_right_sibling(&as->as_area_btree, leaf))) {
                a = (as_area_t *) node->value[0];
                spinlock_lock(&a->lock);
                if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
                        spinlock_unlock(&a->lock);
                        return false;
                }
                spinlock_unlock(&a->lock);
        }

        /* Second, check the leaf node. */
        for (i = 0; i < leaf->keys; i++) {
                a = (as_area_t *) leaf->value[i];

                if (a == avoid_area)
                        continue;

                spinlock_lock(&a->lock);
                if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
                        spinlock_unlock(&a->lock);
                        return false;
                }
                spinlock_unlock(&a->lock);
        }

        /*
         * So far, the area does not conflict with other areas.
         * Make sure it does not conflict with the kernel address space either.
         */
        if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
                return !overlaps(va, size,
                        KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
        }

        return true;
}
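
/*
 * Editor's note (not part of the original file): overlaps() comes from
 * macros.h, which is not shown here. A definition consistent with its use
 * above would be the standard half-open interval intersection test:
 *
 *      overlaps(s1, sz1, s2, sz2) == (s1 < s2 + sz2) && (s2 < s1 + sz1)
 *
 * This is an assumption about macros.h, not a quotation from it.
 */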