Rev 1048 → Rev 1070
/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	list_initialize(&as->as_area_head);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
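
/*
 * Illustrative sketch, not part of the original file: creating an ordinary
 * (non-kernel) address space. Passing 0 instead of FLAG_AS_KERNEL is an
 * assumption here; it leaves the ASID invalid until as_switch() assigns
 * one on first activation.
 */
#if 0
static as_t *example_create_user_as(void)
{
	/* No FLAG_AS_KERNEL: asid starts as ASID_INVALID, refcount as 0. */
	return as_create(0);
}
#endif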

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of area.
 * @param base Base address of area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	link_initialize(&a->link);
	a->flags = flags;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	list_append(&a->link, &as->as_area_head);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
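
/*
 * Illustrative sketch, not part of the original file: carving out a
 * read/write area one page long. The base 8 * PAGE_SIZE is an arbitrary
 * page-aligned choice; an unaligned base, a conflict with an existing
 * area, or AS_AREA_EXEC | AS_AREA_WRITE would all yield NULL.
 */
#if 0
static as_area_t *example_create_rw_area(as_t *as)
{
	return as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, PAGE_SIZE, 8 * PAGE_SIZE);
}
#endif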

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}
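
/*
 * Illustrative sketch, not part of the original file: eagerly backing the
 * first page of an area with a freshly allocated frame instead of waiting
 * for as_page_fault() to do it lazily. Reading a->base without a->lock is
 * tolerated here on the assumption that an area's base never changes.
 */
#if 0
static void example_premap_first_page(as_t *as, as_area_t *a)
{
	__address frame;

	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	as_set_mapping(as, a->base, frame);
}
#endif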

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault cannot be handled here, 1 on success.
 */
int as_page_fault(__address page)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid a race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not already been inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return 1;
		}
	}

	/*
	 * In general, several reasons could have caused this fault:
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return 1;
}
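
/*
 * Illustrative sketch, not part of the original file: how an
 * architecture-specific low-level handler might call into as_page_fault().
 * The name do_fault() is hypothetical; ALIGN_DOWN is assumed to round an
 * address down to the page boundary (as provided by <macros.h>).
 */
#if 0
static void do_fault(__address fault_address)
{
	/* A return of 0 means no area covers the faulting address. */
	if (!as_page_fault(ALIGN_DOWN(fault_address, PAGE_SIZE)))
		panic("unhandled page fault\n");
}
#endif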

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps
	 * (e.g. write ASID to hardware register, etc.).
	 */
	as_install_arch(new);

	AS = new;
}
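
/*
 * Illustrative sketch, not part of the original file: a context-switch
 * path handing the CPU from one task's address space to another's. A
 * task_t type with an 'as' member is assumed here for illustration.
 */
#if 0
static void example_activate(task_t *prev, task_t *next)
{
	/* Drops a reference on prev->as, takes one on next->as, loads PTL0/ASID. */
	if (prev->as != next->as)
		as_switch(prev->as, next->as);
}
#endif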

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

	if (a->flags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (a->flags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (a->flags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	return flags;
}
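
/*
 * Worked example: for an area created with AS_AREA_READ | AS_AREA_EXEC,
 * get_area_flags() yields PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE |
 * PAGE_READ | PAGE_EXEC; PAGE_WRITE stays clear, so stores into the area
 * will fault.
 */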

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}
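
/*
 * Illustrative sketch, not part of the original file: the locking pattern
 * described above, used for a simple presence query. The address space is
 * not locked by the caller here, so 'lock' is true and page_table_lock()
 * takes as->lock itself.
 */
#if 0
static bool example_page_present(as_t *as, __address page)
{
	pte_t *pte;
	bool present;

	page_table_lock(as, true);
	pte = page_mapping_find(as, page);
	present = pte && PTE_PRESENT(pte);
	page_table_unlock(as, true);

	return present;
}
#endif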

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area = NULL;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i * PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i * PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages * PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages * PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return address;
}
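
/*
 * Illustrative sketch, not part of the original file: shrinking an area
 * to a single page. Frames backing the truncated pages are freed and the
 * corresponding TLB entries shot down by as_remap() itself; flags are
 * currently unused, so 0 is passed.
 */
#if 0
static bool example_shrink_to_one_page(as_t *as, as_area_t *a)
{
	return as_remap(as, a->base, PAGE_SIZE, 0) != (__address) -1;
}
#endif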

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	link_t *cur;
	as_area_t *a;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		a = list_get_instance(cur, as_area_t, link);
		spinlock_lock(&a->lock);

		if ((va >= a->base) && (va < a->base + a->pages * PAGE_SIZE))
			return a;

		spinlock_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	link_t *cur;
	as_area_t *a;

	/*
	 * We don't want any area to conflict with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
		__address a_start;
		size_t a_size;

		a = list_get_instance(cur, as_area_t, link);
		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);

		a_start = a->base;
		a_size = a->pages * PAGE_SIZE;

		spinlock_unlock(&a->lock);

		if (overlaps(va, size, a_start, a_size))
			return false;
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}
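
/*
 * Worked example of the overlaps() test used above, assuming the half-open
 * interval semantics of <macros.h>: ranges [s1, s1 + sz1) and [s2, s2 + sz2)
 * conflict iff s1 < s2 + sz2 && s2 < s1 + sz1. A candidate area at va = 0
 * of any size therefore always collides with the NULL page [0, PAGE_SIZE)
 * and is rejected up front.
 */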