/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file as.c
 * @brief Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

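/*
 * Synchronization overview, as implemented by the functions below: all
 * operations run with interrupts disabled; the global as_lock guards the
 * inactive_as_with_asid_head list, while each as_t and each as_area_t has
 * its own spinlock. The locking order is address space -> address space
 * area -> page tables (see page_table_lock()); as_area_send() additionally
 * orders two address space locks by their numerical addresses.
 */
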
/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

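/*
 * Note: as->refcount tracks the number of processors on which the address
 * space is currently active; it is manipulated by as_switch() below, and
 * as_free() asserts that it has dropped back to zero.
 */
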
/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

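/*
 * Illustrative use of as_area_create() (a sketch only, mirroring
 * sys_as_area_create() at the end of this file; USER_BASE stands for any
 * page-aligned, conflict-free address and is not a symbol defined here):
 *
 *     as_area_t *a;
 *
 *     a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, 4 * PAGE_SIZE,
 *         USER_BASE, AS_AREA_ATTR_NONE);
 *     if (!a) {
 *         ... handle conflict or invalid arguments ...
 *     }
 */
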
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            spinlock_unlock(&area->lock);
            spinlock_unlock(&as->lock);
            interrupts_restore(ipl);
            return (__address) -1;
        }
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}

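/*
 * Note on as_area_resize(): growing an area only records the new page count
 * after the conflict check; no frames are allocated here, they are faulted
 * in lazily by as_page_fault(). Shrinking, on the other hand, immediately
 * frees the frames backing the truncated pages and shoots down the
 * corresponding TLB entries.
 */
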
/** Send address space area to another task.
 *
 * Address space area is sent to the specified task.
 * If the destination task is willing to accept the
 * area, a new area is created according to the
 * source area. Moreover, any existing mapping
 * is copied as well, providing thus a mechanism
 * for sharing group of pages. The source address
 * space area and any associated mapping is preserved.
 *
 * @param dst_id Task ID of the accepting task.
 * @param src_base Base address of the source address space area.
 *
 * @return 0 on success or ENOENT if there is no such task or
 *         if there is no such address space area,
 *         EPERM if there was a problem in accepting the area or
 *         ENOMEM if there was a problem in allocating destination
 *         address space area.
 */
int as_area_send(task_id_t dst_id, __address src_base)
{
    ipl_t ipl;
    task_t *t;
    count_t i;
    as_t *dst_as;
    __address dst_base;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    t = task_find_by_id(dst_id);
    if (!t) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&t->lock);
    spinlock_unlock(&tasks_lock);

    dst_as = t->as;
    dst_base = (__address) t->accept_arg.base;

    if (dst_as == AS) {
        /*
         * The two tasks share the entire address space.
         * Return error since there is no point in continuing.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    spinlock_lock(&AS->lock);
    src_area = find_area_and_lock(AS, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        spinlock_unlock(&t->lock);
        spinlock_unlock(&AS->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    spinlock_unlock(&src_area->lock);
    spinlock_unlock(&AS->lock);

    if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != src_size) ||
        (t->accept_arg.flags != src_flags)) {
        /*
         * Discrepancy in either task ID, size or flags.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     */
    dst_area = as_area_create(dst_as, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        spinlock_unlock(&t->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
    spinlock_unlock(&t->lock);

    /*
     * Avoid deadlock by first locking the address space with lower address.
     */
    if (dst_as < AS) {
        spinlock_lock(&dst_as->lock);
        spinlock_lock(&AS->lock);
    } else {
        spinlock_lock(&AS->lock);
        spinlock_lock(&dst_as->lock);
    }

    for (i = 0; i < SIZE2FRAMES(src_size); i++) {
        pte_t *pte;
        __address frame;

        page_table_lock(AS, false);
        pte = page_mapping_find(AS, src_base + i*PAGE_SIZE);
        if (pte && PTE_VALID(pte)) {
            ASSERT(PTE_PRESENT(pte));
            frame = PTE_GET_FRAME(pte);
            if (!(src_flags & AS_AREA_DEVICE))
                frame_reference_add(ADDR2PFN(frame));
            page_table_unlock(AS, false);
        } else {
            page_table_unlock(AS, false);
            continue;
        }

        page_table_lock(dst_as, false);
        page_mapping_insert(dst_as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
        page_table_unlock(dst_as, false);
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute.
     */
    spinlock_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    spinlock_unlock(&dst_area->lock);

    spinlock_unlock(&AS->lock);
    spinlock_unlock(&dst_as->lock);
    interrupts_restore(ipl);

    return 0;
}

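/*
 * Sharing protocol in a nutshell (a sketch of the intended call order, as
 * implied by as_area_send() above and the syscall wrappers at the end of
 * this file): the receiving task first registers the expected task ID,
 * size, flags and destination base via sys_as_area_accept(); the sending
 * task then calls sys_as_area_send() with a matching source area. Frames of
 * non-device areas are shared by bumping their reference counts
 * (frame_reference_add()), so both tasks end up mapping the same physical
 * memory.
 */
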
/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT (0) on page fault, AS_PF_OK (1) on success, or AS_PF_DEFER (2)
 *         if the fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        spinlock_unlock(&area->lock);
        spinlock_unlock(&AS->lock);
        goto page_fault;
    }

    ASSERT(!(area->flags & AS_AREA_DEVICE));

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, there can be several reasons that
     * can have caused this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (!THREAD)
        return AS_PF_FAULT;

    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}

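/*
 * Note: the newly allocated frame in as_page_fault() is zeroed via memsetb()
 * before it is mapped, so that a faulting task never observes stale data
 * left behind by a previous owner of the frame.
 */
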
/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

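/*
 * Note on as_switch(): when the old address space loses its last active
 * processor, it is parked on inactive_as_with_asid_head while still holding
 * a valid ASID; asid_get() is deliberately not called while new->lock is
 * held, which could otherwise deadlock. The caller is presumably the
 * scheduler switching between tasks, although that is not visible from
 * this file.
 */
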
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(aflags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}

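/*
 * Example of the conversion above: a readable, writable, non-device area
 * (AS_AREA_READ | AS_AREA_WRITE) translates to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE.
 */
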
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

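/*
 * The concrete as_operations implementation is not defined in this file;
 * it is presumably installed by architecture or genarch code (e.g. during
 * as_arch_init()) before any of these wrappers is called. The ASSERTs guard
 * against that not having happened.
 */
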
/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        spinlock_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        spinlock_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     * Since areas do not overlap and are keyed by base address, it is
     * the only remaining candidate that can contain va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        spinlock_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        spinlock_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            spinlock_unlock(&a->lock);
            return false;
        }
        spinlock_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    return as_area_resize(AS, address, size, 0);
}

/** Prepare task for accepting address space area from another task.
 *
 * @param uspace_accept_arg Accept structure passed from userspace.
 *
 * @return EPERM if the task ID encapsulated in @uspace_accept_arg references
 *         TASK. Otherwise zero is returned.
 */
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
{
    as_area_acptsnd_arg_t arg;
    int rc;

    rc = copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));
    if (rc != 0)
        return rc;

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Accepting from itself is not allowed.
         */
        return (__native) EPERM;
    }

    memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));

    return 0;
}

/** Wrapper for as_area_send(). */
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
{
    as_area_acptsnd_arg_t arg;
    int rc;

    rc = copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));
    if (rc != 0)
        return rc;

    if (!arg.size)
        return (__native) EPERM;

    if (arg.task_id == TASK->taskid) {
        /*
         * Sending to itself is not allowed.
         */
        return (__native) EPERM;
    }

    return (__native) as_area_send(arg.task_id, (__address) arg.base);
}
1235 | jermar | 949 | } |