/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    as.c
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
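
/*
 * Illustrative sketch of the locking order stated above: when both as_lock
 * and an address space mutex are needed, as_lock is taken first, with
 * interrupts disabled, exactly as as_switch() does below. 'as' stands for
 * some address space known to the caller.
 *
 *    ipl_t ipl;
 *
 *    ipl = interrupts_disable();
 *    spinlock_lock(&as_lock);
 *    mutex_lock_active(&as->lock);
 *    ...
 *    mutex_unlock(&as->lock);
 *    spinlock_unlock(&as_lock);
 *    interrupts_restore(ipl);
 */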

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock);

    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}
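
/*
 * Illustrative sketch of typical usage: set up a fresh user address space
 * with one anonymous read/write area. AREA_BASE and AREA_SIZE are
 * hypothetical caller-chosen values; AREA_BASE must be page-aligned and
 * must not collide with the NULL page or the kernel address space.
 *
 *    as_t *as = as_create(0);
 *    as_area_t *area;
 *
 *    area = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, AREA_SIZE,
 *        AREA_BASE, AS_AREA_ATTR_NONE);
 *    if (!area)
 *        panic("as_area_create failed\n");
 *
 * No frames are allocated here; they are supplied lazily by as_page_fault()
 * on first access or eagerly via as_set_mapping().
 */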

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
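
/*
 * Illustrative sketch of resizing an area of the current address space and
 * interpreting the result; 'base' and 'new_size' are hypothetical values
 * and 'base' is expected to lie within an existing non-device area.
 *
 *    int rc;
 *
 *    rc = as_area_resize(AS, base, new_size, 0);
 *    if (rc == ENOENT) {
 *        // no area contains 'base'
 *    } else if (rc == ENOTSUP) {
 *        // device-mapped areas cannot be resized
 *    } else if (rc == EPERM) {
 *        // the new size would be zero
 *    } else if (rc == EADDRNOTAVAIL) {
 *        // growing would overlap another area or the kernel address space
 *    }
 */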

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
    as_area_t *area;
    __address base;
    ipl_t ipl;
    int i;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;
    for (i = 0; i < area->pages; i++) {
        pte_t *pte;

        /*
         * Releasing physical memory.
         * Areas mapping memory-mapped devices are treated differently than
         * areas backing frame_alloc()'ed memory: device frames are not
         * returned to the frame allocator.
         */
        page_table_lock(as, false);
        pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
        if (pte && PTE_VALID(pte)) {
            ASSERT(PTE_PRESENT(pte));
            page_mapping_remove(as, area->base + i*PAGE_SIZE);
            if (!(area->flags & AS_AREA_DEVICE)) {
                __address frame;
                frame = PTE_GET_FRAME(pte);
                frame_free(ADDR2PFN(frame));
            }
            page_table_unlock(as, false);
        } else {
            page_table_unlock(as, false);
        }
    }
    /*
     * Invalidate TLBs.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
    tlb_invalidate_pages(AS->asid, area->base, area->pages);
    tlb_shootdown_finalize();

    area->attributes |= AS_AREA_ATTR_PARTIAL;
    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Steal address space area from another task.
 *
 * The address space area is stolen from another task.
 * Moreover, any existing mapping
 * is copied as well, thus providing a mechanism
 * for sharing a group of pages. The source address
 * space area and any associated mapping are preserved.
 *
 * @param src_task Pointer to the source task.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_base Target base address.
 *
 * @return Zero on success or ENOENT if there is no such task or
 *     if there is no such address space area,
 *     EPERM if there was a problem in accepting the area or
 *     ENOMEM if there was a problem in allocating destination
 *     address space area.
 */
int as_area_steal(task_t *src_task, __address src_base, size_t acc_size,
          __address dst_base)
{
    ipl_t ipl;
    count_t i;
    as_t *src_as;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;

    ipl = interrupts_disable();
    spinlock_lock(&src_task->lock);
    src_as = src_task->as;

    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        spinlock_unlock(&src_task->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    if (src_size != acc_size) {
        spinlock_unlock(&src_task->lock);
        interrupts_restore(ipl);
        return EPERM;
    }
    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     */
    dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        spinlock_unlock(&src_task->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    spinlock_unlock(&src_task->lock);

    /*
     * Avoid deadlock by first locking the address space with lower address.
     */
    if (AS < src_as) {
        mutex_lock(&AS->lock);
        mutex_lock(&src_as->lock);
    } else {
        mutex_lock(&src_as->lock);
        mutex_lock(&AS->lock);
    }

    for (i = 0; i < SIZE2FRAMES(src_size); i++) {
        pte_t *pte;
        __address frame;

        page_table_lock(src_as, false);
        pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE);
        if (pte && PTE_VALID(pte)) {
            ASSERT(PTE_PRESENT(pte));
            frame = PTE_GET_FRAME(pte);
            if (!(src_flags & AS_AREA_DEVICE))
                frame_reference_add(ADDR2PFN(frame));
            page_table_unlock(src_as, false);
        } else {
            page_table_unlock(src_as, false);
            continue;
        }

        page_table_lock(AS, false);
        page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
        page_table_unlock(AS, false);
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute.
     */
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    mutex_unlock(&dst_area->lock);

    mutex_unlock(&AS->lock);
    mutex_unlock(&src_as->lock);
    interrupts_restore(ipl);

    return 0;
}
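
/*
 * The deadlock-avoidance idiom used above, shown on its own: whenever two
 * address space mutexes must be held at once, they are acquired in order of
 * increasing as_t addresses so that all threads agree on a single global
 * order. 'as1' and 'as2' stand for any two distinct address spaces.
 *
 *    if (as1 < as2) {
 *        mutex_lock(&as1->lock);
 *        mutex_lock(&as2->lock);
 *    } else {
 *        mutex_lock(&as2->lock);
 *        mutex_lock(&as1->lock);
 *    }
 */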

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    mutex_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}
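
/*
 * Illustrative sketch of one possible use of as_set_mapping(): eagerly
 * mapping a physically contiguous region, such as device memory, into an
 * AS_AREA_DEVICE area page by page. DEV_VADDR, DEV_PADDR and DEV_SIZE are
 * hypothetical constants; DEV_VADDR must be page-aligned.
 *
 *    count_t i;
 *
 *    (void) as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_DEVICE,
 *        DEV_SIZE, DEV_VADDR, AS_AREA_ATTR_NONE);
 *    for (i = 0; i < SIZE2FRAMES(DEV_SIZE); i++)
 *        as_set_mapping(as, DEV_VADDR + i*PAGE_SIZE, DEV_PADDR + i*PAGE_SIZE);
 */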

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    ASSERT(!(area->flags & AS_AREA_DEVICE));

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            mutex_unlock(&area->lock);
            mutex_unlock(&AS->lock);
            return AS_PF_OK;
        }
    }

    /*
     * In general, there can be several reasons that
     * can have caused this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (!THREAD)
        return AS_PF_FAULT;

    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
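
/*
 * Illustrative sketch of how an architecture's low-level fault handler
 * might consume the return value; the handler name and the way the
 * faulting address is obtained are hypothetical and architecture-specific.
 *
 *    void arch_handle_data_fault(__address badvaddr, istate_t *istate)
 *    {
 *        int rc;
 *
 *        rc = as_page_fault(badvaddr, istate);
 *        if (rc == AS_PF_FAULT)
 *            panic("unhandled page fault\n");
 *        // AS_PF_OK: a frame was mapped in, the faulting access can be retried
 *        // AS_PF_DEFER: the return address was redirected to a failover handler
 *    }
 */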

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * the scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        mutex_lock_active(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        mutex_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    mutex_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new->lock);
        new->asid = asid;
        mutex_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}
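
/*
 * Illustrative sketch of a call site, e.g. during a task switch in the
 * scheduler; 'old_task' and 'new_task' are hypothetical and task_t is
 * assumed to carry an 'as' pointer, as used by as_area_steal() above.
 *
 *    if (old_task->as != new_task->as)
 *        as_switch(old_task->as, new_task->as);
 */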

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(aflags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}
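
/*
 * Worked example of the conversion above: a read-only executable area
 * (AS_AREA_READ | AS_AREA_EXEC, no AS_AREA_DEVICE) translates to
 *
 *    PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_EXEC | PAGE_CACHEABLE
 *
 * while a read/write device area (AS_AREA_READ | AS_AREA_WRITE |
 * AS_AREA_DEVICE) translates to
 *
 *    PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE
 *
 * i.e. device mappings are never marked cacheable.
 */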

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}
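
/*
 * Illustrative sketch of the calling convention: the address space is
 * locked and interrupts are disabled before the call, and the area, if
 * found, is returned locked and must be unlocked by the caller, as every
 * caller in this file does.
 *
 *    ipl_t ipl;
 *    as_area_t *area;
 *
 *    ipl = interrupts_disable();
 *    mutex_lock(&as->lock);
 *    area = find_area_and_lock(as, va);
 *    if (area) {
 *        ...
 *        mutex_unlock(&area->lock);
 *    }
 *    mutex_unlock(&as->lock);
 *    interrupts_restore(ipl);
 */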

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
    return (__native) as_area_destroy(AS, address);
}