/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of
 * the Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab cache for as_t objects.
 */
static slab_cache_t *as_slab;

/**
 * This lock protects the inactive_as_with_asid_head list. It must be acquired
 * before the as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

static int as_constructor(void *obj, int flags)
{
    as_t *as = (as_t *) obj;
    int rc;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);

    rc = as_constructor_arch(as, flags);

    return rc;
}

static int as_destructor(void *obj)
{
    as_t *as = (as_t *) obj;

    return as_destructor_arch(as);
}

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is
 *     created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) slab_alloc(as_slab, 0);
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
    as->page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}

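/*
 * Illustrative sketch, not part of the kernel proper: a typical owner such
 * as a task obtains a user address space by passing flags 0; FLAG_AS_KERNEL
 * is reserved for the kernel address space created in as_init(). The
 * refcount manipulation shown is hypothetical bookkeeping by the owner.
 *
 *     as_t *as = as_create(0);
 *     as->refcount++;
 *     ...
 *     if (--as->refcount == 0)
 *         as_destroy(as);
 */
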
/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;

    ASSERT(as->refcount == 0);

    /*
     * Since there is no reference to this address space,
     * it is safe not to lock its mutex.
     */
    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next,
            btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
    page_table_destroy(as->page_table);
#else
    page_table_destroy(NULL);
#endif

    interrupts_restore(ipl);

    slab_free(as_slab, as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
            0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

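/*
 * Example (a sketch only; anon_backend and AS_AREA_ATTR_NONE are assumed
 * from mm/as.h and mm/backend_anon.c): create an anonymous-memory-backed
 * area in the current address space.
 *
 *     as_area_t *a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
 *         AS_AREA_CACHEABLE, size, base, AS_AREA_ATTR_NONE,
 *         &anon_backend, NULL);
 *     if (!a) {
 *         // base not page-aligned, zero size, or conflicting area
 *     }
 */
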
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        uintptr_t start_free = area->base + pages * PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
            pages * PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node = list_get_instance(area->used_space.leaf_head.prev,
                btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                uintptr_t b = node->key[node->keys - 1];
                count_t c = (count_t) node->value[node->keys - 1];
                int i = 0;

                if (overlaps(b, c * PAGE_SIZE, area->base,
                    pages * PAGE_SIZE)) {

                    if (b + c * PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */

                    cond = false;   /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        c - i))
                        panic("Could not remove used "
                            "space.\n");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used "
                            "space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b +
                        i * PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) &&
                        PTE_PRESENT(pte));
                    if (area->backend &&
                        area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i * PAGE_SIZE,
                            PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b +
                        i * PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */
        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
        tlb_shootdown_finalize();

        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base +
            pages * PAGE_SIZE, area->pages - pages);
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
            area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

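/*
 * Worked example (illustrative, assuming 4 KiB pages): for an area based at
 * 0x10000000 and spanning 16 pages,
 *
 *     as_area_resize(as, 0x10000000, 8 * PAGE_SIZE, 0);
 *
 * computes pages = SIZE2FRAMES((address - area->base) + size) = 8 and, since
 * 8 < 16, shrinks the area, unmapping and freeing the frames behind the top
 * 8 pages. Passing 32 * PAGE_SIZE instead would grow the area, subject only
 * to the conflict check; the new pages are populated lazily on page fault.
 */
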
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by the used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                if (area->backend &&
                    area->backend->frame_free) {
                    area->backend->frame_free(area, b +
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */
    tlb_invalidate_pages(as->asid, area->base, area->pages);
    tlb_shootdown_finalize();

    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing or if the kernel detects an attempt to create an illegal address
 * alias.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
          as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size ||
        (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

#ifdef CONFIG_VIRT_IDX_DCACHE
    if (!(dst_flags_mask & AS_AREA_EXEC)) {
        if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
            /*
             * Refuse to create an illegal address alias.
             */
            mutex_unlock(&src_area->lock);
            mutex_unlock(&src_as->lock);
            interrupts_restore(ipl);
            return ENOTSUP;
        }
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    src_area->backend->share(src_area);

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    interrupts_restore(ipl);

    return 0;
}

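/*
 * Example (sketch of how a memory-sharing service might use the call):
 * duplicate a source area mapping into the current address space read-only.
 *
 *     int rc = as_area_share(src_as, src_base, size, AS, dst_base,
 *         AS_AREA_READ | AS_AREA_CACHEABLE);
 *
 * Because of the (src_flags & dst_flags_mask) != dst_flags_mask check,
 * dst_flags_mask must be a subset of the source area's flags: a writable
 * source can be shared read-only, but not the other way around.
 */
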
/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}

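/*
 * Sketch of the expected calling convention (the exact code lives in the
 * architecture-specific low-level handlers): a trap handler extracts the
 * faulting address and access type and defers to this function.
 *
 *     if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), PF_ACCESS_READ,
 *         istate) == AS_PF_FAULT) {
 *         // arch-specific reaction, e.g. panic or killing the task
 *     }
 *
 * AS_PF_DEFER means the fault was already redirected to the uspace copy
 * failover address and no further action is needed.
 */
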
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        mutex_lock_active(&old->lock);
        ASSERT(old->cpu_refcount);
        if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link,
                &inactive_as_with_asid_head);
        }
        mutex_unlock(&old->lock);

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new->lock);
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID) {
            list_remove(&new->inactive_as_with_asid_link);
        } else {
            /*
             * Defer call to asid_get() until new->lock is released.
             */
            needs_asid = true;
        }
    }
    SET_PTL0_ADDRESS(new->page_table);
    mutex_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new->lock);
        new->asid = asid;
        mutex_unlock(&new->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}

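/*
 * Illustrative use (a sketch of what the scheduler is expected to do when
 * the newly scheduled thread belongs to a different task; the exact code
 * lives in the scheduler, not here):
 *
 *     as_t *new_as = THREAD->task->as;
 *     if (AS != new_as)
 *         as_switch(AS, new_as);
 */
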
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}

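/*
 * For instance, a readable, writable, cacheable area yields
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE,
 * which is what backends pass to page_mapping_insert() when they resolve
 * a page fault.
 */
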
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

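/*
 * Typical sequence (a sketch; page_mapping_insert() is assumed from
 * mm/page.c): a caller that already holds as->lock, as as_page_fault()
 * does, passes false so that only the page table itself gets locked.
 *
 *     page_table_lock(as, false);
 *     page_mapping_insert(as, page, frame, as_area_get_flags(area));
 *     page_table_unlock(as, false);
 */
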
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 *     failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (lnode) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

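/*
 * Example of why the left neighbour must be consulted: with areas
 * A = [0x1000, 5 pages] and B = [0x8000, 2 pages] keyed by base address,
 * looking up va = 0x2000 misses as a B+tree key, yet va falls inside A.
 * A is found either among the records of the leaf itself or as the
 * rightmost record of the leaf's left neighbour.
 */
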
/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
              as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with the given base. */
size_t as_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

1387 jermar 1208
/** Mark portion of address space area as used.
1209
 *
1210
 * The address space area must be already locked.
1211
 *
1212
 * @param a Address space area.
1213
 * @param page First page to be marked.
1214
 * @param count Number of page to be marked.
1215
 *
1216
 * @return 0 on failure and 1 on success.
1217
 */
1780 jermar 1218
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1219
{
1220
    btree_node_t *leaf, *node;
1221
    count_t pages;
1222
    int i;
1223
 
1224
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1225
    ASSERT(count);
1226
 
1227
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1228
    if (pages) {
1229
        /*
1230
         * We hit the beginning of some used space.
1231
         */
1232
        return 0;
1233
    }
1234
 
1437 jermar 1235
    if (!leaf->keys) {
1236
        btree_insert(&a->used_space, page, (void *) count, leaf);
1237
        return 1;
1238
    }
1239
 
1387 jermar 1240
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1241
    if (node) {
2087 jermar 1242
        uintptr_t left_pg = node->key[node->keys - 1];
1243
        uintptr_t right_pg = leaf->key[0];
1244
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1245
        count_t right_cnt = (count_t) leaf->value[0];
1387 jermar 1246
 
1247
        /*
1248
         * Examine the possibility that the interval fits
1249
         * somewhere between the rightmost interval of
1250
         * the left neigbour and the first interval of the leaf.
1251
         */
1252
 
1253
        if (page >= right_pg) {
1254
            /* Do nothing. */
2087 jermar 1255
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1256
            left_cnt * PAGE_SIZE)) {
1387 jermar 1257
            /* The interval intersects with the left interval. */
1258
            return 0;
2087 jermar 1259
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1260
            right_cnt * PAGE_SIZE)) {
1387 jermar 1261
            /* The interval intersects with the right interval. */
1262
            return 0;          
2087 jermar 1263
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1264
            (page + count * PAGE_SIZE == right_pg)) {
1265
            /*
1266
             * The interval can be added by merging the two already
1267
             * present intervals.
1268
             */
1403 jermar 1269
            node->value[node->keys - 1] += count + right_cnt;
1387 jermar 1270
            btree_remove(&a->used_space, right_pg, leaf);
1271
            return 1;
2087 jermar 1272
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1273
            /*
1274
             * The interval can be added by simply growing the left
1275
             * interval.
1276
             */
1403 jermar 1277
            node->value[node->keys - 1] += count;
1387 jermar 1278
            return 1;
2087 jermar 1279
        } else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1280
            /*
2087 jermar 1281
             * The interval can be addded by simply moving base of
1282
             * the right interval down and increasing its size
1283
             * accordingly.
1387 jermar 1284
             */
1403 jermar 1285
            leaf->value[0] += count;
1387 jermar 1286
            leaf->key[0] = page;
1287
            return 1;
1288
        } else {
1289
            /*
1290
             * The interval is between both neigbouring intervals,
1291
             * but cannot be merged with any of them.
1292
             */
2087 jermar 1293
            btree_insert(&a->used_space, page, (void *) count,
1294
                leaf);
1387 jermar 1295
            return 1;
1296
        }
1297
    } else if (page < leaf->key[0]) {
1780 jermar 1298
        uintptr_t right_pg = leaf->key[0];
1387 jermar 1299
        count_t right_cnt = (count_t) leaf->value[0];
1300
 
1301
        /*
2087 jermar 1302
         * Investigate the border case in which the left neighbour does
1303
         * not exist but the interval fits from the left.
1387 jermar 1304
         */
1305
 
2087 jermar 1306
        if (overlaps(page, count * PAGE_SIZE, right_pg,
1307
            right_cnt * PAGE_SIZE)) {
1387 jermar 1308
            /* The interval intersects with the right interval. */
1309
            return 0;
2087 jermar 1310
        } else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1311
            /*
2087 jermar 1312
             * The interval can be added by moving the base of the
1313
             * right interval down and increasing its size
1314
             * accordingly.
1387 jermar 1315
             */
1316
            leaf->key[0] = page;
1403 jermar 1317
            leaf->value[0] += count;
1387 jermar 1318
            return 1;
1319
        } else {
1320
            /*
1321
             * The interval doesn't adjoin with the right interval.
1322
             * It must be added individually.
1323
             */
2087 jermar 1324
            btree_insert(&a->used_space, page, (void *) count,
1325
                leaf);
1387 jermar 1326
            return 1;
1327
        }
1328
    }
1329
 
1330
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1331
    if (node) {
2087 jermar 1332
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1333
        uintptr_t right_pg = node->key[0];
1334
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1335
        count_t right_cnt = (count_t) node->value[0];
1387 jermar 1336
 
1337
        /*
1338
         * Examine the possibility that the interval fits
1339
         * somewhere between the leftmost interval of
1340
         * the right neigbour and the last interval of the leaf.
1341
         */
1342
 
1343
        if (page < left_pg) {
1344
            /* Do nothing. */
2087 jermar 1345
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1346
            left_cnt * PAGE_SIZE)) {
1387 jermar 1347
            /* The interval intersects with the left interval. */
1348
            return 0;
2087 jermar 1349
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1350
            right_cnt * PAGE_SIZE)) {
1387 jermar 1351
            /* The interval intersects with the right interval. */
1352
            return 0;          
2087 jermar 1353
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1354
            (page + count * PAGE_SIZE == right_pg)) {
1355
            /*
1356
             * The interval can be added by merging the two already
1357
             * present intervals.
1358
             * */
1403 jermar 1359
            leaf->value[leaf->keys - 1] += count + right_cnt;
1387 jermar 1360
            btree_remove(&a->used_space, right_pg, node);
1361
            return 1;
2087 jermar 1362
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1363
            /*
1364
             * The interval can be added by simply growing the left
1365
             * interval.
1366
             * */
1403 jermar 1367
            leaf->value[leaf->keys - 1] +=  count;
1387 jermar 1368
            return 1;
2087 jermar 1369
        } else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1370
            /*
2087 jermar 1371
             * The interval can be addded by simply moving base of
1372
             * the right interval down and increasing its size
1373
             * accordingly.
1387 jermar 1374
             */
1403 jermar 1375
            node->value[0] += count;
1387 jermar 1376
            node->key[0] = page;
1377
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count,
                leaf);
            return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour
         * does not exist but the interval fits from the right.
         */

        if (overlaps(page, count * PAGE_SIZE, left_pg,
            left_cnt * PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
            /*
             * The interval can be added by growing the left
             * interval.
             */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin with the left interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count,
                leaf);
            return 1;
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit
     * only between two other intervals of the leaf. The two border cases
     * were already resolved.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            uintptr_t right_pg = leaf->key[i];
            count_t left_cnt = (count_t) leaf->value[i - 1];
            count_t right_cnt = (count_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, count * PAGE_SIZE, left_pg,
                left_cnt * PAGE_SIZE)) {
                /*
                 * The interval intersects with the left
                 * interval.
                 */
                return 0;
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
                right_cnt * PAGE_SIZE)) {
                /*
                 * The interval intersects with the right
                 * interval.
                 */
                return 0;
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
                (page + count * PAGE_SIZE == right_pg)) {
                /*
                 * The interval can be added by merging the two
                 * already present intervals.
                 */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&a->used_space, right_pg, leaf);
                return 1;
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval can be added by simply growing
                 * the left interval.
                 */
                leaf->value[i - 1] += count;
                return 1;
            } else if (page + count * PAGE_SIZE == right_pg) {
                /*
                 * The interval can be added by simply moving
                 * the base of the right interval down and
                 * increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                return 1;
            } else {
                /*
                 * The interval is between both neighbouring
                 * intervals, but cannot be merged with any of
                 * them.
                 */
                btree_insert(&a->used_space, page,
                    (void *) count, leaf);
                return 1;
            }
        }
    }

    panic("Inconsistency detected while adding %d pages of used space at "
        "%p.\n", count, page);
}
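
/*
 * Illustrative sketch, not part of the original sources: adjoining
 * intervals coalesce rather than pile up. Starting with no used space
 * recorded for an already locked area 'a', the two calls below leave a
 * single three-page interval [base, base + 3 * PAGE_SIZE) in the B+tree.
 * The helper function is hypothetical.
 */
#if 0
static void used_space_merge_example(as_area_t *a, uintptr_t base)
{
    /* Mark two pages as used, starting at base. */
    used_space_insert(a, base, 2);
    /* This adjoins the previous interval, so the two merge into one. */
    used_space_insert(a, base + 2 * PAGE_SIZE, 1);
}
#endif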

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky, page is the beginning of some interval.
         */
        if (count > pages) {
            return 0;
        } else if (count == pages) {
            btree_remove(&a->used_space, page, leaf);
            return 1;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += count * PAGE_SIZE;
                    leaf->value[i] -= count;
                    return 1;
                }
            }
            goto error;
        }
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node && page < leaf->key[0]) {
        uintptr_t left_pg = node->key[node->keys - 1];
        count_t left_cnt = (count_t) node->value[node->keys - 1];

        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
            if (page + count * PAGE_SIZE ==
                left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour and can be
                 * removed by updating the size of the bigger
                 * interval.
                 */
                node->value[node->keys - 1] -= count;
                return 1;
            } else if (page + count * PAGE_SIZE <
                left_pg + left_cnt * PAGE_SIZE) {
                count_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour but its
                 * removal requires both updating the size of
                 * the original interval and also inserting a
                 * new interval.
                 */
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    } else if (page < leaf->key[0]) {
        return 0;
    }

    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
            if (page + count * PAGE_SIZE ==
                left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf and can be removed by
                 * updating the size of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                return 1;
            } else if (page + count * PAGE_SIZE < left_pg +
                left_cnt * PAGE_SIZE) {
                count_t new_cnt;

                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf but its removal
                 * requires both updating the size of the
                 * original interval and also inserting a new
                 * interval.
                 */
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    }

    /*
     * The border cases have been already resolved.
     * Now the interval can only be between intervals of the leaf.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            count_t left_cnt = (count_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding
             * to (i - 1) and i.
             */
            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
                count * PAGE_SIZE)) {
                if (page + count * PAGE_SIZE ==
                    left_pg + left_cnt * PAGE_SIZE) {
                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf and can
                     * be removed by updating the size of
                     * the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    return 1;
                } else if (page + count * PAGE_SIZE <
                    left_pg + left_cnt * PAGE_SIZE) {
                    count_t new_cnt;

                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf but its
                     * removal requires both updating the
                     * size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg +
                        left_cnt * PAGE_SIZE) -
                        (page + count * PAGE_SIZE)) >>
                        PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&a->used_space, page +
                        count * PAGE_SIZE, (void *) new_cnt,
                        leaf);
                    return 1;
                }
            }
            return 0;
        }
    }

error:
    panic("Inconsistency detected while removing %d pages of used space "
        "from %p.\n", count, page);
}
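
/*
 * Illustrative sketch, not part of the original sources: removing pages
 * from the middle of a recorded interval splits it in two. Given a
 * four-page interval starting at base in an already locked area 'a',
 * removing the third page leaves [base, base + 2 * PAGE_SIZE) and
 * [base + 3 * PAGE_SIZE, base + 4 * PAGE_SIZE) in the B+tree. The helper
 * function is hypothetical.
 */
#if 0
static void used_space_split_example(as_area_t *a, uintptr_t base)
{
    used_space_insert(a, base, 4);
    /* Punch a one-page hole; the interval is split, not merely shrunk. */
    used_space_remove(a, base + 2 * PAGE_SIZE, 1);
}
#endif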

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);
    if (--sh_info->refcount == 0) {
        dealloc = true;
        link_t *cur;

        /*
         * Now carefully walk the pagemap B+tree and free/remove
         * the reference from all frames found there.
         */
        for (cur = sh_info->pagemap.leaf_head.next;
            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
            btree_node_t *node;
            int i;

            node = list_get_instance(cur, btree_node_t, leaf_link);
            for (i = 0; i < node->keys; i++)
                frame_free((uintptr_t) node->value[i]);
        }
    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}
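
/*
 * Illustrative sketch, not part of the original sources: the assumed
 * pairing of share info references. Code that attaches another address
 * space area to an existing share takes a reference under
 * sh_info->lock; the area's destruction path drops it via
 * sh_info_remove_reference(), and the last drop deallocates the pagemap
 * and the sh_info itself, as implemented above. The helper function is
 * hypothetical.
 */
#if 0
static void sh_info_reference_example(share_info_t *sh_info)
{
    mutex_lock(&sh_info->lock);
    sh_info->refcount++;        /* one reference per attached area */
    mutex_unlock(&sh_info->lock);

    /* ... later, on the area's destruction path ... */
    sh_info_remove_reference(sh_info);
}
#endif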

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
        return (unative_t) address;
    else
        return (unative_t) -1;
}
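
/*
 * Illustrative sketch, not part of the original sources: the calling
 * convention of the syscall above. In reality the call arrives from
 * userspace through the syscall dispatcher; the address value and the
 * helper function are made up for the example.
 */
#if 0
static void sys_as_area_create_example(void)
{
    uintptr_t address = 0x40000000;

    /* Request one anonymous, read/write page at the chosen address. */
    if (sys_as_area_create(address, PAGE_SIZE,
        AS_AREA_READ | AS_AREA_WRITE) == (unative_t) -1)
        printf("as_area_create failed\n");
}
#endif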

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
    /* The flags argument is currently ignored; zero is passed on. */
    return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
    return (unative_t) as_area_destroy(AS, address);
}

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /* Print out info about address space areas. */
    link_t *cur;
    for (cur = as->as_area_btree.leaf_head.next;
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
        btree_node_t *node;

        node = list_get_instance(cur, btree_node_t, leaf_link);

        int i;
        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            mutex_lock(&area->lock);
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
                area, area->base, area->pages, area->base,
                area->base + area->pages * PAGE_SIZE);
            mutex_unlock(&area->lock);
        }
    }

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
}

/** @}
 */