/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;
#endif

/**
 * This lock protects the inactive_as_with_asid_head list. It must be acquired
 * before the as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

#ifndef __OBJC__
static int as_constructor(void *obj, int flags)
{
    as_t *as = (as_t *) obj;
    int rc;

    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);

    rc = as_constructor_arch(as, flags);

    return rc;
}

static int as_destructor(void *obj)
{
    as_t *as = (as_t *) obj;

    return as_destructor_arch(as);
}
#endif

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();

#ifndef __OBJC__
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
#endif

    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is
 *     created.
 */
as_t *as_create(int flags)
{
    as_t *as;

#ifdef __OBJC__
    as = [as_t new];
    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);
    (void) as_constructor_arch(as, flags);
#else
    as = (as_t *) slab_alloc(as_slab, 0);
#endif
    (void) as_create_arch(as, 0);

    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
    as->genarch.page_table = page_table_create(flags);
#else
    page_table_create(flags);
#endif

    return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;

    ASSERT(as->refcount == 0);

    /*
     * Since there is no reference to this address space,
     * it is safe not to lock its mutex.
     */
    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next,
            btree_node_t, leaf_link);

        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    interrupts_restore(ipl);

#ifdef __OBJC__
    [as free];
#else
    slab_free(as_slab, as);
#endif
}

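/*
 * Illustrative sketch, not part of the original file: the typical life
 * cycle of a userspace address space. The surrounding task code that
 * maintains refcount is assumed here for illustration only.
 *
 *     as_t *as = as_create(0);        // no ASID assigned yet
 *     // ... attach the address space to a task, create areas ...
 *     // once the last task reference is gone (refcount == 0):
 *     as_destroy(as);
 */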
 
/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock);

    a->as = as;
    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data)
        a->backend_data = *backend_data;
    else
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
            0);

    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}

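/*
 * Illustrative sketch, not part of the original file: creating a 16 KiB
 * read/write pageable area in the current address space. The base
 * address, the attrs value 0 and the anon_backend symbol are
 * assumptions made for the sake of the example.
 *
 *     as_area_t *a;
 *     a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
 *         AS_AREA_CACHEABLE, 4 * PAGE_SIZE, 0x10000000, 0,
 *         &anon_backend, NULL);
 *     if (!a) {
 *         // base not page-aligned, zero size, or area conflict
 *     }
 */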
 
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        uintptr_t start_free = area->base + pages * PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
            pages * PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node =
                list_get_instance(area->used_space.leaf_head.prev,
                btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                uintptr_t b = node->key[node->keys - 1];
                count_t c =
                    (count_t) node->value[node->keys - 1];
                int i = 0;

                if (overlaps(b, c * PAGE_SIZE, area->base,
                    pages * PAGE_SIZE)) {

                    if (b + c * PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */

                    cond = false;   /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        c - i))
                        panic("Could not remove used "
                            "space.\n");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used "
                            "space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b +
                        i * PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) &&
                        PTE_PRESENT(pte));
                    if (area->backend &&
                        area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i * PAGE_SIZE,
                            PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b +
                        i * PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */
        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
        tlb_shootdown_finalize();

        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base +
            pages * PAGE_SIZE, area->pages - pages);
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
            area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}

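/*
 * Illustrative sketch, not part of the original file: growing an
 * existing, non-shared, non-physical area by one page. 'base' and
 * 'old_size' are assumed to describe an area created earlier.
 *
 *     int rc = as_area_resize(AS, base, old_size + PAGE_SIZE, 0);
 *     if (rc != 0) {
 *         // ENOENT, ENOTSUP, EPERM or EADDRNOTAVAIL, as above
 *     }
 */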
 
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
    as_area_t *area;
    uintptr_t base;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Start TLB shootdown sequence.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

    /*
     * Visit only the pages mapped by the used_space B+tree.
     */
    for (cur = area->used_space.leaf_head.next;
        cur != &area->used_space.leaf_head; cur = cur->next) {
        btree_node_t *node;
        int i;

        node = list_get_instance(cur, btree_node_t, leaf_link);
        for (i = 0; i < node->keys; i++) {
            uintptr_t b = node->key[i];
            count_t j;
            pte_t *pte;

            for (j = 0; j < (count_t) node->value[i]; j++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) &&
                    PTE_PRESENT(pte));
                if (area->backend &&
                    area->backend->frame_free) {
                    area->backend->frame_free(area, b +
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + j * PAGE_SIZE);
                page_table_unlock(as, false);
            }
        }
    }

    /*
     * Finish TLB shootdown sequence.
     */
    tlb_invalidate_pages(as->asid, area->base, area->pages);
    tlb_shootdown_finalize();

    /*
     * Invalidate potential software translation caches (e.g. TSB on
     * sparc64).
     */
    as_invalidate_translation_cache(as, area->base, area->pages);

    btree_destroy(&area->used_space);

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success, ENOENT if there is no such task or no such address
 * space area, EPERM if there was a problem in accepting the area, ENOMEM if
 * there was a problem in allocating the destination address space area, or
 * ENOTSUP if the address space area backend does not support sharing.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;

    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping. */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    if (src_size != acc_size ||
        (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock);
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    src_area->backend->share(src_area);

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);

        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock);
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);

    interrupts_restore(ipl);

    return 0;
}

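/*
 * Illustrative sketch, not part of the original file: sharing an area
 * read-only into another address space. The variables src_as, dst_as
 * and both base addresses are assumptions.
 *
 *     int rc = as_area_share(src_as, src_base, src_size,
 *         dst_as, dst_base, AS_AREA_READ);
 *     // rc is 0, ENOENT, EPERM, ENOMEM or ENOTSUP, as documented above
 */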
 
/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };

    if (!(area->flags & flagmap[access]))
        return false;

    return true;
}

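/*
 * Illustrative sketch, not part of the original file: a backend page
 * fault handler would typically validate the faulting access against
 * the area's flags first.
 *
 *     if (!as_area_check_access(area, access))
 *         return AS_PF_FAULT;
 */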
 
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}

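/*
 * Illustrative sketch, not part of the original file: an architecture's
 * low-level fault handler is expected to call as_page_fault() roughly
 * like this. The handler name, the fault-register read and the
 * fault_if_from_uspace() call are assumptions.
 *
 *     void data_abort(int n, istate_t *istate)
 *     {
 *         uintptr_t badvaddr = ...;    // read from a fault register
 *
 *         if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE),
 *             PF_ACCESS_READ, istate) == AS_PF_FAULT)
 *             fault_if_from_uspace(istate, "Page fault: %#x.",
 *                 badvaddr);
 *     }
 */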
 
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        mutex_lock_active(&old_as->lock);
        ASSERT(old_as->cpu_refcount);
        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old_as->asid != ASID_INVALID);
            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_head);
        }
        mutex_unlock(&old_as->lock);

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new_as->lock);
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID) {
            list_remove(&new_as->inactive_as_with_asid_link);
        } else {
            /*
             * Defer call to asid_get() until new_as->lock is
             * released.
             */
            needs_asid = true;
        }
    }
#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
    mutex_unlock(&new_as->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new_as->lock);
        new_as->asid = asid;
        mutex_unlock(&new_as->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);

    AS = new_as;
}

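/*
 * Illustrative sketch, not part of the original file: the scheduler
 * switches address spaces when the next thread belongs to a different
 * task. The surrounding scheduler variables are assumptions.
 *
 *     if (TASK != THREAD->task) {
 *         as_t *as1 = (TASK ? TASK->as : NULL);
 *         as_t *as2 = THREAD->task->as;
 *
 *         if (as1 != as2)
 *             as_switch(as1, as2);
 *         TASK = THREAD->task;
 *     }
 */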
 
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}

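/*
 * For instance, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE maps to
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE.
 */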
 
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel
 *     address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
#ifdef __OBJC__
    return [as_t page_table_create: flags];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
#endif
}

/** Destroy page table.
 *
 * Destroy page table in an architecture-specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
#ifdef __OBJC__
    return [as_t page_table_destroy: page_table];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
#endif
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
    [as page_table_lock: lock];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
#endif
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
    [as page_table_unlock: unlock];
#else
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
#endif
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 *     failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (lnode) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

1387 jermar 1228
/** Mark portion of address space area as used.
1229
 *
1230
 * The address space area must be already locked.
1231
 *
1232
 * @param a Address space area.
1233
 * @param page First page to be marked.
1234
 * @param count Number of page to be marked.
1235
 *
1236
 * @return 0 on failure and 1 on success.
1237
 */
1780 jermar 1238
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1239
{
1240
    btree_node_t *leaf, *node;
1241
    count_t pages;
1242
    int i;
1243
 
1244
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1245
    ASSERT(count);
1246
 
1247
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1248
    if (pages) {
1249
        /*
1250
         * We hit the beginning of some used space.
1251
         */
1252
        return 0;
1253
    }
1254
 
1437 jermar 1255
    if (!leaf->keys) {
1256
        btree_insert(&a->used_space, page, (void *) count, leaf);
1257
        return 1;
1258
    }
1259
 
1387 jermar 1260
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1261
    if (node) {
2087 jermar 1262
        uintptr_t left_pg = node->key[node->keys - 1];
1263
        uintptr_t right_pg = leaf->key[0];
1264
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1265
        count_t right_cnt = (count_t) leaf->value[0];
1387 jermar 1266
 
1267
        /*
1268
         * Examine the possibility that the interval fits
1269
         * somewhere between the rightmost interval of
1270
         * the left neigbour and the first interval of the leaf.
1271
         */
1272
 
1273
        if (page >= right_pg) {
1274
            /* Do nothing. */
2087 jermar 1275
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1276
            left_cnt * PAGE_SIZE)) {
1387 jermar 1277
            /* The interval intersects with the left interval. */
1278
            return 0;
2087 jermar 1279
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1280
            right_cnt * PAGE_SIZE)) {
1387 jermar 1281
            /* The interval intersects with the right interval. */
1282
            return 0;          
2087 jermar 1283
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1284
            (page + count * PAGE_SIZE == right_pg)) {
1285
            /*
1286
             * The interval can be added by merging the two already
1287
             * present intervals.
1288
             */
1403 jermar 1289
            node->value[node->keys - 1] += count + right_cnt;
1387 jermar 1290
            btree_remove(&a->used_space, right_pg, leaf);
1291
            return 1;
2087 jermar 1292
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1293
            /*
1294
             * The interval can be added by simply growing the left
1295
             * interval.
1296
             */
1403 jermar 1297
            node->value[node->keys - 1] += count;
1387 jermar 1298
            return 1;
2087 jermar 1299
        } else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1300
            /*
2087 jermar 1301
             * The interval can be addded by simply moving base of
1302
             * the right interval down and increasing its size
1303
             * accordingly.
1387 jermar 1304
             */
1403 jermar 1305
            leaf->value[0] += count;
1387 jermar 1306
            leaf->key[0] = page;
1307
            return 1;
1308
        } else {
1309
            /*
1310
             * The interval is between both neigbouring intervals,
1311
             * but cannot be merged with any of them.
1312
             */
2087 jermar 1313
            btree_insert(&a->used_space, page, (void *) count,
1314
                leaf);
1387 jermar 1315
            return 1;
1316
        }
1317
    } else if (page < leaf->key[0]) {
1780 jermar 1318
        uintptr_t right_pg = leaf->key[0];
1387 jermar 1319
        count_t right_cnt = (count_t) leaf->value[0];
1320
 
1321
        /*
2087 jermar 1322
         * Investigate the border case in which the left neighbour does
1323
         * not exist but the interval fits from the left.
1387 jermar 1324
         */
1325
 
2087 jermar 1326
        if (overlaps(page, count * PAGE_SIZE, right_pg,
1327
            right_cnt * PAGE_SIZE)) {
1387 jermar 1328
            /* The interval intersects with the right interval. */
1329
            return 0;
2087 jermar 1330
        } else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1331
            /*
2087 jermar 1332
             * The interval can be added by moving the base of the
1333
             * right interval down and increasing its size
1334
             * accordingly.
1387 jermar 1335
             */
1336
            leaf->key[0] = page;
1403 jermar 1337
            leaf->value[0] += count;
1387 jermar 1338
            return 1;
1339
        } else {
1340
            /*
1341
             * The interval doesn't adjoin with the right interval.
1342
             * It must be added individually.
1343
             */
2087 jermar 1344
            btree_insert(&a->used_space, page, (void *) count,
1345
                leaf);
1387 jermar 1346
            return 1;
1347
        }
1348
    }
1349
 
1350
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1351
    if (node) {
2087 jermar 1352
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1353
        uintptr_t right_pg = node->key[0];
1354
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1355
        count_t right_cnt = (count_t) node->value[0];
1387 jermar 1356
 
1357
        /*
1358
         * Examine the possibility that the interval fits
1359
         * somewhere between the leftmost interval of
1360
         * the right neigbour and the last interval of the leaf.
1361
         */
1362
 
1363
        if (page < left_pg) {
1364
            /* Do nothing. */
2087 jermar 1365
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1366
            left_cnt * PAGE_SIZE)) {
1387 jermar 1367
            /* The interval intersects with the left interval. */
1368
            return 0;
2087 jermar 1369
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1370
            right_cnt * PAGE_SIZE)) {
1387 jermar 1371
            /* The interval intersects with the right interval. */
1372
            return 0;          
2087 jermar 1373
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1374
            (page + count * PAGE_SIZE == right_pg)) {
1375
            /*
1376
             * The interval can be added by merging the two already
1377
             * present intervals.
1378
             * */
1403 jermar 1379
            leaf->value[leaf->keys - 1] += count + right_cnt;
1387 jermar 1380
            btree_remove(&a->used_space, right_pg, node);
1381
            return 1;
2087 jermar 1382
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1383
            /*
1384
             * The interval can be added by simply growing the left
1385
             * interval.
1386
             * */
1403 jermar 1387
            leaf->value[leaf->keys - 1] +=  count;
1387 jermar 1388
            return 1;
2087 jermar 1389
        } else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1390
            /*
2087 jermar 1391
             * The interval can be addded by simply moving base of
1392
             * the right interval down and increasing its size
1393
             * accordingly.
1387 jermar 1394
             */
1403 jermar 1395
            node->value[0] += count;
1387 jermar 1396
            node->key[0] = page;
1397
            return 1;
1398
        } else {
1399
            /*
1400
             * The interval is between both neigbouring intervals,
1401
             * but cannot be merged with any of them.
1402
             */
2087 jermar 1403
            btree_insert(&a->used_space, page, (void *) count,
1404
                leaf);
1387 jermar 1405
            return 1;
1406
        }
1407
    } else if (page >= leaf->key[leaf->keys - 1]) {
1780 jermar 1408
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 jermar 1409
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1410
 
1411
        /*
2087 jermar 1412
         * Investigate the border case in which the right neighbour
1413
         * does not exist but the interval fits from the right.
1387 jermar 1414
         */
1415
 
2087 jermar 1416
        if (overlaps(page, count * PAGE_SIZE, left_pg,
1417
            left_cnt * PAGE_SIZE)) {
1403 jermar 1418
            /* The interval intersects with the left interval. */
1387 jermar 1419
            return 0;
2087 jermar 1420
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1421
            /*
1422
             * The interval can be added by growing the left
1423
             * interval.
1424
             */
1403 jermar 1425
            leaf->value[leaf->keys - 1] += count;
1387 jermar 1426
            return 1;
1427
        } else {
1428
            /*
1429
             * The interval doesn't adjoin with the left interval.
1430
             * It must be added individually.
1431
             */
2087 jermar 1432
            btree_insert(&a->used_space, page, (void *) count,
1433
                leaf);
1387 jermar 1434
            return 1;
1435
        }
1436
    }
1437
 
1438
    /*
2087 jermar 1439
     * Note that if the algorithm made it thus far, the interval can fit
1440
     * only between two other intervals of the leaf. The two border cases
1441
     * were already resolved.
1387 jermar 1442
     */
1443
    for (i = 1; i < leaf->keys; i++) {
1444
        if (page < leaf->key[i]) {
2087 jermar 1445
            uintptr_t left_pg = leaf->key[i - 1];
1446
            uintptr_t right_pg = leaf->key[i];
1447
            count_t left_cnt = (count_t) leaf->value[i - 1];
1448
            count_t right_cnt = (count_t) leaf->value[i];
1387 jermar 1449
 
1450
            /*
1451
             * The interval fits between left_pg and right_pg.
1452
             */
1453
 
2087 jermar 1454
            if (overlaps(page, count * PAGE_SIZE, left_pg,
1455
                left_cnt * PAGE_SIZE)) {
1456
                /*
1457
                 * The interval intersects with the left
1458
                 * interval.
1459
                 */
1387 jermar 1460
                return 0;
2087 jermar 1461
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1462
                right_cnt * PAGE_SIZE)) {
1463
                /*
1464
                 * The interval intersects with the right
1465
                 * interval.
1466
                 */
1387 jermar 1467
                return 0;
2087 jermar 1468
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1469
                (page + count * PAGE_SIZE == right_pg)) {
1470
                /*
1471
                 * The interval can be added by merging the two
1472
                 * already present intervals.
1473
                 */
1403 jermar 1474
                leaf->value[i - 1] += count + right_cnt;
1387 jermar 1475
                btree_remove(&a->used_space, right_pg, leaf);
1476
                return 1;
2087 jermar 1477
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1478
                /*
1479
                 * The interval can be added by simply growing
1480
                 * the left interval.
1481
                 */
1403 jermar 1482
                leaf->value[i - 1] += count;
1387 jermar 1483
                return 1;
2087 jermar 1484
            } else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1485
                /*
2087 jermar 1486
                 * The interval can be added by simply moving the
1487
                 * base of the right interval down and
1488
                 * increasing its size accordingly.
1387 jermar 1489
                 */
1403 jermar 1490
                leaf->value[i] += count;
1387 jermar 1491
                leaf->key[i] = page;
1492
                return 1;
1493
            } else {
1494
                /*
2087 jermar 1495
                 * The interval is between both neighbouring
1496
                 * intervals, but cannot be merged with any of
1497
                 * them.
1387 jermar 1498
                 */
2087 jermar 1499
                btree_insert(&a->used_space, page,
1500
                    (void *) count, leaf);
1387 jermar 1501
                return 1;
1502
            }
1503
        }
1504
    }
1505
 
2087 jermar 1506
    panic("Inconsistency detected while adding %d pages of used space at "
1507
        "%p.\n", count, page);
1387 jermar 1508
}
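 
/*
 * Illustrative sketch (not part of the original sources): the three
 * insertions below would leave a single interval in a->used_space, because
 * the last call bridges the gap between its two neighbours and triggers the
 * merge path above. The names a and base are assumed to denote a locked
 * address space area and a page-aligned address, respectively.
 *
 *      used_space_insert(a, base, 1);                  [base, +1 page)
 *      used_space_insert(a, base + 2 * PAGE_SIZE, 1);  two intervals
 *      used_space_insert(a, base + PAGE_SIZE, 1);      merged: [base, +3 pages)
 */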
1509
 
1510
/** Mark portion of address space area as unused.
1511
 *
1512
 * The address space area must be already locked.
1513
 *
1514
 * @param a Address space area.
1515
 * @param page First page to be marked.
1516
 * @param count Number of pages to be marked.
1517
 *
1518
 * @return 0 on failure and 1 on success.
1519
 */
1780 jermar 1520
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1521
{
1522
    btree_node_t *leaf, *node;
1523
    count_t pages;
1524
    int i;
1525
 
1526
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1527
    ASSERT(count);
1528
 
1529
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1530
    if (pages) {
1531
        /*
1532
         * We are lucky; page is the beginning of some interval.
1533
         */
1534
        if (count > pages) {
1535
            return 0;
1536
        } else if (count == pages) {
1537
            btree_remove(&a->used_space, page, leaf);
1403 jermar 1538
            return 1;
1387 jermar 1539
        } else {
1540
            /*
1541
             * Find the respective interval.
1542
             * Decrease its size and relocate its start address.
1543
             */
1544
            for (i = 0; i < leaf->keys; i++) {
1545
                if (leaf->key[i] == page) {
2087 jermar 1546
                    leaf->key[i] += count * PAGE_SIZE;
1403 jermar 1547
                    leaf->value[i] -= count;
1387 jermar 1548
                    return 1;
1549
                }
1550
            }
1551
            goto error;
1552
        }
1553
    }
1554
 
1555
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1556
    if (node && page < leaf->key[0]) {
1780 jermar 1557
        uintptr_t left_pg = node->key[node->keys - 1];
1387 jermar 1558
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1559
 
2087 jermar 1560
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1561
            count * PAGE_SIZE)) {
1562
            if (page + count * PAGE_SIZE ==
1563
                left_pg + left_cnt * PAGE_SIZE) {
1387 jermar 1564
                /*
2087 jermar 1565
                 * The interval is contained in the rightmost
1566
                 * interval of the left neighbour and can be
1567
                 * removed by updating the size of the bigger
1568
                 * interval.
1387 jermar 1569
                 */
1403 jermar 1570
                node->value[node->keys - 1] -= count;
1387 jermar 1571
                return 1;
2087 jermar 1572
            } else if (page + count * PAGE_SIZE <
1573
                left_pg + left_cnt * PAGE_SIZE) {
1403 jermar 1574
                count_t new_cnt;
1387 jermar 1575
 
1576
                /*
2087 jermar 1577
                 * The interval is contained in the rightmost
1578
                 * interval of the left neighbour but its
1579
                 * removal requires both updating the size of
1580
                 * the original interval and also inserting a
1581
                 * new interval.
1387 jermar 1582
                 */
2087 jermar 1583
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1584
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1403 jermar 1585
                node->value[node->keys - 1] -= count + new_cnt;
2087 jermar 1586
                btree_insert(&a->used_space, page +
1587
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
1387 jermar 1588
                return 1;
1589
            }
1590
        }
1591
        return 0;
1592
    } else if (page < leaf->key[0]) {
1593
        return 0;
1594
    }
1595
 
1596
    if (page > leaf->key[leaf->keys - 1]) {
1780 jermar 1597
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 jermar 1598
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1599
 
2087 jermar 1600
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1601
            count * PAGE_SIZE)) {
1602
            if (page + count * PAGE_SIZE ==
1603
                left_pg + left_cnt * PAGE_SIZE) {
1387 jermar 1604
                /*
2087 jermar 1605
                 * The interval is contained in the rightmost
1606
                 * interval of the leaf and can be removed by
1607
                 * updating the size of the bigger interval.
1387 jermar 1608
                 */
1403 jermar 1609
                leaf->value[leaf->keys - 1] -= count;
1387 jermar 1610
                return 1;
2087 jermar 1611
            } else if (page + count * PAGE_SIZE < left_pg +
1612
                left_cnt * PAGE_SIZE) {
1403 jermar 1613
                count_t new_cnt;
1387 jermar 1614
 
1615
                /*
2087 jermar 1616
                 * The interval is contained in the rightmost
1617
                 * interval of the leaf but its removal
1618
                 * requires both updating the size of the
1619
                 * original interval and also inserting a new
1620
                 * interval.
1387 jermar 1621
                 */
2087 jermar 1622
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1623
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1403 jermar 1624
                leaf->value[leaf->keys - 1] -= count + new_cnt;
2087 jermar 1625
                btree_insert(&a->used_space, page +
1626
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
1387 jermar 1627
                return 1;
1628
            }
1629
        }
1630
        return 0;
1631
    }
1632
 
1633
    /*
1634
     * The border cases have been already resolved.
1635
     * Now the interval can only be between intervals of the leaf.
1636
     */
1637
    for (i = 1; i < leaf->keys; i++) {
1638
        if (page < leaf->key[i]) {
1780 jermar 1639
            uintptr_t left_pg = leaf->key[i - 1];
1387 jermar 1640
            count_t left_cnt = (count_t) leaf->value[i - 1];
1641
 
1642
            /*
2087 jermar 1643
             * Now the interval is between intervals corresponding
1644
             * to (i - 1) and i.
1387 jermar 1645
             */
2087 jermar 1646
            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1647
                count * PAGE_SIZE)) {
1648
                if (page + count * PAGE_SIZE ==
1649
                    left_pg + left_cnt * PAGE_SIZE) {
1387 jermar 1650
                    /*
2087 jermar 1651
                     * The interval is contained in the
1652
                     * interval (i - 1) of the leaf and can
1653
                     * be removed by updating the size of
1654
                     * the bigger interval.
1387 jermar 1655
                     */
1403 jermar 1656
                    leaf->value[i - 1] -= count;
1387 jermar 1657
                    return 1;
2087 jermar 1658
                } else if (page + count * PAGE_SIZE <
1659
                    left_pg + left_cnt * PAGE_SIZE) {
1403 jermar 1660
                    count_t new_cnt;
1387 jermar 1661
 
1662
                    /*
2087 jermar 1663
                     * The interval is contained in the
1664
                     * interval (i - 1) of the leaf but its
1665
                     * removal requires both updating the
1666
                     * size of the original interval and
1387 jermar 1667
                     * also inserting a new interval.
1668
                     */
2087 jermar 1669
                    new_cnt = ((left_pg +
1670
                        left_cnt * PAGE_SIZE) -
1671
                        (page + count * PAGE_SIZE)) >>
1672
                        PAGE_WIDTH;
1403 jermar 1673
                    leaf->value[i - 1] -= count + new_cnt;
2087 jermar 1674
                    btree_insert(&a->used_space, page +
1675
                        count * PAGE_SIZE, (void *) new_cnt,
1676
                        leaf);
1387 jermar 1677
                    return 1;
1678
                }
1679
            }
1680
            return 0;
1681
        }
1682
    }
1683
 
1684
error:
2087 jermar 1685
    panic("Inconsistency detected while removing %d pages of used space "
1686
        "from %p.\n", count, page);
1387 jermar 1687
}
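 
/*
 * Illustrative sketch (not part of the original sources): removing pages
 * from the middle of a tracked interval exercises the splitting path above,
 * which shrinks the original interval and inserts a new one after the hole:
 *
 *      used_space_insert(a, base, 4);                  [base, +4 pages)
 *      used_space_remove(a, base + PAGE_SIZE, 2);      [base, +1 page) and
 *                                                      [base + 3 * PAGE_SIZE,
 *                                                      +1 page)
 */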
1688
 
1409 jermar 1689
/** Remove reference to address space area share info.
1690
 *
1691
 * If the reference count drops to 0, the sh_info is deallocated.
1692
 *
1693
 * @param sh_info Pointer to address space area share info.
1694
 */
1695
void sh_info_remove_reference(share_info_t *sh_info)
1696
{
1697
    bool dealloc = false;
1698
 
1699
    mutex_lock(&sh_info->lock);
1700
    ASSERT(sh_info->refcount);
1701
    if (--sh_info->refcount == 0) {
1702
        dealloc = true;
1495 jermar 1703
        link_t *cur;
1409 jermar 1704
 
1705
        /*
1706
         * Now carefully walk the pagemap B+tree and drop the
1707
         * reference to every frame found there.
1708
         */
2087 jermar 1709
        for (cur = sh_info->pagemap.leaf_head.next;
1710
            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1409 jermar 1711
            btree_node_t *node;
1495 jermar 1712
            int i;
1409 jermar 1713
 
1495 jermar 1714
            node = list_get_instance(cur, btree_node_t, leaf_link);
1715
            for (i = 0; i < node->keys; i++)
1780 jermar 1716
                frame_free((uintptr_t) node->value[i]);
1409 jermar 1717
        }
1718
 
1719
    }
1720
    mutex_unlock(&sh_info->lock);
1721
 
1722
    if (dealloc) {
1723
        btree_destroy(&sh_info->pagemap);
1724
        free(sh_info);
1725
    }
1726
}
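 
/*
 * Hedged sketch of the intended life cycle (illustrative only): the code
 * that shares an area is expected to grab a reference under sh_info->lock,
 * and every party destroying its sharing area drops it again; the last
 * drop also destroys the pagemap:
 *
 *      mutex_lock(&sh_info->lock);
 *      sh_info->refcount++;
 *      mutex_unlock(&sh_info->lock);
 *      ...
 *      sh_info_remove_reference(sh_info);      (may free sh_info)
 */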
1727
 
1235 jermar 1728
/*
1729
 * Address space related syscalls.
1730
 */
1731
 
1732
/** Wrapper for as_area_create(). */
1780 jermar 1733
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1235 jermar 1734
{
2087 jermar 1735
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1736
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
1780 jermar 1737
        return (unative_t) address;
1235 jermar 1738
    else
1780 jermar 1739
        return (unative_t) -1;
1235 jermar 1740
}
1741
 
1793 jermar 1742
/** Wrapper for as_area_resize(). */
1780 jermar 1743
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1235 jermar 1744
{
1780 jermar 1745
    return (unative_t) as_area_resize(AS, address, size, 0);
1235 jermar 1746
}
1747
 
1793 jermar 1748
/** Wrapper for as_area_destroy(). */
1780 jermar 1749
unative_t sys_as_area_destroy(uintptr_t address)
1306 jermar 1750
{
1780 jermar 1751
    return (unative_t) as_area_destroy(AS, address);
1306 jermar 1752
}
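 
/*
 * Hedged illustration of the calling convention (the userspace side shown
 * here is an assumption, not taken from this file): sys_as_area_create()
 * returns the base address of the new area on success and (unative_t) -1
 * on failure, so a libc-level caller might wrap it as:
 *
 *      unative_t rc = __SYSCALL3(SYS_AS_AREA_CREATE, (unative_t) addr,
 *          (unative_t) size, (unative_t) flags);
 *      return (rc == (unative_t) -1) ? NULL : (void *) rc;
 */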
1702 cejka 1753
 
1914 jermar 1754
/** Print out information about address space.
1755
 *
1756
 * @param as Address space.
1757
 */
1758
void as_print(as_t *as)
1759
{
1760
    ipl_t ipl;
1761
 
1762
    ipl = interrupts_disable();
1763
    mutex_lock(&as->lock);
1764
 
1765
    /* Print out info about address space areas. */
1766
    link_t *cur;
2087 jermar 1767
    for (cur = as->as_area_btree.leaf_head.next;
1768
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1769
        btree_node_t *node;
1914 jermar 1770
 
2087 jermar 1771
        node = list_get_instance(cur, btree_node_t, leaf_link);
1772
 
1914 jermar 1773
        int i;
1774
        for (i = 0; i < node->keys; i++) {
1915 jermar 1775
            as_area_t *area = node->value[i];
1914 jermar 1776
 
1777
            mutex_lock(&area->lock);
1778
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
2087 jermar 1779
                area, area->base, area->pages, area->base,
1780
                area->base + area->pages * PAGE_SIZE);
1914 jermar 1781
            mutex_unlock(&area->lock);
1782
        }
1783
    }
1784
 
1785
    mutex_unlock(&as->lock);
1786
    interrupts_restore(ipl);
1787
}
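 
/*
 * Example output (the format string is the printf() above; the addresses
 * and page counts are made up):
 *
 *      as_area: 0xdeadbeef, base=0x40000000, pages=16 (0x40000000 - 0x40010000)
 */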
1788
 
1757 jermar 1789
/** @}
1702 cejka 1790
 */