703 jermar 1
/*
2
 * Copyright (C) 2001-2006 Jakub Jermar
3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 *
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
28
 
1757 jermar 29
/** @addtogroup genericmm
1702 cejka 30
 * @{
31
 */
32
 
1248 jermar 33
/**
1702 cejka 34
 * @file
1248 jermar 35
 * @brief   Address space related functions.
36
 *
703 jermar 37
 * This file contains address space manipulation functions.
38
 * Roughly speaking, this is a higher-level client of
39
 * the Virtual Address Translation (VAT) subsystem.
1248 jermar 40
 *
41
 * Functionality provided by this file allows one to
1757 jermar 42
 * create address spaces and create, resize and share
1248 jermar 43
 * address space areas.
44
 *
45
 * @see page.c
46
 *
703 jermar 47
 */
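/*
 * Illustrative sketch, not part of the original file: a minimal lifecycle of
 * a user address space as exposed by this file. USER_BASE is a placeholder
 * for a page-aligned base address chosen by the caller; attrs is passed as 0
 * (no special attributes). Real callers such as the program loader pass
 * different flags and a suitable backend.
 *
 *  as_t *as = as_create(0);
 *  as_area_t *area = as_area_create(as,
 *      AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *      4 * PAGE_SIZE, USER_BASE, 0, NULL, NULL);
 *  ...
 *  as_area_destroy(as, USER_BASE);
 *  as_destroy(as);
 */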
48
 
49
#include <mm/as.h>
756 jermar 50
#include <arch/mm/as.h>
703 jermar 51
#include <mm/page.h>
52
#include <mm/frame.h>
814 palkovsky 53
#include <mm/slab.h>
703 jermar 54
#include <mm/tlb.h>
55
#include <arch/mm/page.h>
56
#include <genarch/mm/page_pt.h>
1108 jermar 57
#include <genarch/mm/page_ht.h>
727 jermar 58
#include <mm/asid.h>
703 jermar 59
#include <arch/mm/asid.h>
60
#include <synch/spinlock.h>
1380 jermar 61
#include <synch/mutex.h>
788 jermar 62
#include <adt/list.h>
1147 jermar 63
#include <adt/btree.h>
1235 jermar 64
#include <proc/task.h>
1288 jermar 65
#include <proc/thread.h>
1235 jermar 66
#include <arch/asm.h>
703 jermar 67
#include <panic.h>
68
#include <debug.h>
1235 jermar 69
#include <print.h>
703 jermar 70
#include <memstr.h>
1070 jermar 71
#include <macros.h>
703 jermar 72
#include <arch.h>
1235 jermar 73
#include <errno.h>
74
#include <config.h>
1387 jermar 75
#include <align.h>
1235 jermar 76
#include <arch/types.h>
77
#include <typedefs.h>
1288 jermar 78
#include <syscall/copy.h>
79
#include <arch/interrupt.h>
703 jermar 80
 
2009 jermar 81
#ifdef CONFIG_VIRT_IDX_DCACHE
82
#include <arch/mm/cache.h>
83
#endif /* CONFIG_VIRT_IDX_DCACHE */
84
 
1757 jermar 85
/**
86
 * Each architecture decides what functions will be used to carry out
87
 * address space operations such as creating or locking page tables.
88
 */
756 jermar 89
as_operations_t *as_operations = NULL;
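/*
 * Illustrative note, not part of the original file: judging from the
 * dispatchers further down (page_table_create() and friends), an
 * architecture fills in an as_operations_t similar to the hypothetical
 * sketch below and points as_operations at it during as_arch_init().
 * The real structure layout and operation names live in the respective
 * arch/genarch headers.
 *
 *  static as_operations_t my_as_operations = {
 *      .page_table_create = my_page_table_create,
 *      .page_table_destroy = my_page_table_destroy,
 *      .page_table_lock = my_page_table_lock,
 *      .page_table_unlock = my_page_table_unlock
 *  };
 *
 *  as_operations = &my_as_operations;
 */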
703 jermar 90
 
1890 jermar 91
/**
92
 * Slab for as_t objects.
93
 */
94
static slab_cache_t *as_slab;
95
 
1415 jermar 96
/** This lock protects the inactive_as_with_asid_head list. It must be acquired before any as_t mutex. */
97
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
823 jermar 98
 
99
/**
100
 * This list contains address spaces that are not active on any
101
 * processor and that have a valid ASID.
102
 */
103
LIST_INITIALIZE(inactive_as_with_asid_head);
104
 
757 jermar 105
/** Kernel address space. */
106
as_t *AS_KERNEL = NULL;
107
 
1235 jermar 108
static int area_flags_to_page_flags(int aflags);
1780 jermar 109
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
110
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
1409 jermar 111
static void sh_info_remove_reference(share_info_t *sh_info);
703 jermar 112
 
1891 jermar 113
static int as_constructor(void *obj, int flags)
114
{
115
    as_t *as = (as_t *) obj;
116
    int rc;
117
 
118
    link_initialize(&as->inactive_as_with_asid_link);
119
    mutex_initialize(&as->lock);   
120
 
121
    rc = as_constructor_arch(as, flags);
122
 
123
    return rc;
124
}
125
 
126
static int as_destructor(void *obj)
127
{
128
    as_t *as = (as_t *) obj;
129
 
130
    return as_destructor_arch(as);
131
}
132
 
756 jermar 133
/** Initialize address space subsystem. */
134
void as_init(void)
135
{
136
    as_arch_init();
1890 jermar 137
 
1891 jermar 138
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
139
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
1890 jermar 140
 
789 palkovsky 141
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
1383 decky 142
    if (!AS_KERNEL)
143
        panic("can't create kernel address space\n");
144
 
756 jermar 145
}
146
 
757 jermar 147
/** Create address space.
148
 *
149
 * @param flags Flags that influence the way in which the address space is created.
150
 */
756 jermar 151
as_t *as_create(int flags)
703 jermar 152
{
153
    as_t *as;
154
 
1890 jermar 155
    as = (as_t *) slab_alloc(as_slab, 0);
1891 jermar 156
    (void) as_create_arch(as, 0);
157
 
1147 jermar 158
    btree_create(&as->as_area_btree);
822 palkovsky 159
 
160
    if (flags & FLAG_AS_KERNEL)
161
        as->asid = ASID_KERNEL;
162
    else
163
        as->asid = ASID_INVALID;
164
 
1468 jermar 165
    as->refcount = 0;
1415 jermar 166
    as->cpu_refcount = 0;
822 palkovsky 167
    as->page_table = page_table_create(flags);
703 jermar 168
 
169
    return as;
170
}
171
 
1468 jermar 172
/** Destroy address space.
173
 *
174
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
175
 * the address space can be destroyed.
176
 */
177
void as_destroy(as_t *as)
973 palkovsky 178
{
1468 jermar 179
    ipl_t ipl;
1594 jermar 180
    bool cond;
973 palkovsky 181
 
1468 jermar 182
    ASSERT(as->refcount == 0);
183
 
184
    /*
185
     * Since there is no reference to this address space,
186
     * it is safe not to lock its mutex.
187
     */
188
    ipl = interrupts_disable();
189
    spinlock_lock(&inactive_as_with_asid_lock);
1587 jermar 190
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
1594 jermar 191
        if (as != AS && as->cpu_refcount == 0)
1587 jermar 192
            list_remove(&as->inactive_as_with_asid_link);
1468 jermar 193
        asid_put(as->asid);
194
    }
195
    spinlock_unlock(&inactive_as_with_asid_lock);
196
 
197
    /*
198
     * Destroy address space areas of the address space.
1954 jermar 199
     * The B+tree must be walked carefully because it is
1594 jermar 200
     * also being destroyed.
1468 jermar 201
     */
1594 jermar 202
    for (cond = true; cond; ) {
1468 jermar 203
        btree_node_t *node;
1594 jermar 204
 
205
        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
206
        node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
207
 
208
        if ((cond = node->keys)) {
209
            as_area_destroy(as, node->key[0]);
210
        }
1468 jermar 211
    }
1495 jermar 212
 
1483 jermar 213
    btree_destroy(&as->as_area_btree);
1468 jermar 214
    page_table_destroy(as->page_table);
215
 
216
    interrupts_restore(ipl);
217
 
1890 jermar 218
    slab_free(as_slab, as);
973 palkovsky 219
}
220
 
703 jermar 221
/** Create address space area of common attributes.
222
 *
223
 * The created address space area is added to the target address space.
224
 *
225
 * @param as Target address space.
1239 jermar 226
 * @param flags Flags of the area memory.
1048 jermar 227
 * @param size Size of area.
703 jermar 228
 * @param base Base address of area.
1239 jermar 229
 * @param attrs Attributes of the area.
1409 jermar 230
 * @param backend Address space area backend. NULL if no backend is used.
231
 * @param backend_data NULL or a pointer to an array holding two void *.
703 jermar 232
 *
233
 * @return Address space area on success or NULL on failure.
234
 */
1780 jermar 235
as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
1424 jermar 236
           mem_backend_t *backend, mem_backend_data_t *backend_data)
703 jermar 237
{
238
    ipl_t ipl;
239
    as_area_t *a;
240
 
241
    if (base % PAGE_SIZE)
1048 jermar 242
        return NULL;
243
 
1233 jermar 244
    if (!size)
245
        return NULL;
246
 
1048 jermar 247
    /* Writeable executable areas are not supported. */
248
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
249
        return NULL;
703 jermar 250
 
251
    ipl = interrupts_disable();
1380 jermar 252
    mutex_lock(&as->lock);
703 jermar 253
 
1048 jermar 254
    if (!check_area_conflicts(as, base, size, NULL)) {
1380 jermar 255
        mutex_unlock(&as->lock);
1048 jermar 256
        interrupts_restore(ipl);
257
        return NULL;
258
    }
703 jermar 259
 
822 palkovsky 260
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
703 jermar 261
 
1380 jermar 262
    mutex_initialize(&a->lock);
822 palkovsky 263
 
1424 jermar 264
    a->as = as;
1026 jermar 265
    a->flags = flags;
1239 jermar 266
    a->attributes = attrs;
1048 jermar 267
    a->pages = SIZE2FRAMES(size);
822 palkovsky 268
    a->base = base;
1409 jermar 269
    a->sh_info = NULL;
270
    a->backend = backend;
1424 jermar 271
    if (backend_data)
272
        a->backend_data = *backend_data;
273
    else
1780 jermar 274
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
1424 jermar 275
 
1387 jermar 276
    btree_create(&a->used_space);
822 palkovsky 277
 
1147 jermar 278
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
822 palkovsky 279
 
1380 jermar 280
    mutex_unlock(&as->lock);
703 jermar 281
    interrupts_restore(ipl);
704 jermar 282
 
703 jermar 283
    return a;
284
}
285
 
1235 jermar 286
/** Find address space area and change it.
287
 *
288
 * @param as Address space.
289
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
290
 * @param size New size of the virtual memory block starting at address.
291
 * @param flags Flags influencing the remap operation. Currently unused.
292
 *
1306 jermar 293
 * @return Zero on success or a value from @ref errno.h otherwise.
1235 jermar 294
 */
1780 jermar 295
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
1235 jermar 296
{
1306 jermar 297
    as_area_t *area;
1235 jermar 298
    ipl_t ipl;
299
    size_t pages;
300
 
301
    ipl = interrupts_disable();
1380 jermar 302
    mutex_lock(&as->lock);
1235 jermar 303
 
304
    /*
305
     * Locate the area.
306
     */
307
    area = find_area_and_lock(as, address);
308
    if (!area) {
1380 jermar 309
        mutex_unlock(&as->lock);
1235 jermar 310
        interrupts_restore(ipl);
1306 jermar 311
        return ENOENT;
1235 jermar 312
    }
313
 
1424 jermar 314
    if (area->backend == &phys_backend) {
1235 jermar 315
        /*
316
         * Remapping of address space areas associated
317
         * with memory mapped devices is not supported.
318
         */
1380 jermar 319
        mutex_unlock(&area->lock);
320
        mutex_unlock(&as->lock);
1235 jermar 321
        interrupts_restore(ipl);
1306 jermar 322
        return ENOTSUP;
1235 jermar 323
    }
1409 jermar 324
    if (area->sh_info) {
325
        /*
326
         * Remapping of shared address space areas
327
         * is not supported.
328
         */
329
        mutex_unlock(&area->lock);
330
        mutex_unlock(&as->lock);
331
        interrupts_restore(ipl);
332
        return ENOTSUP;
333
    }
1235 jermar 334
 
335
    pages = SIZE2FRAMES((address - area->base) + size);
336
    if (!pages) {
337
        /*
338
         * Zero size address space areas are not allowed.
339
         */
1380 jermar 340
        mutex_unlock(&area->lock);
341
        mutex_unlock(&as->lock);
1235 jermar 342
        interrupts_restore(ipl);
1306 jermar 343
        return EPERM;
1235 jermar 344
    }
345
 
346
    if (pages < area->pages) {
1403 jermar 347
        bool cond;
1780 jermar 348
        uintptr_t start_free = area->base + pages*PAGE_SIZE;
1235 jermar 349
 
350
        /*
351
         * Shrinking the area.
352
         * No need to check for overlaps.
353
         */
1403 jermar 354
 
355
        /*
1436 jermar 356
         * Start TLB shootdown sequence.
357
         */
358
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
359
 
360
        /*
1403 jermar 361
         * Remove frames belonging to used space starting from
362
         * the highest addresses downwards until an overlap with
363
         * the resized address space area is found. Note that this
364
         * is also the right way to remove part of the used_space
365
         * B+tree leaf list.
366
         */    
367
        for (cond = true; cond;) {
368
            btree_node_t *node;
369
 
370
            ASSERT(!list_empty(&area->used_space.leaf_head));
371
            node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
372
            if ((cond = (bool) node->keys)) {
1780 jermar 373
                uintptr_t b = node->key[node->keys - 1];
1403 jermar 374
                count_t c = (count_t) node->value[node->keys - 1];
375
                int i = 0;
1235 jermar 376
 
1403 jermar 377
                if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
378
 
379
                    if (b + c*PAGE_SIZE <= start_free) {
380
                        /*
381
                         * The whole interval fits completely
382
                         * in the resized address space area.
383
                         */
384
                        break;
385
                    }
386
 
387
                    /*
388
                     * Part of the interval corresponding to b and c
389
                     * overlaps with the resized address space area.
390
                     */
391
 
392
                    cond = false;   /* we are almost done */
393
                    i = (start_free - b) >> PAGE_WIDTH;
394
                    if (!used_space_remove(area, start_free, c - i))
1889 jermar 395
                        panic("Could not remove used space.\n");
1403 jermar 396
                } else {
397
                    /*
398
                     * The interval of used space can be completely removed.
399
                     */
400
                    if (!used_space_remove(area, b, c))
401
                        panic("Could not remove used space.\n");
402
                }
403
 
404
                for (; i < c; i++) {
405
                    pte_t *pte;
406
 
407
                    page_table_lock(as, false);
408
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
409
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
1424 jermar 410
                    if (area->backend && area->backend->frame_free) {
411
                        area->backend->frame_free(area,
1409 jermar 412
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
413
                    }
1403 jermar 414
                    page_mapping_remove(as, b + i*PAGE_SIZE);
415
                    page_table_unlock(as, false);
416
                }
1235 jermar 417
            }
418
        }
1436 jermar 419
 
1235 jermar 420
        /*
1436 jermar 421
         * Finish TLB shootdown sequence.
1235 jermar 422
         */
1954 jermar 423
        tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
1235 jermar 424
        tlb_shootdown_finalize();
1889 jermar 425
 
426
        /*
427
         * Invalidate software translation caches (e.g. TSB on sparc64).
428
         */
429
        as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
1235 jermar 430
    } else {
431
        /*
432
         * Growing the area.
433
         * Check for overlaps with other address space areas.
434
         */
435
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
1380 jermar 436
            mutex_unlock(&area->lock);
437
            mutex_unlock(&as->lock);       
1235 jermar 438
            interrupts_restore(ipl);
1306 jermar 439
            return EADDRNOTAVAIL;
1235 jermar 440
        }
441
    }
442
 
443
    area->pages = pages;
444
 
1380 jermar 445
    mutex_unlock(&area->lock);
446
    mutex_unlock(&as->lock);
1235 jermar 447
    interrupts_restore(ipl);
448
 
1306 jermar 449
    return 0;
1235 jermar 450
}
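/*
 * Illustrative sketch, not part of the original file: resizing an existing
 * area and interpreting the errno-style result of as_area_resize().
 * AREA_BASE and NEW_SIZE are placeholders supplied by the caller.
 *
 *  int rc = as_area_resize(AS, AREA_BASE, NEW_SIZE, 0);
 *  switch (rc) {
 *  case 0:             // resized (shrunk or grown) successfully
 *  case ENOENT:        // no area at AREA_BASE
 *  case ENOTSUP:       // device-mapped (phys_backend) or shared area
 *  case EPERM:         // the new size computes to zero pages
 *  case EADDRNOTAVAIL: // growth would collide with another area
 *      break;
 *  }
 */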
451
 
1306 jermar 452
/** Destroy address space area.
453
 *
454
 * @param as Address space.
455
 * @param address Address within the area to be deleted.
456
 *
457
 * @return Zero on success or a value from @ref errno.h on failure.
458
 */
1780 jermar 459
int as_area_destroy(as_t *as, uintptr_t address)
1306 jermar 460
{
461
    as_area_t *area;
1780 jermar 462
    uintptr_t base;
1495 jermar 463
    link_t *cur;
1306 jermar 464
    ipl_t ipl;
465
 
466
    ipl = interrupts_disable();
1380 jermar 467
    mutex_lock(&as->lock);
1306 jermar 468
 
469
    area = find_area_and_lock(as, address);
470
    if (!area) {
1380 jermar 471
        mutex_unlock(&as->lock);
1306 jermar 472
        interrupts_restore(ipl);
473
        return ENOENT;
474
    }
475
 
1403 jermar 476
    base = area->base;
477
 
1411 jermar 478
    /*
1436 jermar 479
     * Start TLB shootdown sequence.
480
     */
1889 jermar 481
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
1436 jermar 482
 
483
    /*
1411 jermar 484
     * Visit only the pages mapped by used_space B+tree.
485
     */
1495 jermar 486
    for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
1411 jermar 487
        btree_node_t *node;
1495 jermar 488
        int i;
1403 jermar 489
 
1495 jermar 490
        node = list_get_instance(cur, btree_node_t, leaf_link);
491
        for (i = 0; i < node->keys; i++) {
1780 jermar 492
            uintptr_t b = node->key[i];
1495 jermar 493
            count_t j;
1411 jermar 494
            pte_t *pte;
1403 jermar 495
 
1495 jermar 496
            for (j = 0; j < (count_t) node->value[i]; j++) {
1411 jermar 497
                page_table_lock(as, false);
1495 jermar 498
                pte = page_mapping_find(as, b + j*PAGE_SIZE);
1411 jermar 499
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
1424 jermar 500
                if (area->backend && area->backend->frame_free) {
501
                    area->backend->frame_free(area,
1495 jermar 502
                        b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
1403 jermar 503
                }
1889 jermar 504
                page_mapping_remove(as, b + j*PAGE_SIZE);              
1411 jermar 505
                page_table_unlock(as, false);
1306 jermar 506
            }
507
        }
508
    }
1403 jermar 509
 
1306 jermar 510
    /*
1436 jermar 511
     * Finish TLB shootdown sequence.
1306 jermar 512
     */
1889 jermar 513
    tlb_invalidate_pages(as->asid, area->base, area->pages);
1306 jermar 514
    tlb_shootdown_finalize();
1436 jermar 515
 
1889 jermar 516
    /*
517
     * Invalidate potential software translation caches (e.g. TSB on sparc64).
518
     */
519
    as_invalidate_translation_cache(as, area->base, area->pages);
520
 
1436 jermar 521
    btree_destroy(&area->used_space);
1306 jermar 522
 
1309 jermar 523
    area->attributes |= AS_AREA_ATTR_PARTIAL;
1409 jermar 524
 
525
    if (area->sh_info)
526
        sh_info_remove_reference(area->sh_info);
527
 
1380 jermar 528
    mutex_unlock(&area->lock);
1306 jermar 529
 
530
    /*
531
     * Remove the empty area from address space.
532
     */
1889 jermar 533
    btree_remove(&as->as_area_btree, base, NULL);
1306 jermar 534
 
1309 jermar 535
    free(area);
536
 
1889 jermar 537
    mutex_unlock(&as->lock);
1306 jermar 538
    interrupts_restore(ipl);
539
    return 0;
540
}
541
 
1413 jermar 542
/** Share address space area with another or the same address space.
1235 jermar 543
 *
1424 jermar 544
 * Address space area mapping is shared with a new address space area.
545
 * If the source address space area has not been shared so far,
546
 * a new sh_info is created. The new address space area simply gets the
547
 * sh_info of the source area. The process of duplicating the
548
 * mapping is done through the backend share function.
1413 jermar 549
 *
1417 jermar 550
 * @param src_as Pointer to source address space.
1239 jermar 551
 * @param src_base Base address of the source address space area.
1417 jermar 552
 * @param acc_size Expected size of the source area.
1428 palkovsky 553
 * @param dst_as Pointer to destination address space.
1417 jermar 554
 * @param dst_base Target base address.
555
 * @param dst_flags_mask Destination address space area flags mask.
1235 jermar 556
 *
2007 jermar 557
 * @return Zero on success or ENOENT if there is no such task or if there is no
558
 * such address space area, EPERM if there was a problem in accepting the area
559
 * or ENOMEM if there was a problem in allocating destination address space
560
 * area. ENOTSUP is returned if the address space area backend does not support
2015 jermar 561
 * sharing or if the kernel detects an attempt to create an illegal address
562
 * alias.
1235 jermar 563
 */
1780 jermar 564
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
565
          as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
1235 jermar 566
{
567
    ipl_t ipl;
1239 jermar 568
    int src_flags;
569
    size_t src_size;
570
    as_area_t *src_area, *dst_area;
1413 jermar 571
    share_info_t *sh_info;
1424 jermar 572
    mem_backend_t *src_backend;
573
    mem_backend_data_t src_backend_data;
1434 palkovsky 574
 
1235 jermar 575
    ipl = interrupts_disable();
1380 jermar 576
    mutex_lock(&src_as->lock);
1329 palkovsky 577
    src_area = find_area_and_lock(src_as, src_base);
1239 jermar 578
    if (!src_area) {
1238 jermar 579
        /*
580
         * Could not find the source address space area.
581
         */
1380 jermar 582
        mutex_unlock(&src_as->lock);
1238 jermar 583
        interrupts_restore(ipl);
584
        return ENOENT;
585
    }
2007 jermar 586
 
1424 jermar 587
    if (!src_area->backend || !src_area->backend->share) {
1413 jermar 588
        /*
1851 jermar 589
         * There is no backend or the backend does not
1424 jermar 590
         * know how to share the area.
1413 jermar 591
         */
592
        mutex_unlock(&src_area->lock);
593
        mutex_unlock(&src_as->lock);
594
        interrupts_restore(ipl);
595
        return ENOTSUP;
596
    }
597
 
1239 jermar 598
    src_size = src_area->pages * PAGE_SIZE;
599
    src_flags = src_area->flags;
1424 jermar 600
    src_backend = src_area->backend;
601
    src_backend_data = src_area->backend_data;
1544 palkovsky 602
 
603
    /* Share the cacheable flag from the original mapping */
604
    if (src_flags & AS_AREA_CACHEABLE)
605
        dst_flags_mask |= AS_AREA_CACHEABLE;
606
 
1461 palkovsky 607
    if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
1413 jermar 608
        mutex_unlock(&src_area->lock);
609
        mutex_unlock(&src_as->lock);
1235 jermar 610
        interrupts_restore(ipl);
611
        return EPERM;
612
    }
1413 jermar 613
 
2015 jermar 614
#ifdef CONFIG_VIRT_IDX_DCACHE
615
    if (!(dst_flags_mask & AS_AREA_EXEC)) {
616
        if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
617
            /*
618
             * Refuse to create an illegal address alias.
619
             */
620
            mutex_unlock(&src_area->lock);
621
            mutex_unlock(&src_as->lock);
622
            interrupts_restore(ipl);
623
            return ENOTSUP;
624
        }
625
    }
626
#endif /* CONFIG_VIRT_IDX_DCACHE */
627
 
1235 jermar 628
    /*
1413 jermar 629
     * Now we are committed to sharing the area.
1954 jermar 630
     * First, prepare the area for sharing.
1413 jermar 631
     * Then it will be safe to unlock it.
632
     */
633
    sh_info = src_area->sh_info;
634
    if (!sh_info) {
635
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
636
        mutex_initialize(&sh_info->lock);
637
        sh_info->refcount = 2;
638
        btree_create(&sh_info->pagemap);
639
        src_area->sh_info = sh_info;
640
    } else {
641
        mutex_lock(&sh_info->lock);
642
        sh_info->refcount++;
643
        mutex_unlock(&sh_info->lock);
644
    }
645
 
1424 jermar 646
    src_area->backend->share(src_area);
1413 jermar 647
 
648
    mutex_unlock(&src_area->lock);
649
    mutex_unlock(&src_as->lock);
650
 
651
    /*
1239 jermar 652
     * Create copy of the source address space area.
653
     * The destination area is created with AS_AREA_ATTR_PARTIAL
654
     * attribute set which prevents a race condition with
655
     * preliminary as_page_fault() calls.
1417 jermar 656
     * The flags of the source area are masked against dst_flags_mask
657
     * to support sharing in less privileged mode.
1235 jermar 658
     */
1461 palkovsky 659
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
1424 jermar 660
                  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
1239 jermar 661
    if (!dst_area) {
1235 jermar 662
        /*
663
         * Destination address space area could not be created.
664
         */
1413 jermar 665
        sh_info_remove_reference(sh_info);
666
 
1235 jermar 667
        interrupts_restore(ipl);
668
        return ENOMEM;
669
    }
2009 jermar 670
 
1235 jermar 671
    /*
1239 jermar 672
     * Now the destination address space area has been
673
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
1413 jermar 674
     * attribute and set the sh_info.
1239 jermar 675
     */
2009 jermar 676
    mutex_lock(&dst_as->lock); 
1380 jermar 677
    mutex_lock(&dst_area->lock);
1239 jermar 678
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1413 jermar 679
    dst_area->sh_info = sh_info;
1380 jermar 680
    mutex_unlock(&dst_area->lock);
2009 jermar 681
    mutex_unlock(&dst_as->lock);   
682
 
1235 jermar 683
    interrupts_restore(ipl);
684
 
685
    return 0;
686
}
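/*
 * Illustrative sketch, not part of the original file: sharing an existing
 * area from address space src_as into dst_as, read-only. The upper-case
 * names are placeholders; acc_size must equal the size of the source area
 * exactly, otherwise EPERM is returned.
 *
 *  int rc = as_area_share(src_as, SRC_BASE, SRC_SIZE,
 *      dst_as, DST_BASE, AS_AREA_READ | AS_AREA_CACHEABLE);
 */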
687
 
1423 jermar 688
/** Check access mode for address space area.
689
 *
690
 * The address space area must be locked prior to this call.
691
 *
692
 * @param area Address space area.
693
 * @param access Access mode.
694
 *
695
 * @return False if access violates area's permissions, true otherwise.
696
 */
697
bool as_area_check_access(as_area_t *area, pf_access_t access)
698
{
699
    int flagmap[] = {
700
        [PF_ACCESS_READ] = AS_AREA_READ,
701
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
702
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
703
    };
704
 
705
    if (!(area->flags & flagmap[access]))
706
        return false;
707
 
708
    return true;
709
}
710
 
703 jermar 711
/** Handle page fault within the current address space.
712
 *
1409 jermar 713
 * This is the high-level page fault handler. It decides
714
 * whether the page fault can be resolved by any backend
715
 * and if so, it invokes the backend to resolve the page
716
 * fault.
717
 *
703 jermar 718
 * Interrupts are assumed disabled.
719
 *
720
 * @param page Faulting page.
1411 jermar 721
 * @param access Access mode that caused the fault (i.e. read/write/exec).
1288 jermar 722
 * @param istate Pointer to interrupted state.
703 jermar 723
 *
1409 jermar 724
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
725
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
703 jermar 726
 */
1780 jermar 727
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
703 jermar 728
{
1044 jermar 729
    pte_t *pte;
977 jermar 730
    as_area_t *area;
703 jermar 731
 
1380 jermar 732
    if (!THREAD)
1409 jermar 733
        return AS_PF_FAULT;
1380 jermar 734
 
703 jermar 735
    ASSERT(AS);
1044 jermar 736
 
1380 jermar 737
    mutex_lock(&AS->lock);
977 jermar 738
    area = find_area_and_lock(AS, page);   
703 jermar 739
    if (!area) {
740
        /*
741
         * No area contained a mapping for 'page'.
742
         * Signal page fault to low-level handler.
743
         */
1380 jermar 744
        mutex_unlock(&AS->lock);
1288 jermar 745
        goto page_fault;
703 jermar 746
    }
747
 
1239 jermar 748
    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
749
        /*
750
         * The address space area is not fully initialized.
751
         * Avoid possible race by returning error.
752
         */
1380 jermar 753
        mutex_unlock(&area->lock);
754
        mutex_unlock(&AS->lock);
1288 jermar 755
        goto page_fault;       
1239 jermar 756
    }
757
 
1424 jermar 758
    if (!area->backend || !area->backend->page_fault) {
1409 jermar 759
        /*
760
         * The address space area is not backed by any backend
761
         * or the backend cannot handle page faults.
762
         */
763
        mutex_unlock(&area->lock);
764
        mutex_unlock(&AS->lock);
765
        goto page_fault;       
766
    }
1179 jermar 767
 
1044 jermar 768
    page_table_lock(AS, false);
769
 
703 jermar 770
    /*
1044 jermar 771
     * To avoid a race condition between two page faults
772
     * on the same address, we need to make sure
773
     * the mapping has not already been inserted.
774
     */
775
    if ((pte = page_mapping_find(AS, page))) {
776
        if (PTE_PRESENT(pte)) {
1423 jermar 777
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
778
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
779
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
780
                page_table_unlock(AS, false);
781
                mutex_unlock(&area->lock);
782
                mutex_unlock(&AS->lock);
783
                return AS_PF_OK;
784
            }
1044 jermar 785
        }
786
    }
1409 jermar 787
 
1044 jermar 788
    /*
1409 jermar 789
     * Resort to the backend page fault handler.
703 jermar 790
     */
1424 jermar 791
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
1409 jermar 792
        page_table_unlock(AS, false);
793
        mutex_unlock(&area->lock);
794
        mutex_unlock(&AS->lock);
795
        goto page_fault;
796
    }
703 jermar 797
 
1044 jermar 798
    page_table_unlock(AS, false);
1380 jermar 799
    mutex_unlock(&area->lock);
800
    mutex_unlock(&AS->lock);
1288 jermar 801
    return AS_PF_OK;
802
 
803
page_fault:
804
    if (THREAD->in_copy_from_uspace) {
805
        THREAD->in_copy_from_uspace = false;
1780 jermar 806
        istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
1288 jermar 807
    } else if (THREAD->in_copy_to_uspace) {
808
        THREAD->in_copy_to_uspace = false;
1780 jermar 809
        istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
1288 jermar 810
    } else {
811
        return AS_PF_FAULT;
812
    }
813
 
814
    return AS_PF_DEFER;
703 jermar 815
}
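/*
 * Illustrative sketch, not part of the original file: the expected shape of
 * a low-level, architecture-specific fault handler calling in here.
 * fault_address and the surrounding handler are hypothetical.
 *
 *  if (as_page_fault(ALIGN_DOWN(fault_address, PAGE_SIZE), PF_ACCESS_READ,
 *      istate) == AS_PF_FAULT) {
 *      // the fault could not be resolved: report it and kill the task,
 *      // or panic if it originated in the kernel
 *  }
 *  // AS_PF_OK and AS_PF_DEFER need no further action from the caller.
 */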
816
 
823 jermar 817
/** Switch address spaces.
703 jermar 818
 *
1380 jermar 819
 * Note that this function cannot sleep as it is essentially a part of
1415 jermar 820
 * scheduling. Sleeping here would lead to deadlock on wakeup.
1380 jermar 821
 *
823 jermar 822
 * @param old Old address space or NULL.
823
 * @param new New address space.
703 jermar 824
 */
823 jermar 825
void as_switch(as_t *old, as_t *new)
703 jermar 826
{
827
    ipl_t ipl;
823 jermar 828
    bool needs_asid = false;
703 jermar 829
 
830
    ipl = interrupts_disable();
1415 jermar 831
    spinlock_lock(&inactive_as_with_asid_lock);
703 jermar 832
 
833
    /*
823 jermar 834
     * First, take care of the old address space.
835
     */
836
    if (old) {
1380 jermar 837
        mutex_lock_active(&old->lock);
1415 jermar 838
        ASSERT(old->cpu_refcount);
839
        if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
823 jermar 840
            /*
841
             * The old address space is no longer active on
842
             * any processor. It can be appended to the
843
             * list of inactive address spaces with assigned
844
             * ASID.
845
             */
846
             ASSERT(old->asid != ASID_INVALID);
847
             list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
848
        }
1380 jermar 849
        mutex_unlock(&old->lock);
1890 jermar 850
 
851
        /*
852
         * Perform architecture-specific tasks when the address space
853
         * is being removed from the CPU.
854
         */
855
        as_deinstall_arch(old);
823 jermar 856
    }
857
 
858
    /*
859
     * Second, prepare the new address space.
860
     */
1380 jermar 861
    mutex_lock_active(&new->lock);
1415 jermar 862
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
823 jermar 863
        if (new->asid != ASID_INVALID)
864
            list_remove(&new->inactive_as_with_asid_link);
865
        else
866
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
867
    }
868
    SET_PTL0_ADDRESS(new->page_table);
1380 jermar 869
    mutex_unlock(&new->lock);
823 jermar 870
 
871
    if (needs_asid) {
872
        /*
873
         * Allocation of new ASID was deferred
874
         * until now in order to avoid deadlock.
875
         */
876
        asid_t asid;
877
 
878
        asid = asid_get();
1380 jermar 879
        mutex_lock_active(&new->lock);
823 jermar 880
        new->asid = asid;
1380 jermar 881
        mutex_unlock(&new->lock);
823 jermar 882
    }
1415 jermar 883
    spinlock_unlock(&inactive_as_with_asid_lock);
823 jermar 884
    interrupts_restore(ipl);
885
 
886
    /*
703 jermar 887
     * Perform architecture-specific steps.
727 jermar 888
     * (e.g. write ASID to hardware register etc.)
703 jermar 889
     */
823 jermar 890
    as_install_arch(new);
703 jermar 891
 
823 jermar 892
    AS = new;
703 jermar 893
}
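/*
 * Illustrative note, not part of the original file: the scheduler is the
 * intended caller. When switching to a thread that belongs to a different
 * task, it would do something along the lines of
 * as_switch(old_task_as, new_task_as); as_switch() updates AS itself, so
 * the caller only provides the old and new address space pointers.
 */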
754 jermar 894
 
1235 jermar 895
/** Convert address space area flags to page flags.
754 jermar 896
 *
1235 jermar 897
 * @param aflags Flags of some address space area.
754 jermar 898
 *
1235 jermar 899
 * @return Flags to be passed to page_mapping_insert().
754 jermar 900
 */
1235 jermar 901
int area_flags_to_page_flags(int aflags)
754 jermar 902
{
903
    int flags;
904
 
1178 jermar 905
    flags = PAGE_USER | PAGE_PRESENT;
754 jermar 906
 
1235 jermar 907
    if (aflags & AS_AREA_READ)
1026 jermar 908
        flags |= PAGE_READ;
909
 
1235 jermar 910
    if (aflags & AS_AREA_WRITE)
1026 jermar 911
        flags |= PAGE_WRITE;
912
 
1235 jermar 913
    if (aflags & AS_AREA_EXEC)
1026 jermar 914
        flags |= PAGE_EXEC;
915
 
1424 jermar 916
    if (aflags & AS_AREA_CACHEABLE)
1178 jermar 917
        flags |= PAGE_CACHEABLE;
918
 
754 jermar 919
    return flags;
920
}
756 jermar 921
 
1235 jermar 922
/** Compute flags for the virtual address translation subsystem.
923
 *
924
 * The address space area must be locked.
925
 * Interrupts must be disabled.
926
 *
927
 * @param a Address space area.
928
 *
929
 * @return Flags to be used in page_mapping_insert().
930
 */
1409 jermar 931
int as_area_get_flags(as_area_t *a)
1235 jermar 932
{
933
    return area_flags_to_page_flags(a->flags);
934
}
935
 
756 jermar 936
/** Create page table.
937
 *
938
 * Depending on architecture, create either address space
939
 * private or global page table.
940
 *
941
 * @param flags Flags saying whether the page table is for kernel address space.
942
 *
943
 * @return First entry of the page table.
944
 */
945
pte_t *page_table_create(int flags)
946
{
947
        ASSERT(as_operations);
948
        ASSERT(as_operations->page_table_create);
949
 
950
        return as_operations->page_table_create(flags);
951
}
977 jermar 952
 
1468 jermar 953
/** Destroy page table.
954
 *
955
 * Destroy page table in architecture specific way.
956
 *
957
 * @param page_table Physical address of PTL0.
958
 */
959
void page_table_destroy(pte_t *page_table)
960
{
961
        ASSERT(as_operations);
962
        ASSERT(as_operations->page_table_destroy);
963
 
964
        as_operations->page_table_destroy(page_table);
965
}
966
 
1044 jermar 967
/** Lock page table.
968
 *
969
 * This function should be called before any page_mapping_insert(),
970
 * page_mapping_remove() and page_mapping_find().
971
 *
972
 * Locking order is such that address space areas must be locked
973
 * prior to this call. Address space can be locked prior to this
974
 * call in which case the lock argument is false.
975
 *
976
 * @param as Address space.
1248 jermar 977
 * @param lock If false, do not attempt to lock as->lock.
1044 jermar 978
 */
979
void page_table_lock(as_t *as, bool lock)
980
{
981
    ASSERT(as_operations);
982
    ASSERT(as_operations->page_table_lock);
983
 
984
    as_operations->page_table_lock(as, lock);
985
}
986
 
987
/** Unlock page table.
988
 *
989
 * @param as Address space.
1248 jermar 990
 * @param unlock If false, do not attempt to unlock as->lock.
1044 jermar 991
 */
992
void page_table_unlock(as_t *as, bool unlock)
993
{
994
    ASSERT(as_operations);
995
    ASSERT(as_operations->page_table_unlock);
996
 
997
    as_operations->page_table_unlock(as, unlock);
998
}
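/*
 * Illustrative sketch, not part of the original file: the page table locking
 * pattern used throughout this file when as->lock is already held (hence
 * the false argument):
 *
 *  page_table_lock(as, false);
 *  pte = page_mapping_find(as, page);
 *  // ... examine or modify the mapping ...
 *  page_table_unlock(as, false);
 */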
999
 
977 jermar 1000
 
1001
/** Find address space area and lock it.
1002
 *
1003
 * The address space must be locked and interrupts must be disabled.
1004
 *
1005
 * @param as Address space.
1006
 * @param va Virtual address.
1007
 *
1008
 * @return Locked address space area containing va on success or NULL on failure.
1009
 */
1780 jermar 1010
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
977 jermar 1011
{
1012
    as_area_t *a;
1147 jermar 1013
    btree_node_t *leaf, *lnode;
1014
    int i;
977 jermar 1015
 
1147 jermar 1016
    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1017
    if (a) {
1018
        /* va is the base address of an address space area */
1380 jermar 1019
        mutex_lock(&a->lock);
1147 jermar 1020
        return a;
1021
    }
1022
 
1023
    /*
1150 jermar 1024
     * Search the leaf node and the rightmost record of its left neighbour
1147 jermar 1025
     * to find out whether this is a miss or va belongs to an address
1026
     * space area found there.
1027
     */
1028
 
1029
    /* First, search the leaf node itself. */
1030
    for (i = 0; i < leaf->keys; i++) {
1031
        a = (as_area_t *) leaf->value[i];
1380 jermar 1032
        mutex_lock(&a->lock);
1147 jermar 1033
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1034
            return a;
1035
        }
1380 jermar 1036
        mutex_unlock(&a->lock);
1147 jermar 1037
    }
977 jermar 1038
 
1147 jermar 1039
    /*
1150 jermar 1040
     * Second, locate the left neighbour and test its last record.
1148 jermar 1041
     * Because of its position in the B+tree, it must have base < va.
1147 jermar 1042
     */
1150 jermar 1043
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1147 jermar 1044
        a = (as_area_t *) lnode->value[lnode->keys - 1];
1380 jermar 1045
        mutex_lock(&a->lock);
1147 jermar 1046
        if (va < a->base + a->pages * PAGE_SIZE) {
1048 jermar 1047
            return a;
1147 jermar 1048
        }
1380 jermar 1049
        mutex_unlock(&a->lock);
977 jermar 1050
    }
1051
 
1052
    return NULL;
1053
}
1048 jermar 1054
 
1055
/** Check area conflicts with other areas.
1056
 *
1057
 * The address space must be locked and interrupts must be disabled.
1058
 *
1059
 * @param as Address space.
1060
 * @param va Starting virtual address of the area being tested.
1061
 * @param size Size of the area being tested.
1062
 * @param avoid_area Do not touch this area.
1063
 *
1064
 * @return True if there is no conflict, false otherwise.
1065
 */
1780 jermar 1066
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
1048 jermar 1067
{
1068
    as_area_t *a;
1147 jermar 1069
    btree_node_t *leaf, *node;
1070
    int i;
1048 jermar 1071
 
1070 jermar 1072
    /*
1073
     * We don't want any area to have conflicts with the NULL page.
1074
     */
1075
    if (overlaps(va, size, NULL, PAGE_SIZE))
1076
        return false;
1077
 
1147 jermar 1078
    /*
1079
     * The leaf node is found in O(log n), where n is proportional to
1080
     * the number of address space areas belonging to as.
1081
     * The check for conflicts is then attempted on the rightmost
1150 jermar 1082
     * record in the left neighbour, the leftmost record in the right
1083
     * neighbour and all records in the leaf node itself.
1147 jermar 1084
     */
1048 jermar 1085
 
1147 jermar 1086
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1087
        if (a != avoid_area)
1088
            return false;
1089
    }
1090
 
1091
    /* First, check the two border cases. */
1150 jermar 1092
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1147 jermar 1093
        a = (as_area_t *) node->value[node->keys - 1];
1380 jermar 1094
        mutex_lock(&a->lock);
1147 jermar 1095
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1096
            mutex_unlock(&a->lock);
1147 jermar 1097
            return false;
1098
        }
1380 jermar 1099
        mutex_unlock(&a->lock);
1147 jermar 1100
    }
1150 jermar 1101
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1147 jermar 1102
        a = (as_area_t *) node->value[0];
1380 jermar 1103
        mutex_lock(&a->lock);
1147 jermar 1104
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1105
            mutex_unlock(&a->lock);
1147 jermar 1106
            return false;
1107
        }
1380 jermar 1108
        mutex_unlock(&a->lock);
1147 jermar 1109
    }
1110
 
1111
    /* Second, check the leaf node. */
1112
    for (i = 0; i < leaf->keys; i++) {
1113
        a = (as_area_t *) leaf->value[i];
1114
 
1048 jermar 1115
        if (a == avoid_area)
1116
            continue;
1147 jermar 1117
 
1380 jermar 1118
        mutex_lock(&a->lock);
1147 jermar 1119
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1120
            mutex_unlock(&a->lock);
1147 jermar 1121
            return false;
1122
        }
1380 jermar 1123
        mutex_unlock(&a->lock);
1048 jermar 1124
    }
1125
 
1070 jermar 1126
    /*
1127
     * So far, the area does not conflict with other areas.
1128
     * Check that it does not conflict with the kernel address space.
1129
     */  
1130
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1131
        return !overlaps(va, size,
1132
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1133
    }
1134
 
1048 jermar 1135
    return true;
1136
}
1235 jermar 1137
 
1380 jermar 1138
/** Return size of the address space area with given base.  */
1780 jermar 1139
size_t as_get_size(uintptr_t base)
1329 palkovsky 1140
{
1141
    ipl_t ipl;
1142
    as_area_t *src_area;
1143
    size_t size;
1144
 
1145
    ipl = interrupts_disable();
1146
    src_area = find_area_and_lock(AS, base);
1147
    if (src_area){
1148
        size = src_area->pages * PAGE_SIZE;
1380 jermar 1149
        mutex_unlock(&src_area->lock);
1329 palkovsky 1150
    } else {
1151
        size = 0;
1152
    }
1153
    interrupts_restore(ipl);
1154
    return size;
1155
}
1156
 
1387 jermar 1157
/** Mark portion of address space area as used.
1158
 *
1159
 * The address space area must be already locked.
1160
 *
1161
 * @param a Address space area.
1162
 * @param page First page to be marked.
1163
 * @param count Number of pages to be marked.
1164
 *
1165
 * @return 0 on failure and 1 on success.
1166
 */
1780 jermar 1167
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1168
{
1169
    btree_node_t *leaf, *node;
1170
    count_t pages;
1171
    int i;
1172
 
1173
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1174
    ASSERT(count);
1175
 
1176
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1177
    if (pages) {
1178
        /*
1179
         * We hit the beginning of some used space.
1180
         */
1181
        return 0;
1182
    }
1183
 
1437 jermar 1184
    if (!leaf->keys) {
1185
        btree_insert(&a->used_space, page, (void *) count, leaf);
1186
        return 1;
1187
    }
1188
 
1387 jermar 1189
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1190
    if (node) {
1780 jermar 1191
        uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
1387 jermar 1192
        count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
1193
 
1194
        /*
1195
         * Examine the possibility that the interval fits
1196
         * somewhere between the rightmost interval of
1197
         * the left neighbour and the first interval of the leaf.
1198
         */
1199
 
1200
        if (page >= right_pg) {
1201
            /* Do nothing. */
1202
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1203
            /* The interval intersects with the left interval. */
1204
            return 0;
1205
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1206
            /* The interval intersects with the right interval. */
1207
            return 0;          
1208
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1209
            /* The interval can be added by merging the two already present intervals. */
1403 jermar 1210
            node->value[node->keys - 1] += count + right_cnt;
1387 jermar 1211
            btree_remove(&a->used_space, right_pg, leaf);
1212
            return 1;
1213
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1214
            /* The interval can be added by simply growing the left interval. */
1403 jermar 1215
            node->value[node->keys - 1] += count;
1387 jermar 1216
            return 1;
1217
        } else if (page + count*PAGE_SIZE == right_pg) {
1218
            /*
1219
             * The interval can be added by simply moving the base of the right
1220
             * interval down and increasing its size accordingly.
1221
             */
1403 jermar 1222
            leaf->value[0] += count;
1387 jermar 1223
            leaf->key[0] = page;
1224
            return 1;
1225
        } else {
1226
            /*
1227
             * The interval is between both neighbouring intervals,
1228
             * but cannot be merged with any of them.
1229
             */
1230
            btree_insert(&a->used_space, page, (void *) count, leaf);
1231
            return 1;
1232
        }
1233
    } else if (page < leaf->key[0]) {
1780 jermar 1234
        uintptr_t right_pg = leaf->key[0];
1387 jermar 1235
        count_t right_cnt = (count_t) leaf->value[0];
1236
 
1237
        /*
1238
         * Investigate the border case in which the left neighbour does not
1239
         * exist but the interval fits from the left.
1240
         */
1241
 
1242
        if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1243
            /* The interval intersects with the right interval. */
1244
            return 0;
1245
        } else if (page + count*PAGE_SIZE == right_pg) {
1246
            /*
1247
             * The interval can be added by moving the base of the right interval down
1248
             * and increasing its size accordingly.
1249
             */
1250
            leaf->key[0] = page;
1403 jermar 1251
            leaf->value[0] += count;
1387 jermar 1252
            return 1;
1253
        } else {
1254
            /*
1255
             * The interval does not adjoin the right interval.
1256
             * It must be added individually.
1257
             */
1258
            btree_insert(&a->used_space, page, (void *) count, leaf);
1259
            return 1;
1260
        }
1261
    }
1262
 
1263
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1264
    if (node) {
1780 jermar 1265
        uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
1387 jermar 1266
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
1267
 
1268
        /*
1269
         * Examine the possibility that the interval fits
1270
         * somewhere between the leftmost interval of
1271
         * the right neighbour and the last interval of the leaf.
1272
         */
1273
 
1274
        if (page < left_pg) {
1275
            /* Do nothing. */
1276
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1277
            /* The interval intersects with the left interval. */
1278
            return 0;
1279
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1280
            /* The interval intersects with the right interval. */
1281
            return 0;          
1282
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1283
            /* The interval can be added by merging the two already present intervals. */
1403 jermar 1284
            leaf->value[leaf->keys - 1] += count + right_cnt;
1387 jermar 1285
            btree_remove(&a->used_space, right_pg, node);
1286
            return 1;
1287
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1288
            /* The interval can be added by simply growing the left interval. */
1403 jermar 1289
            leaf->value[leaf->keys - 1] += count;
1387 jermar 1290
            return 1;
1291
        } else if (page + count*PAGE_SIZE == right_pg) {
1292
            /*
1293
             * The interval can be added by simply moving the base of the right
1294
             * interval down and increasing its size accordingly.
1295
             */
1403 jermar 1296
            node->value[0] += count;
1387 jermar 1297
            node->key[0] = page;
1298
            return 1;
1299
        } else {
1300
            /*
1301
             * The interval is between both neighbouring intervals,
1302
             * but cannot be merged with any of them.
1303
             */
1304
            btree_insert(&a->used_space, page, (void *) count, leaf);
1305
            return 1;
1306
        }
1307
    } else if (page >= leaf->key[leaf->keys - 1]) {
1780 jermar 1308
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 jermar 1309
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1310
 
1311
        /*
1312
         * Investigate the border case in which the right neighbour does not
1313
         * exist but the interval fits from the right.
1314
         */
1315
 
1316
        if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1403 jermar 1317
            /* The interval intersects with the left interval. */
1387 jermar 1318
            return 0;
1319
        } else if (left_pg + left_cnt*PAGE_SIZE == page) {
1320
            /* The interval can be added by growing the left interval. */
1403 jermar 1321
            leaf->value[leaf->keys - 1] += count;
1387 jermar 1322
            return 1;
1323
        } else {
1324
            /*
1325
             * The interval does not adjoin the left interval.
1326
             * It must be added individually.
1327
             */
1328
            btree_insert(&a->used_space, page, (void *) count, leaf);
1329
            return 1;
1330
        }
1331
    }
1332
 
1333
    /*
1334
     * Note that if the algorithm made it thus far, the interval can fit only
1335
     * between two other intervals of the leaf. The two border cases were already
1336
     * resolved.
1337
     */
1338
    for (i = 1; i < leaf->keys; i++) {
1339
        if (page < leaf->key[i]) {
1780 jermar 1340
            uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
1387 jermar 1341
            count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
1342
 
1343
            /*
1344
             * The interval fits between left_pg and right_pg.
1345
             */
1346
 
1347
            if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
1348
                /* The interval intersects with the left interval. */
1349
                return 0;
1350
            } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
1351
                /* The interval intersects with the right interval. */
1352
                return 0;          
1353
            } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
1354
                /* The interval can be added by merging the two already present intervals. */
1403 jermar 1355
                leaf->value[i - 1] += count + right_cnt;
1387 jermar 1356
                btree_remove(&a->used_space, right_pg, leaf);
1357
                return 1;
1358
            } else if (page == left_pg + left_cnt*PAGE_SIZE) {
1359
                /* The interval can be added by simply growing the left interval. */
1403 jermar 1360
                leaf->value[i - 1] += count;
1387 jermar 1361
                return 1;
1362
            } else if (page + count*PAGE_SIZE == right_pg) {
1363
                /*
1364
                 * The interval can be added by simply moving the base of the right
1365
                 * interval down and increasing its size accordingly.
1366
                 */
1403 jermar 1367
                leaf->value[i] += count;
1387 jermar 1368
                leaf->key[i] = page;
1369
                return 1;
1370
            } else {
1371
                /*
1372
                 * The interval is between both neighbouring intervals,
1373
                 * but cannot be merged with any of them.
1374
                 */
1375
                btree_insert(&a->used_space, page, (void *) count, leaf);
1376
                return 1;
1377
            }
1378
        }
1379
    }
1380
 
1735 decky 1381
    panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
1387 jermar 1382
}
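/*
 * Illustrative sketch, not part of the original file: a backend page fault
 * handler is the typical caller of used_space_insert(). After establishing
 * a mapping for one faulted-in page (addr and frame supplied by that
 * hypothetical caller), it would record the page as used:
 *
 *  page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
 *  if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
 *      panic("Could not insert used space.\n");
 */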
1383
 
1384
/** Mark portion of address space area as unused.
1385
 *
1386
 * The address space area must be already locked.
1387
 *
1388
 * @param a Address space area.
1389
 * @param page First page to be marked.
1390
 * @param count Number of pages to be marked.
1391
 *
1392
 * @return 0 on failure and 1 on success.
1393
 */
1780 jermar 1394
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1395
{
1396
    btree_node_t *leaf, *node;
1397
    count_t pages;
1398
    int i;
1399
 
1400
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1401
    ASSERT(count);
1402
 
1403
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1404
    if (pages) {
1405
        /*
1406
         * We are lucky, page is the beginning of some interval.
1407
         */
1408
        if (count > pages) {
1409
            return 0;
1410
        } else if (count == pages) {
1411
            btree_remove(&a->used_space, page, leaf);
1403 jermar 1412
            return 1;
1387 jermar 1413
        } else {
1414
            /*
1415
             * Find the respective interval.
1416
             * Decrease its size and relocate its start address.
1417
             */
1418
            for (i = 0; i < leaf->keys; i++) {
1419
                if (leaf->key[i] == page) {
1420
                    leaf->key[i] += count*PAGE_SIZE;
1403 jermar 1421
                    leaf->value[i] -= count;
1387 jermar 1422
                    return 1;
1423
                }
1424
            }
1425
            goto error;
1426
        }
1427
    }
1428
 
1429
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1430
    if (node && page < leaf->key[0]) {
1780 jermar 1431
        uintptr_t left_pg = node->key[node->keys - 1];
1387 jermar 1432
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1433
 
1434
        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1435
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1436
                /*
1437
                 * The interval is contained in the rightmost interval
1438
                 * of the left neighbour and can be removed by
1439
                 * updating the size of the bigger interval.
1440
                 */
1403 jermar 1441
                node->value[node->keys - 1] -= count;
1387 jermar 1442
                return 1;
1443
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1403 jermar 1444
                count_t new_cnt;
1387 jermar 1445
 
1446
                /*
1447
                 * The interval is contained in the rightmost interval
1448
                 * of the left neighbour but its removal requires
1449
                 * both updating the size of the original interval and
1450
                 * inserting a new interval.
1451
                 */
1403 jermar 1452
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1453
                node->value[node->keys - 1] -= count + new_cnt;
1387 jermar 1454
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1455
                return 1;
1456
            }
1457
        }
1458
        return 0;
1459
    } else if (page < leaf->key[0]) {
1460
        return 0;
1461
    }
1462
 
1463
    if (page > leaf->key[leaf->keys - 1]) {
1780 jermar 1464
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 jermar 1465
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1466
 
1467
        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1468
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1469
                /*
1470
                 * The interval is contained in the rightmost interval
1471
                 * of the leaf and can be removed by updating the size
1472
                 * of the bigger interval.
1473
                 */
1403 jermar 1474
                leaf->value[leaf->keys - 1] -= count;
1387 jermar 1475
                return 1;
1476
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1403 jermar 1477
                count_t new_cnt;
1387 jermar 1478
 
1479
                /*
1480
                 * The interval is contained in the rightmost interval
1481
                 * of the leaf but its removal requires both updating
1482
                 * the size of the original interval and
1483
                 * inserting a new interval.
1484
                 */
1403 jermar 1485
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1486
                leaf->value[leaf->keys - 1] -= count + new_cnt;
1387 jermar 1487
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1488
                return 1;
1489
            }
1490
        }
1491
        return 0;
1492
    }  
1493
 
1494
    /*
1495
     * The border cases have already been resolved.
1496
     * Now the interval can only be between intervals of the leaf.
1497
     */
1498
    for (i = 1; i < leaf->keys - 1; i++) {
1499
        if (page < leaf->key[i]) {
1780 jermar 1500
            uintptr_t left_pg = leaf->key[i - 1];
1387 jermar 1501
            count_t left_cnt = (count_t) leaf->value[i - 1];
1502
 
1503
            /*
1504
             * Now the interval is between intervals corresponding to (i - 1) and i.
1505
             */
1506
            if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1507
                if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1508
                    /*
1509
                     * The interval is contained in the interval (i - 1)
1510
                     * of the leaf and can be removed by updating the size
1511
                     * of the bigger interval.
1512
                     */
1403 jermar 1513
                    leaf->value[i - 1] -= count;
1387 jermar 1514
                    return 1;
1515
                } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1403 jermar 1516
                    count_t new_cnt;
1387 jermar 1517
 
1518
                    /*
1519
                     * The interval is contained in the interval (i - 1)
1520
                     * of the leaf but its removal requires both updating
1521
                     * the size of the original interval and
1522
                     * inserting a new interval.
1523
                     */
1403 jermar 1524
                    new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1525
                    leaf->value[i - 1] -= count + new_cnt;
1387 jermar 1526
                    btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
1527
                    return 1;
1528
                }
1529
            }
1530
            return 0;
1531
        }
1532
    }
1533
 
1534
error:
1735 decky 1535
    panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
1387 jermar 1536
}
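 
All of the hole-punching branches above compute new_cnt with the same expression. A standalone worked example with concrete numbers, assuming 4 KiB pages (the PAGE_SIZE and PAGE_WIDTH values below are illustrative; the kernel takes them from the architecture headers), makes the arithmetic easy to check.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE  4096
#define PAGE_WIDTH 12

int main(void)
{
    uintptr_t left_pg = 0x40000;    /* existing interval: 8 pages, 0x40000 - 0x47fff */
    size_t left_cnt = 8;

    uintptr_t page = 0x42000;       /* remove 2 pages from the middle, 0x42000 - 0x43fff */
    size_t count = 2;

    /* Pages left over behind the removed range - this becomes the new tail interval. */
    size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
        (page + count * PAGE_SIZE)) >> PAGE_WIDTH;

    /* The original interval shrinks to the pages in front of the hole... */
    left_cnt -= count + new_cnt;

    /* ...and a new interval starting right behind the hole is inserted. */
    uintptr_t new_pg = page + count * PAGE_SIZE;

    printf("front: %p, %zu pages; tail: %p, %zu pages\n",
        (void *) left_pg, left_cnt, (void *) new_pg, new_cnt);
    return 0;
}

With these numbers the front interval keeps 2 pages at 0x40000 and the tail interval gets 4 pages at 0x44000, mirroring how the original entry is shrunk and btree_insert() is called with the key page + count*PAGE_SIZE.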
1537
 
1409 jermar 1538
/** Remove reference to address space area share info.
1539
 *
1540
 * If the reference count drops to 0, the sh_info is deallocated.
1541
 *
1542
 * @param sh_info Pointer to address space area share info.
1543
 */
1544
void sh_info_remove_reference(share_info_t *sh_info)
1545
{
1546
    bool dealloc = false;
1547
 
1548
    mutex_lock(&sh_info->lock);
1549
    ASSERT(sh_info->refcount);
1550
    if (--sh_info->refcount == 0) {
1551
        dealloc = true;
1495 jermar 1552
        link_t *cur;
1409 jermar 1553
 
1554
        /*
1555
         * Now carefully walk the pagemap B+tree and free/remove the
1556
         * reference from all frames found there.
1557
         */
1495 jermar 1558
        for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1409 jermar 1559
            btree_node_t *node;
1495 jermar 1560
            int i;
1409 jermar 1561
 
1495 jermar 1562
            node = list_get_instance(cur, btree_node_t, leaf_link);
1563
            for (i = 0; i < node->keys; i++)
1780 jermar 1564
                frame_free((uintptr_t) node->value[i]);
1409 jermar 1565
        }
1566
 
1567
    }
1568
    mutex_unlock(&sh_info->lock);
1569
 
1570
    if (dealloc) {
1571
        btree_destroy(&sh_info->pagemap);
1572
        free(sh_info);
1573
    }
1574
}
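 
The function above uses a defer-the-free pattern: the decision to deallocate is made while holding sh_info->lock, but the actual cleanup happens only after the lock has been released, because the lock itself lives inside the object being freed. A hedged userspace sketch of the same pattern, using POSIX threads and an invented shared_t type (not a HelenOS interface), looks like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t lock;
    unsigned refcount;
    /* ... shared payload ... */
} shared_t;

void shared_put(shared_t *sh)
{
    bool dealloc = false;

    pthread_mutex_lock(&sh->lock);
    if (--sh->refcount == 0)
        dealloc = true;     /* do not free yet - we still hold sh->lock */
    pthread_mutex_unlock(&sh->lock);

    if (dealloc) {
        /* Safe: the count hit zero, so no other reference can exist. */
        pthread_mutex_destroy(&sh->lock);
        free(sh);
    }
}

int main(void)
{
    shared_t *sh = malloc(sizeof(shared_t));
    if (!sh)
        return 1;
    pthread_mutex_init(&sh->lock, NULL);
    sh->refcount = 1;
    shared_put(sh);         /* drops the last reference and frees the object */
    return 0;
}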
1575
 
1235 jermar 1576
/*
1577
 * Address space related syscalls.
1578
 */
1579
 
1580
/** Wrapper for as_area_create(). */
1780 jermar 1581
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1235 jermar 1582
{
1424 jermar 1583
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1780 jermar 1584
        return (unative_t) address;
1235 jermar 1585
    else
1780 jermar 1586
        return (unative_t) -1;
1235 jermar 1587
}
1588
 
1793 jermar 1589
/** Wrapper for as_area_resize(). */
1780 jermar 1590
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1235 jermar 1591
{
1780 jermar 1592
    return (unative_t) as_area_resize(AS, address, size, 0);
1235 jermar 1593
}
1594
 
1793 jermar 1595
/** Wrapper for as_area_destroy(). */
1780 jermar 1596
unative_t sys_as_area_destroy(uintptr_t address)
1306 jermar 1597
{
1780 jermar 1598
    return (unative_t) as_area_destroy(AS, address);
1306 jermar 1599
}
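 
A hedged sketch of how these wrappers might be driven from user space follows. The fake_syscall() stub, the syscall numbers, and the flag values are invented for illustration only; the real interface is provided by the HelenOS libc and the kernel syscall table. What the sketch does show is the return convention of sys_as_area_create(): the area base on success, (unative_t) -1 on failure.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum { SYS_AS_AREA_CREATE, SYS_AS_AREA_RESIZE, SYS_AS_AREA_DESTROY };

#define AS_AREA_READ    (1 << 0)    /* illustrative flag values only */
#define AS_AREA_WRITE   (1 << 1)

/* Dummy stand-in so the sketch runs in plain user space; a real program
 * would trap into the kernel and land in the sys_as_area_*() wrappers. */
static uintptr_t fake_syscall(int id, uintptr_t a1, uintptr_t a2, uintptr_t a3)
{
    printf("syscall %d(%#lx, %#lx, %#lx)\n", id, (unsigned long) a1,
        (unsigned long) a2, (unsigned long) a3);
    return a1;  /* pretend success: create echoes the requested base back */
}

int main(void)
{
    uintptr_t base = 0x20000000;
    size_t size = 16 * 4096;

    /* sys_as_area_create() returns the area base on success and -1 on failure. */
    uintptr_t addr = fake_syscall(SYS_AS_AREA_CREATE, base, size,
        AS_AREA_READ | AS_AREA_WRITE);
    if (addr == (uintptr_t) -1)
        return 1;

    /* Double the area's size, then tear it down again. */
    (void) fake_syscall(SYS_AS_AREA_RESIZE, addr, 2 * size, 0);
    (void) fake_syscall(SYS_AS_AREA_DESTROY, addr, 0, 0);
    return 0;
}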
1702 cejka 1600
 
1914 jermar 1601
/** Print out information about address space.
1602
 *
1603
 * @param as Address space.
1604
 */
1605
void as_print(as_t *as)
1606
{
1607
    ipl_t ipl;
1608
 
1609
    ipl = interrupts_disable();
1610
    mutex_lock(&as->lock);
1611
 
1612
    /* print out info about address space areas */
1613
    link_t *cur;
1614
    for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1915 jermar 1615
        btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
1914 jermar 1616
 
1617
        int i;
1618
        for (i = 0; i < node->keys; i++) {
1915 jermar 1619
            as_area_t *area = node->value[i];
1914 jermar 1620
 
1621
            mutex_lock(&area->lock);
1622
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1623
                area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
1624
            mutex_unlock(&area->lock);
1625
        }
1626
    }
1627
 
1628
    mutex_unlock(&as->lock);
1629
    interrupts_restore(ipl);
1630
}
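 
as_print() relies on the fact that the B+tree chains its leaf nodes into a circular doubly linked list headed by leaf_head, and that list_get_instance() recovers the enclosing node from its link field (the container_of idiom). A simplified, self-contained sketch of that traversal, with stand-in types invented for illustration, is shown below.

#include <stddef.h>
#include <stdio.h>

typedef struct link {
    struct link *prev;
    struct link *next;
} link_t;

typedef struct {
    link_t leaf_link;   /* linkage within the circular leaf list */
    int keys;
    int key[4];
} node_t;

/* The classic container_of trick behind list_get_instance(). */
#define list_get_instance(lnk, type, member) \
    ((type *) (((char *) (lnk)) - offsetof(type, member)))

int main(void)
{
    link_t head = { &head, &head };
    node_t a = { .keys = 2, .key = { 10, 20 } };
    node_t b = { .keys = 1, .key = { 30 } };

    /* Build the circular list head <-> a <-> b <-> head. */
    a.leaf_link.prev = &head;
    a.leaf_link.next = &b.leaf_link;
    b.leaf_link.prev = &a.leaf_link;
    b.leaf_link.next = &head;
    head.next = &a.leaf_link;
    head.prev = &b.leaf_link;

    /* The same walk as in as_print(): follow next until we wrap back to head. */
    link_t *cur;
    for (cur = head.next; cur != &head; cur = cur->next) {
        node_t *node = list_get_instance(cur, node_t, leaf_link);
        int i;
        for (i = 0; i < node->keys; i++)
            printf("key %d\n", node->key[i]);
    }
    return 0;
}

The walk visits the keys in sorted order (10, 20, 30 here), just as as_print() visits the address space areas in order of their base addresses.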
1631
 
1757 jermar 1632
/** @}
1702 cejka 1633
 */