/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file    as.c
 * @brief   Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * The functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

/** This structure contains information associated with a shared address space area. */
struct share_info {
    mutex_t lock;       /**< This lock must be acquired only when the as_area lock is held. */
    count_t refcount;   /**< This structure can be deallocated if refcount drops to 0. */
    btree_t pagemap;    /**< B+tree containing the complete map of anonymous pages of the shared area. */
};

as_operations_t *as_operations = NULL;

/** This lock protects the inactive_as_with_asid_head list. It must be acquired before any as_t mutex. */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    mutex_initialize(&as->lock);
    btree_create(&as->as_area_btree);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->cpu_refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}
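
/*
 * Usage sketch (added for illustration; not part of the original revision):
 * a user address space is typically created when a new task is spawned and
 * released with as_free() once no processor references it.
 *
 *     as_t *as;
 *
 *     as = as_create(0);                  // ordinary user address space
 *     ASSERT(as->asid == ASID_INVALID);   // an ASID is assigned lazily by as_switch()
 *     ...
 *     as_free(as);                        // legal only when as->cpu_refcount == 0
 */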

/** Free address space.
 *
 * @param as Address space to be freed.
 */
void as_free(as_t *as)
{
    ASSERT(as->cpu_refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of the area.
 * @param base Base address of the area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
           mem_backend_t *backend, void **backend_data)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    if (!size)
        return NULL;

    /* Writable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    mutex_initialize(&a->lock);

    a->flags = flags;
    a->attributes = attrs;
    a->pages = SIZE2FRAMES(size);
    a->base = base;
    a->sh_info = NULL;
    a->backend = backend;
    if (backend_data) {
        a->backend_data[0] = backend_data[0];
        a->backend_data[1] = backend_data[1];
    }
    btree_create(&a->used_space);

    btree_insert(&as->as_area_btree, base, (void *) a, NULL);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}
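
/*
 * Usage sketch (added for illustration; the base address is hypothetical):
 * create a three-page anonymous read/write area in the current address
 * space. Page faults in the area will be serviced by anon_backend.
 *
 *     as_area_t *a;
 *
 *     a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, 3 * PAGE_SIZE,
 *         0x10000000, 0, &anon_backend, NULL);  // attrs 0: no special attributes
 *     if (!a) {
 *         // base not page-aligned, zero size, write+exec combination,
 *         // or a conflict with an existing area or the kernel address space
 *     }
 */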

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->flags & AS_AREA_DEVICE) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

    if (pages < area->pages) {
        bool cond;
        __address start_free = area->base + pages*PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&area->used_space.leaf_head));
            node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                __address b = node->key[node->keys - 1];
                count_t c = (count_t) node->value[node->keys - 1];
                int i = 0;

                if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

                    if (b + c*PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits completely
                         * in the resized address space area.
                         */
                        break;
                    }

                    /*
                     * Part of the interval corresponding to b and c
                     * overlaps with the resized address space area.
                     */

                    cond = false;   /* we are almost done */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free, c - i))
                        panic("Could not remove used space.\n");
                } else {
                    /*
                     * The interval of used space can be completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used space.\n");
                }

                for (; i < c; i++) {
                    pte_t *pte;

                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                    if (area->backend && area->backend->backend_frame_free) {
                        area->backend->backend_frame_free(area,
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b + i*PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;

    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
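
/*
 * Usage sketch (added for illustration; addresses continue the hypothetical
 * example above): grow the area to five pages, then shrink it back to one.
 * The new size is measured from the supplied address, which must lie within
 * the area.
 *
 *     int rc;
 *
 *     rc = as_area_resize(AS, 0x10000000, 5 * PAGE_SIZE, 0);    // grow
 *     if (rc == EADDRNOTAVAIL) {
 *         // the grown area would conflict with a neighbouring area
 *     }
 *     rc = as_area_resize(AS, 0x10000000, 1 * PAGE_SIZE, 0);    // shrink
 */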

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
    as_area_t *area;
    __address base;
    ipl_t ipl;
    bool cond;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    base = area->base;

    /*
     * Visit only the pages mapped by the used_space B+tree.
     * Note that we must be very careful when walking the tree
     * leaf list and removing used space as the leaf list changes
     * unpredictably after each remove. The solution is to actually
     * not walk the tree at all, but to remove items from the head
     * of the leaf list for as long as there are keys left.
     */
    for (cond = true; cond;) {
        btree_node_t *node;

        ASSERT(!list_empty(&area->used_space.leaf_head));
        node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
        if ((cond = (bool) node->keys)) {
            __address b = node->key[0];
            count_t i;
            pte_t *pte;

            for (i = 0; i < (count_t) node->value[0]; i++) {
                page_table_lock(as, false);
                pte = page_mapping_find(as, b + i*PAGE_SIZE);
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                if (area->backend && area->backend->backend_frame_free) {
                    area->backend->backend_frame_free(area,
                        b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                }
                page_mapping_remove(as, b + i*PAGE_SIZE);
                page_table_unlock(as, false);
            }
            if (!used_space_remove(area, b, i))
                panic("Could not remove used space.\n");
        }
    }
    btree_destroy(&area->used_space);

    /*
     * Invalidate TLBs.
     */
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
    tlb_invalidate_pages(as->asid, area->base, area->pages);
    tlb_shootdown_finalize();

    area->attributes |= AS_AREA_ATTR_PARTIAL;

    if (area->sh_info)
        sh_info_remove_reference(area->sh_info);

    mutex_unlock(&area->lock);

    /*
     * Remove the empty area from the address space.
     */
    btree_remove(&as->as_area_btree, base, NULL);

    free(area);

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
    return 0;
}
452
 
1413 jermar 453
/** Share address space area with another or the same address space.
1235 jermar 454
 *
1413 jermar 455
 * Address space area of anonymous memory is shared with a new address
456
 * space area. If the source address space area has not been shared so
457
 * far, a new sh_info is created and the original mapping is duplicated
458
 * in its pagemap B+tree. The new address space are simply gets the
459
 * sh_info of the source area.
460
 *
461
 * @param src_as Pointer to source address space
1239 jermar 462
 * @param src_base Base address of the source address space area.
1329 palkovsky 463
 * @param acc_size Expected size of the source area
464
 * @param dst_base Target base address
1235 jermar 465
 *
1306 jermar 466
 * @return Zero on success or ENOENT if there is no such task or
1235 jermar 467
 *     if there is no such address space area,
468
 *     EPERM if there was a problem in accepting the area or
469
 *     ENOMEM if there was a problem in allocating destination
1413 jermar 470
 *     address space area. ENOTSUP is returned if an attempt
471
 *     to share non-anonymous address space area is detected.
1235 jermar 472
 */
1413 jermar 473
int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
1329 palkovsky 474
          __address dst_base)
1235 jermar 475
{
476
    ipl_t ipl;
1239 jermar 477
    int src_flags;
478
    size_t src_size;
479
    as_area_t *src_area, *dst_area;
1413 jermar 480
    share_info_t *sh_info;
481
    link_t *cur;
1329 palkovsky 482
 
1235 jermar 483
    ipl = interrupts_disable();
1380 jermar 484
    mutex_lock(&src_as->lock);
1329 palkovsky 485
    src_area = find_area_and_lock(src_as, src_base);
1239 jermar 486
    if (!src_area) {
1238 jermar 487
        /*
488
         * Could not find the source address space area.
489
         */
1380 jermar 490
        mutex_unlock(&src_as->lock);
1238 jermar 491
        interrupts_restore(ipl);
492
        return ENOENT;
493
    }
1413 jermar 494
 
495
    if (!src_area->backend || src_area->backend != &anon_backend) {
496
        /*
497
         * As of now, only anonymous address space areas can be shared.
498
         */
499
        mutex_unlock(&src_area->lock);
500
        mutex_unlock(&src_as->lock);
501
        interrupts_restore(ipl);
502
        return ENOTSUP;
503
    }
504
 
1239 jermar 505
    src_size = src_area->pages * PAGE_SIZE;
506
    src_flags = src_area->flags;
1413 jermar 507
 
1329 palkovsky 508
    if (src_size != acc_size) {
1413 jermar 509
        mutex_unlock(&src_area->lock);
510
        mutex_unlock(&src_as->lock);
1235 jermar 511
        interrupts_restore(ipl);
512
        return EPERM;
513
    }
1413 jermar 514
 
1235 jermar 515
    /*
1413 jermar 516
     * Now we are committed to sharing the area.
517
     * First prepare the area for sharing.
518
     * Then it will be safe to unlock it.
519
     */
520
    sh_info = src_area->sh_info;
521
    if (!sh_info) {
522
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
523
        mutex_initialize(&sh_info->lock);
524
        sh_info->refcount = 2;
525
        btree_create(&sh_info->pagemap);
526
        src_area->sh_info = sh_info;
527
    } else {
528
        mutex_lock(&sh_info->lock);
529
        sh_info->refcount++;
530
        mutex_unlock(&sh_info->lock);
531
    }
532
 
533
    /*
534
     * Copy used portions of the area to sh_info's page map.
535
     */
536
    mutex_lock(&sh_info->lock);
537
    for (cur = src_area->used_space.leaf_head.next; cur != &src_area->used_space.leaf_head; cur = cur->next) {
538
        btree_node_t *node;
539
        int i;
540
 
541
        node = list_get_instance(cur, btree_node_t, leaf_link);
542
        for (i = 0; i < node->keys; i++) {
543
            __address base = node->key[i];
544
            count_t count = (count_t) node->value[i];
545
            int j;
546
 
547
            for (j = 0; j < count; j++) {
548
                pte_t *pte;
549
 
550
                page_table_lock(src_as, false);
551
                pte = page_mapping_find(src_as, base + j*PAGE_SIZE);
552
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
553
                btree_insert(&sh_info->pagemap, (base + j*PAGE_SIZE) - src_area->base,
554
                    (void *) PTE_GET_FRAME(pte), NULL);
555
                page_table_unlock(src_as, false);
556
            }
557
 
558
        }
559
    }
560
    mutex_unlock(&sh_info->lock);
561
 
562
    mutex_unlock(&src_area->lock);
563
    mutex_unlock(&src_as->lock);
564
 
565
    /*
1239 jermar 566
     * Create copy of the source address space area.
567
     * The destination area is created with AS_AREA_ATTR_PARTIAL
568
     * attribute set which prevents race condition with
569
     * preliminary as_page_fault() calls.
1235 jermar 570
     */
1409 jermar 571
    dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
1239 jermar 572
    if (!dst_area) {
1235 jermar 573
        /*
574
         * Destination address space area could not be created.
575
         */
1413 jermar 576
        sh_info_remove_reference(sh_info);
577
 
1235 jermar 578
        interrupts_restore(ipl);
579
        return ENOMEM;
580
    }
581
 
582
    /*
1239 jermar 583
     * Now the destination address space area has been
584
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
1413 jermar 585
     * attribute and set the sh_info.
1239 jermar 586
     */
1380 jermar 587
    mutex_lock(&dst_area->lock);
1239 jermar 588
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1413 jermar 589
    dst_area->sh_info = sh_info;
1380 jermar 590
    mutex_unlock(&dst_area->lock);
1235 jermar 591
 
592
    interrupts_restore(ipl);
593
 
594
    return 0;
595
}
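
/*
 * Usage sketch (added for illustration; src_as and both addresses are
 * hypothetical): share a three-page anonymous area of src_as into the
 * current address space at dst_base. Afterwards, both areas reference the
 * same share_info pagemap, so frames faulted in through one area become
 * visible through the other.
 *
 *     int rc;
 *
 *     rc = as_area_share(src_as, 0x10000000, 3 * PAGE_SIZE, 0x20000000);
 *     if (rc == ENOTSUP) {
 *         // the source area is not backed by anon_backend
 *     }
 */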

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("Page not part of any as_area.\n");
    }

    ASSERT(!area->backend);

    page_mapping_insert(as, page, frame, as_area_get_flags(area));
    if (!used_space_insert(area, page, 1))
        panic("Could not insert used space.\n");

    mutex_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;

    if (!THREAD)
        return AS_PF_FAULT;

    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained the mapping for 'page'.
         * Signal the page fault to the low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid a possible race by returning an error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (!area->backend || !area->backend->backend_page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_lock(AS, false);

    /*
     * To avoid a race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not already been inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            mutex_unlock(&area->lock);
            mutex_unlock(&AS->lock);
            return AS_PF_OK;
        }
    }

    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
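
/*
 * Usage sketch (added for illustration): an architecture-specific low-level
 * fault handler is expected to align the faulting address down to a page
 * boundary, call as_page_fault() and panic only if the fault could not be
 * resolved or deferred. PF_ACCESS_READ stands for one of the pf_access_t
 * values declared alongside this interface in mm/as.h.
 *
 *     if (as_page_fault(ALIGN_DOWN(badvaddr, PAGE_SIZE), PF_ACCESS_READ,
 *         istate) == AS_PF_FAULT) {
 *         panic("page fault at %P\n", badvaddr);
 *     }
 */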

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        mutex_lock_active(&old->lock);
        ASSERT(old->cpu_refcount);
        if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        mutex_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new->lock);
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    mutex_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of a new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new->lock);
        new->asid = asid;
        mutex_unlock(&new->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}
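
/*
 * Usage sketch (added for illustration; 'new_thread' is hypothetical): the
 * scheduler switches address spaces only when the incoming thread belongs
 * to a task with a different address space.
 *
 *     as_t *as1 = THREAD->task->as;       // outgoing address space
 *     as_t *as2 = new_thread->task->as;   // incoming address space
 *
 *     if (as1 != as2)
 *         as_switch(as1, as2);
 */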

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (!(aflags & AS_AREA_DEVICE))
        flags |= PAGE_CACHEABLE;

    return flags;
}
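
/*
 * Worked example (added for illustration): for an ordinary read/write
 * anonymous area, i.e. aflags == AS_AREA_READ | AS_AREA_WRITE, the result is
 *
 *     PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE
 *
 * because PAGE_CACHEABLE is set for every area except AS_AREA_DEVICE areas.
 */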

/** Compute flags for the virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on the architecture, create either an address space
 * private or a global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space either.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We hit the beginning of some used space.
         */
        return 0;
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node) {
        __address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
        count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the rightmost interval of
         * the left neighbour and the first interval of the leaf.
         */

        if (page >= right_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, leaf);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            node->value[node->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            leaf->value[0] += count;
            leaf->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page < leaf->key[0]) {
        __address right_pg = leaf->key[0];
        count_t right_cnt = (count_t) leaf->value[0];

        /*
         * Investigate the border case in which the left neighbour does not
         * exist but the interval fits from the left.
         */

        if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by moving the base of the right interval down
             * and increasing its size accordingly.
             */
            leaf->key[0] = page;
            leaf->value[0] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin the right interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
    if (node) {
        __address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, node);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        __address left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour does not
         * exist but the interval fits from the right.
         */

        if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (left_pg + left_cnt*PAGE_SIZE == page) {
            /* The interval can be added by growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin the left interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit only
     * between two other intervals of the leaf. The two border cases were already
     * resolved.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            __address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
            count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
                /* The interval intersects with the left interval. */
                return 0;
            } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
                /* The interval intersects with the right interval. */
                return 0;
            } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
                /* The interval can be added by merging the two already present intervals. */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&a->used_space, right_pg, leaf);
                return 1;
            } else if (page == left_pg + left_cnt*PAGE_SIZE) {
                /* The interval can be added by simply growing the left interval. */
                leaf->value[i - 1] += count;
                return 1;
            } else if (page + count*PAGE_SIZE == right_pg) {
                /*
                 * The interval can be added by simply moving the base of the right
                 * interval down and increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                return 1;
            } else {
                /*
                 * The interval is between both neighbouring intervals,
                 * but cannot be merged with any of them.
                 */
                btree_insert(&a->used_space, page, (void *) count, leaf);
                return 1;
            }
        }
    }

    panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
}
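
/*
 * Worked example (added for illustration): with the intervals [P, P+2) and
 * [P+3, P+4) already recorded (interval bounds in pages), the call
 *
 *     used_space_insert(a, P + 2*PAGE_SIZE, 1);
 *
 * fills the one-page hole and coalesces all three pieces into the single
 * interval [P, P+4), removing the right interval's B+tree record.
 */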
1274
 
1275
/** Mark portion of address space area as unused.
1276
 *
1277
 * The address space area must be already locked.
1278
 *
1279
 * @param a Address space area.
1280
 * @param page First page to be marked.
1281
 * @param count Number of page to be marked.
1282
 *
1283
 * @return 0 on failure and 1 on success.
1284
 */
1285
int used_space_remove(as_area_t *a, __address page, count_t count)
1286
{
1287
    btree_node_t *leaf, *node;
1288
    count_t pages;
1289
    int i;
1290
 
1291
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1292
    ASSERT(count);
1293
 
1294
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1295
    if (pages) {
1296
        /*
1297
         * We are lucky, page is the beginning of some interval.
1298
         */
1299
        if (count > pages) {
1300
            return 0;
1301
        } else if (count == pages) {
1302
            btree_remove(&a->used_space, page, leaf);
1403 jermar 1303
            return 1;
1387 jermar 1304
        } else {
1305
            /*
1306
             * Find the respective interval.
1307
             * Decrease its size and relocate its start address.
1308
             */
1309
            for (i = 0; i < leaf->keys; i++) {
1310
                if (leaf->key[i] == page) {
1311
                    leaf->key[i] += count*PAGE_SIZE;
1403 jermar 1312
                    leaf->value[i] -= count;
1387 jermar 1313
                    return 1;
1314
                }
1315
            }
1316
            goto error;
1317
        }
1318
    }
1319
 
1320
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1321
    if (node && page < leaf->key[0]) {
1322
        __address left_pg = node->key[node->keys - 1];
1323
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1324
 
1325
        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
1326
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
1327
                /*
1328
                 * The interval is contained in the rightmost interval
1329
                 * of the left neighbour and can be removed by
1330
                 * updating the size of the bigger interval.
1331
                 */
1403 jermar 1332
                node->value[node->keys - 1] -= count;
1387 jermar 1333
                return 1;
1334
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
1403 jermar 1335
                count_t new_cnt;
1387 jermar 1336
 
1337
                /*
1338
                 * The interval is contained in the rightmost interval
1339
                 * of the left neighbour but its removal requires
1340
                 * both updating the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    } else if (page < leaf->key[0]) {
        return 0;
    }

    if (page > leaf->key[leaf->keys - 1]) {
        __address left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf and can be removed by updating the size
                 * of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                return 1;
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;

                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf but its removal requires both updating
                 * the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    }

    /*
     * The border cases have already been resolved.
     * Now the interval can only be between intervals of the leaf.
     */
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            __address left_pg = leaf->key[i - 1];
            count_t left_cnt = (count_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding to (i - 1) and i.
             */
            if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
                if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf and can be removed by updating the size
                     * of the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    return 1;
                } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                    count_t new_cnt;

                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf but its removal requires both updating
                     * the size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                    return 1;
                }
            }
            return 0;
        }
    }

error:
    panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
}
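
/*
 * Worked example (illustrative only, not part of the kernel): consider a
 * leaf holding the interval that starts at left_pg = 0x10000 and spans
 * left_cnt = 8 pages (with 4 KiB pages, i.e. [0x10000, 0x18000)), and a
 * request to remove count = 2 pages starting at page = 0x12000. Because
 * page + count*PAGE_SIZE = 0x14000 lies strictly inside the interval,
 * the middle-split branch above fires:
 *
 *  new_cnt = (0x18000 - 0x14000) >> PAGE_WIDTH;     // 4 pages
 *  leaf->value[i - 1] -= count + new_cnt;           // 8 - 6 = 2 pages
 *  btree_insert(&a->used_space, 0x14000, (void *) 4, leaf);
 *
 * so [0x10000, 0x18000) becomes the two intervals [0x10000, 0x12000)
 * and [0x14000, 0x18000).
 */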

/** Remove a reference to an address space area share info structure.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);
    if (--sh_info->refcount == 0) {
        bool cond;

        dealloc = true;

        /*
         * Carefully walk the pagemap B+tree: free the frame and
         * remove the B+tree entry for every mapping found there.
         */
        for (cond = true; cond;) {
            btree_node_t *node;

            ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
            node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
            if ((cond = node->keys)) {
                frame_free(ADDR2PFN((__address) node->value[0]));
                btree_remove(&sh_info->pagemap, node->key[0], node);
            }
        }
    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}
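
/*
 * Usage sketch (illustrative only; the caller shown is hypothetical): code
 * that attaches another address space area to an existing sh_info takes a
 * reference under the sh_info lock and balances it later with
 * sh_info_remove_reference():
 *
 *  mutex_lock(&area->sh_info->lock);
 *  area->sh_info->refcount++;
 *  mutex_unlock(&area->sh_info->lock);
 *  ...
 *  sh_info_remove_reference(area->sh_info);    // may deallocate sh_info
 */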

static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
static void anon_frame_free(as_area_t *area, __address page, __address frame);

/*
 * Anonymous memory backend.
 */
mem_backend_t anon_backend = {
    .backend_page_fault = anon_page_fault,
    .backend_frame_free = anon_frame_free
};
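
/*
 * Dispatch sketch (illustrative only; assumes the generic fault path keeps
 * the backend pointer passed to as_area_create() in area->backend): a page
 * fault handler that has located the faulting area can forward the fault
 * to the area's backend:
 *
 *  if (area->backend && area->backend->backend_page_fault) {
 *      if (area->backend->backend_page_fault(area, addr, access) != AS_PF_OK)
 *          return AS_PF_FAULT;
 *  }
 *
 * For areas created via sys_as_area_create() below, this resolves to
 * anon_page_fault().
 */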

/** Service a page fault in the anonymous memory address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
    __address frame;

    if (area->sh_info) {
        btree_node_t *leaf;

        /*
         * The area is shared; chances are that the mapping can be found
         * in the pagemap of the address space area share info structure.
         * If the pagemap does not contain the respective mapping,
         * a new frame is allocated and the mapping is created.
         */
        mutex_lock(&area->sh_info->lock);
        frame = (__address) btree_search(&area->sh_info->pagemap,
            ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
        if (!frame) {
            bool allocate = true;
            int i;

            /*
             * Zero is a valid frame address, so a null result from
             * btree_search() is ambiguous. Scan the leaf keys to tell
             * a missing mapping apart from a mapping to frame zero.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE) - area->base) {
                    allocate = false;
                    break;
                }
            }
            if (allocate) {
                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
                memsetb(PA2KA(frame), FRAME_SIZE, 0);

                /*
                 * Insert the address of the newly allocated frame into the pagemap.
                 */
                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
            }
        }
        mutex_unlock(&area->sh_info->lock);
    } else {

        /*
         * In general, there are several possible reasons for this fault:
         *
         * - non-existent mapping: the area is an anonymous
         *   area (e.g. heap or stack) and so far has not been
         *   allocated a frame for the faulting page
         *
         * - non-present mapping: another possibility,
         *   currently not implemented, would be frame
         *   reuse; when this becomes a possibility,
         *   do not forget to distinguish between
         *   the different causes
         */
        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
    }

    /*
     * Map the faulting page to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
        panic("Could not insert used space.\n");

    return AS_PF_OK;
}
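
/*
 * Key derivation example (illustrative only): the pagemap is keyed by the
 * page's offset within the area rather than by its virtual address, so
 * address spaces that map the same shared area at different bases agree
 * on the key. With area->base = 0x40000000 and a fault at
 * addr = 0x40003c10:
 *
 *  ALIGN_DOWN(0x40003c10, PAGE_SIZE) - 0x40000000 == 0x3000
 *
 * i.e. the fourth page of the area, regardless of where the area is
 * mapped in each address space.
 */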

/** Free a frame that is backed by the anonymous memory backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Ignored.
 * @param page Ignored.
 * @param frame Frame to be released.
 */
void anon_frame_free(as_area_t *area, __address page, __address frame)
{
    frame_free(ADDR2PFN(frame));
}
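
/*
 * Call-site sketch (illustrative only; again assumes the backend pointer
 * lives in area->backend): when the generic code tears down an area, it
 * can walk the mappings recorded in used_space and hand every backed
 * frame to the backend:
 *
 *  area->backend->backend_frame_free(area, page, frame);
 *
 * which, for anonymous areas, simply returns the frame to the frame
 * allocator.
 */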

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
        return (__native) address;
    else
        return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
    return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
    return (__native) as_area_destroy(AS, address);
}
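
/*
 * Userspace usage sketch (illustrative only; the __SYSCALL3()/__SYSCALL1()
 * macros and the SYS_AS_AREA_* numbers are assumed to match the libc
 * syscall stubs): creating, growing and destroying an anonymous area
 * through the wrappers above could look like:
 *
 *  void *p = (void *) __SYSCALL3(SYS_AS_AREA_CREATE,
 *      (__native) 0x20000000, PAGE_SIZE, AS_AREA_READ | AS_AREA_WRITE);
 *  if ((__native) p != (__native) -1) {
 *      __SYSCALL3(SYS_AS_AREA_RESIZE, (__native) p, 2*PAGE_SIZE, 0);
 *      __SYSCALL1(SYS_AS_AREA_DESTROY, (__native) p);
 *  }
 *
 * A failed creation returns (__native) -1, which callers must check
 * before touching the memory.
 */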