Subversion Repositories HelenOS-historic

Diff of Rev 1403 against Rev 1409
@@ -72,10 +72,17 @@
 #include <arch/types.h>
 #include <typedefs.h>
 #include <syscall/copy.h>
 #include <arch/interrupt.h>
 
+/** This structure contains information associated with the shared address space area. */
+struct share_info {
+    mutex_t lock;       /**< This lock must be acquired only when the as_area lock is held. */
+    count_t refcount;   /**< This structure can be deallocated if refcount drops to 0. */
+    btree_t pagemap;    /**< B+tree containing complete map of anonymous pages of the shared area. */
+};
+
 as_operations_t *as_operations = NULL;
 
 /** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
 SPINLOCK_INITIALIZE(as_lock);
 
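The comments on share_info fix a two-level locking order: an area's sh_info pointer may be touched only while the as_area lock is held, and share_info.lock is taken only after that. A minimal sketch of taking an extra reference under this discipline (the helper name is hypothetical; only the fields introduced above and the mutex calls used elsewhere in this file are assumed):

    /* Hypothetical helper: the caller already holds the as_area lock,
     * as the comment on share_info.lock above requires. */
    static void share_info_add_reference(as_area_t *area)
    {
        mutex_lock(&area->sh_info->lock);
        area->sh_info->refcount++;  /* one more area maps these pages */
        mutex_unlock(&area->sh_info->lock);
    }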
@@ -87,15 +94,13 @@
 
 /** Kernel address space. */
 as_t *AS_KERNEL = NULL;
 
 static int area_flags_to_page_flags(int aflags);
-static int get_area_flags(as_area_t *a);
 static as_area_t *find_area_and_lock(as_t *as, __address va);
 static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
-static int used_space_insert(as_area_t *a, __address page, count_t count);
-static int used_space_remove(as_area_t *a, __address page, count_t count);
+static void sh_info_remove_reference(share_info_t *sh_info);
 
 /** Initialize address space subsystem. */
 void as_init(void)
 {
     as_arch_init();
@@ -146,14 +151,17 @@
  * @param as Target address space.
  * @param flags Flags of the area memory.
  * @param size Size of area.
  * @param base Base address of area.
  * @param attrs Attributes of the area.
+ * @param backend Address space area backend. NULL if no backend is used.
+ * @param backend_data NULL or a pointer to an array holding two void *.
  *
  * @return Address space area on success or NULL on failure.
  */
-as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
+as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
+           mem_backend_t *backend, void **backend_data)
 {
     ipl_t ipl;
     as_area_t *a;
     
     if (base % PAGE_SIZE)
@@ -181,10 +189,16 @@
     
     a->flags = flags;
     a->attributes = attrs;
     a->pages = SIZE2FRAMES(size);
     a->base = base;
+    a->sh_info = NULL;
+    a->backend = backend;
+    if (backend_data) {
+        a->backend_data[0] = backend_data[0];
+        a->backend_data[1] = backend_data[1];
+    }
     btree_create(&a->used_space);
     
     btree_insert(&as->as_area_btree, base, (void *) a, NULL);
 
     mutex_unlock(&as->lock);
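With the backend wired into the area at creation time, every caller now states up front how the area's pages are provided. A hedged usage sketch (the flag combination and the base variable are illustrative; &anon_backend is the backend defined near the end of this diff):

    /* Illustrative call: a one-page read/write anonymous area at a
     * page-aligned base; backend_data is NULL since anon_backend
     * does not use it. */
    as_area_t *a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE,
        PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
    if (!a)
        return (__native) -1;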
@@ -229,10 +243,20 @@
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return ENOTSUP;
     }
+    if (area->sh_info) {
+        /*
+         * Remapping of shared address space areas
+         * is not supported.
+         */
+        mutex_unlock(&area->lock);
+        mutex_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return ENOTSUP;
+    }
 
     pages = SIZE2FRAMES((address - area->base) + size);
     if (!pages) {
         /*
          * Zero size address space areas are not allowed.
300
                    pte_t *pte;
324
                    pte_t *pte;
301
           
325
           
302
                    page_table_lock(as, false);
326
                    page_table_lock(as, false);
303
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
327
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
304
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
328
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-
 
329
                    if (area->backend && area->backend->backend_frame_free) {
-
 
330
                        area->backend->backend_frame_free(area,
305
                    frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
331
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
-
 
332
                    }
306
                    page_mapping_remove(as, b + i*PAGE_SIZE);
333
                    page_mapping_remove(as, b + i*PAGE_SIZE);
307
                    page_table_unlock(as, false);
334
                    page_table_unlock(as, false);
308
                }
335
                }
309
            }
336
            }
310
        }
337
        }
@@ -389,11 +416,14 @@
             
                 for (i = 0; i < (count_t) node->value[0]; i++) {
                     page_table_lock(as, false);
                     pte = page_mapping_find(as, b + i*PAGE_SIZE);
                     ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                    frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
+                    if (area->backend && area->backend->backend_frame_free) {
+                        area->backend->backend_frame_free(area,
+                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
+                    }
                     page_mapping_remove(as, b + i*PAGE_SIZE);
                     page_table_unlock(as, false);
                 }
                 if (!used_space_remove(area, b, i))
                     panic("Could not remove used space.\n");
@@ -408,10 +438,14 @@
     tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
     tlb_invalidate_pages(AS->asid, area->base, area->pages);
     tlb_shootdown_finalize();
 
     area->attributes |= AS_AREA_ATTR_PARTIAL;
+
+    if (area->sh_info)
+        sh_info_remove_reference(area->sh_info);
+
     mutex_unlock(&area->lock);
 
     /*
      * Remove the empty area from address space.
      */
@@ -482,11 +516,11 @@
      * Create copy of the source address space area.
      * The destination area is created with AS_AREA_ATTR_PARTIAL
      * attribute set which prevents race condition with
      * preliminary as_page_fault() calls.
      */
-    dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
+    dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
     if (!dst_area) {
         /*
          * Destination address space area could not be created.
          */
         spinlock_unlock(&src_task->lock);
@@ -566,37 +600,43 @@
     area = find_area_and_lock(as, page);
     if (!area) {
         panic("Page not part of any as_area.\n");
     }
 
-    page_mapping_insert(as, page, frame, get_area_flags(area));
+    ASSERT(!area->backend);
+
+    page_mapping_insert(as, page, frame, as_area_get_flags(area));
     if (!used_space_insert(area, page, 1))
         panic("Could not insert used space.\n");
     
     mutex_unlock(&area->lock);
     page_table_unlock(as, true);
     interrupts_restore(ipl);
 }
 
 /** Handle page fault within the current address space.
  *
- * This is the high-level page fault handler.
+ * This is the high-level page fault handler. It decides
+ * whether the page fault can be resolved by any backend
+ * and if so, it invokes the backend to resolve the page
+ * fault.
+ *
  * Interrupts are assumed disabled.
  *
  * @param page Faulting page.
  * @param istate Pointer to interrupted state.
  *
- * @return 0 on page fault, 1 on success or 2 if the fault was caused by copy_to_uspace() or copy_from_uspace().
+ * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
+ *     fault was caused by copy_to_uspace() or copy_from_uspace().
  */
 int as_page_fault(__address page, istate_t *istate)
 {
     pte_t *pte;
     as_area_t *area;
-    __address frame;
     
     if (!THREAD)
-        return 0;
+        return AS_PF_FAULT;
         
     ASSERT(AS);
 
     mutex_lock(&AS->lock);
     area = find_area_and_lock(AS, page);
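The switch from bare 0/1/2 to the symbolic AS_PF_* codes makes the contract with architecture-level fault handlers explicit. A sketch of how a consumer is meant to read them, assuming only the three codes named in the doc comment above (the wrapper function itself is hypothetical):

    /* Hypothetical arch-level caller of as_page_fault(). */
    static void handle_data_fault(__address page, istate_t *istate)
    {
        switch (as_page_fault(page, istate)) {
        case AS_PF_OK:      /* mapping installed; retry the access */
        case AS_PF_DEFER:   /* retaddr redirected to a copy_*_uspace() failover */
            return;
        case AS_PF_FAULT:
            panic("Unhandled page fault at %P.\n", page);
        }
    }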
@@ -617,11 +657,19 @@
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
         goto page_fault;       
     }
 
-    ASSERT(!(area->flags & AS_AREA_DEVICE));
+    if (!area->backend || !area->backend->backend_page_fault) {
+        /*
+         * The address space area is not backed by any backend
+         * or the backend cannot handle page faults.
+         */
+        mutex_unlock(&area->lock);
+        mutex_unlock(&AS->lock);
+        goto page_fault;       
+    }
 
     page_table_lock(AS, false);
     
     /*
      * To avoid race condition between two page faults
@@ -631,49 +679,30 @@
     if ((pte = page_mapping_find(AS, page))) {
         if (PTE_PRESENT(pte)) {
             page_table_unlock(AS, false);
             mutex_unlock(&area->lock);
             mutex_unlock(&AS->lock);
-            return 1;
+            return AS_PF_OK;
         }
     }
-
-    /*
-     * In general, there can be several reasons that
-     * can have caused this fault.
-     *
-     * - non-existent mapping: the area is a scratch
-     *   area (e.g. stack) and so far has not been
-     *   allocated a frame for the faulting page
-     *
-     * - non-present mapping: another possibility,
-     *   currently not implemented, would be frame
-     *   reuse; when this becomes a possibility,
-     *   do not forget to distinguish between
-     *   the different causes
-     */
-    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-    memsetb(PA2KA(frame), FRAME_SIZE, 0);
     
     /*
-     * Map 'page' to 'frame'.
-     * Note that TLB shootdown is not attempted as only new information is being
-     * inserted into page tables.
+     * Resort to the backend page fault handler.
      */
-    page_mapping_insert(AS, page, frame, get_area_flags(area));
-    if (!used_space_insert(area, ALIGN_DOWN(page, PAGE_SIZE), 1))
-        panic("Could not insert used space.\n");
-    page_table_unlock(AS, false);
+    if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
+        page_table_unlock(AS, false);
+        mutex_unlock(&area->lock);
+        mutex_unlock(&AS->lock);
+        goto page_fault;
+    }
     
+    page_table_unlock(AS, false);
     mutex_unlock(&area->lock);
     mutex_unlock(&AS->lock);
     return AS_PF_OK;
 
 page_fault:
-    if (!THREAD)
-        return AS_PF_FAULT;
-    
     if (THREAD->in_copy_from_uspace) {
         THREAD->in_copy_from_uspace = false;
         istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
     } else if (THREAD->in_copy_to_uspace) {
         THREAD->in_copy_to_uspace = false;
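After this hunk, as_page_fault() installs no mappings itself; it only dispatches to area->backend->backend_page_fault() and propagates failure. Any backend that fills in the two mem_backend_t hooks can therefore service its own faults. A hedged sketch of an alternative backend under that contract (zero_backend and its functions are hypothetical; every call they make appears elsewhere in this diff):

    /* Hypothetical backend: each fault is satisfied with a fresh zeroed
     * frame, mirroring the contract anon_page_fault meets below. */
    static int zero_page_fault(as_area_t *area, __address addr)
    {
        __address frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
        if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
            panic("Could not insert used space.\n");
        return AS_PF_OK;
    }
    
    static void zero_frame_free(as_area_t *area, __address page, __address frame)
    {
        frame_free(ADDR2PFN(frame));
    }
    
    mem_backend_t zero_backend = {
        .backend_page_fault = zero_page_fault,
        .backend_frame_free = zero_frame_free
    };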
@@ -791,11 +820,11 @@
  *
  * @param a Address space area.
  *
  * @return Flags to be used in page_mapping_insert().
  */
-int get_area_flags(as_area_t *a)
+int as_area_get_flags(as_area_t *a)
 {
     return area_flags_to_page_flags(a->flags);
 }
 
 /** Create page table.
@@ -1379,18 +1408,163 @@
 
 error:
     panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
 }
 
+/** Remove reference to address space area share info.
+ *
+ * If the reference count drops to 0, the sh_info is deallocated.
+ *
+ * @param sh_info Pointer to address space area share info.
+ */
+void sh_info_remove_reference(share_info_t *sh_info)
+{
+    bool dealloc = false;
+
+    mutex_lock(&sh_info->lock);
+    ASSERT(sh_info->refcount);
+    if (--sh_info->refcount == 0) {
+        dealloc = true;
+        bool cond;
+        
+        /*
+         * Now walk carefully the pagemap B+tree and free/remove
+         * reference from all frames found there.
+         */
+        for (cond = true; cond;) {
+            btree_node_t *node;
+            
+            ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
+            node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
+            if ((cond = node->keys)) {
+                frame_free(ADDR2PFN((__address) node->value[0]));
+                btree_remove(&sh_info->pagemap, node->key[0], node);
+            }
+        }
+        
+    }
+    mutex_unlock(&sh_info->lock);
+    
+    if (dealloc) {
+        btree_destroy(&sh_info->pagemap);
+        free(sh_info);
+    }
+}
+
+static int anon_page_fault(as_area_t *area, __address addr);
+static void anon_frame_free(as_area_t *area, __address page, __address frame);
+
+/*
+ * Anonymous memory backend.
+ */
+mem_backend_t anon_backend = {
+    .backend_page_fault = anon_page_fault,
+    .backend_frame_free = anon_frame_free
+};
+
+/** Service a page fault in the anonymous memory address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param addr Faulting virtual address.
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
+ */
+int anon_page_fault(as_area_t *area, __address addr)
+{
+    __address frame;
+
+    if (area->sh_info) {
+        btree_node_t *leaf;
+        
+        /*
+         * The area is shared, chances are that the mapping can be found
+         * in the pagemap of the address space area share info structure.
+         * In the case that the pagemap does not contain the respective
+         * mapping, a new frame is allocated and the mapping is created.
+         */
+        mutex_lock(&area->sh_info->lock);
+        frame = (__address) btree_search(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), &leaf);
+        if (!frame) {
+            bool allocate = true;
+            int i;
+            
+            /*
+             * Zero can be returned as a valid frame address.
+             * Just a small workaround.
+             */
+            for (i = 0; i < leaf->keys; i++) {
+                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
+                    allocate = false;
+                    break;
+                }
+            }
+            if (allocate) {
+                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+                memsetb(PA2KA(frame), FRAME_SIZE, 0);
+                
+                /*
+                 * Insert the address of the newly allocated frame to the pagemap.
+                 */
+                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), (void *) frame, leaf);
+            }
+        }
+        mutex_unlock(&area->sh_info->lock);
+    } else {
+
+        /*
+         * In general, there can be several reasons that
+         * can have caused this fault.
+         *
+         * - non-existent mapping: the area is an anonymous
+         *   area (e.g. heap or stack) and so far has not been
+         *   allocated a frame for the faulting page
+         *
+         * - non-present mapping: another possibility,
+         *   currently not implemented, would be frame
+         *   reuse; when this becomes a possibility,
+         *   do not forget to distinguish between
+         *   the different causes
+         */
+        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+        memsetb(PA2KA(frame), FRAME_SIZE, 0);
+    }
+    
+    /*
+     * Map 'page' to 'frame'.
+     * Note that TLB shootdown is not attempted as only new information is being
+     * inserted into page tables.
+     */
+    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+        panic("Could not insert used space.\n");
+        
+    return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the anonymous memory backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Ignored.
+ * @param page Ignored.
+ * @param frame Frame to be released.
+ */
+void anon_frame_free(as_area_t *area, __address page, __address frame)
+{
+    frame_free(ADDR2PFN(frame));
+}
+
 /*
  * Address space related syscalls.
  */
 
 /** Wrapper for as_area_create(). */
 __native sys_as_area_create(__address address, size_t size, int flags)
 {
-    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
+    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
         return (__native) address;
     else
         return (__native) -1;
 }
 
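Taken together, the additions sketch the whole sharing path: the sharing code allocates one share_info_t for both areas, anon_page_fault() fills its pagemap lazily, and sh_info_remove_reference() tears it down when the last area goes away. A hedged sketch of the initialization step (the kernel malloc signature and mutex_initialize() are assumed from the surrounding codebase and are not part of this diff):

    /* Illustrative setup of the share_info defined at the top of this
     * diff; the caller is assumed to hold both area locks. */
    share_info_t *si = (share_info_t *) malloc(sizeof(share_info_t), 0);
    
    mutex_initialize(&si->lock);
    si->refcount = 2;            /* the source and the destination area */
    btree_create(&si->pagemap);  /* empty; filled on demand by anon_page_fault() */
    src_area->sh_info = si;
    dst_area->sh_info = si;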