Subversion Repositories HelenOS-historic

Diff between Rev 1423 and Rev 1424
Line 72... Line 72...
 #include <arch/types.h>
 #include <typedefs.h>
 #include <syscall/copy.h>
 #include <arch/interrupt.h>
 
-/** This structure contains information associated with the shared address space area. */
-struct share_info {
-    mutex_t lock;       /**< This lock must be acquired only when the as_area lock is held. */
-    count_t refcount;   /**< This structure can be deallocated if refcount drops to 0. */
-    btree_t pagemap;    /**< B+tree containing complete map of anonymous pages of the shared area. */
-};
-
 as_operations_t *as_operations = NULL;
 
 /** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
 SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
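The refcount comment in the removed struct documents the deallocation protocol that leaves this file together with the definition. For illustration only, a minimal sketch of the matching release path, reconstructed from that comment and from the btree_destroy()/free() tail of sh_info_remove_reference() visible in the Line 1495 hunk near the end of this diff; the body in between is assumed:

    /* Sketch: drop one reference to a share_info_t and deallocate it
     * once the last reference is gone. */
    void sh_info_remove_reference(share_info_t *sh_info)
    {
        bool dealloc = false;

        mutex_lock(&sh_info->lock);
        if (--sh_info->refcount == 0)
            dealloc = true;
        mutex_unlock(&sh_info->lock);

        if (dealloc) {
            /* No holders remain; tear down the pagemap B+tree. */
            btree_destroy(&sh_info->pagemap);
            free(sh_info);
        }
    }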
 
Line 157... Line 150...
  * @param backend_data NULL or a pointer to an array holding two void *.
  *
  * @return Address space area on success or NULL on failure.
  */
 as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
-           mem_backend_t *backend, void **backend_data)
+           mem_backend_t *backend, mem_backend_data_t *backend_data)
 {
     ipl_t ipl;
     as_area_t *a;
 
     if (base % PAGE_SIZE)
Line 185... Line 178...
 
     a = (as_area_t *) malloc(sizeof(as_area_t), 0);
 
     mutex_initialize(&a->lock);
 
+    a->as = as;
     a->flags = flags;
     a->attributes = attrs;
     a->pages = SIZE2FRAMES(size);
     a->base = base;
     a->sh_info = NULL;
     a->backend = backend;
-    if (backend_data) {
-        a->backend_data[0] = backend_data[0];
-        a->backend_data[1] = backend_data[1];
-    }
+    if (backend_data)
+        a->backend_data = *backend_data;
+    else
+        memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
+
     btree_create(&a->used_space);
 
     btree_insert(&as->as_area_btree, base, (void *) a, NULL);
 
     mutex_unlock(&as->lock);
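The backend payload changes here from an untyped two-pointer array to a mem_backend_data_t value that is copied into the area wholesale, or zeroed when absent. Its definition is not part of this diff; the sketch below is purely hypothetical, with member names invented for illustration:

    /* Hypothetical sketch only: the real mem_backend_data_t is defined
     * elsewhere and its members are not shown in this diff. */
    typedef struct mem_backend_data {
        void *elf;          /* e.g. ELF image, for an ELF-loading backend */
        void *segment;      /* e.g. program segment header */
        __address base;     /* e.g. physical base address, for phys_backend */
        count_t frames;     /* e.g. number of frames, for phys_backend */
    } mem_backend_data_t;

Copying a typed value instead of two raw void pointers lets each backend carry whatever parameters it needs without casts at every call site.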
Line 233... Line 228...
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return ENOENT;
     }
 
-    if (area->flags & AS_AREA_DEVICE) {
+    if (area->backend == &phys_backend) {
         /*
          * Remapping of address space areas associated
          * with memory mapped devices is not supported.
          */
         mutex_unlock(&area->lock);
Line 324... Line 319...
                     pte_t *pte;
 
                     page_table_lock(as, false);
                     pte = page_mapping_find(as, b + i*PAGE_SIZE);
                     ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                    if (area->backend && area->backend->backend_frame_free) {
-                        area->backend->backend_frame_free(area,
+                    if (area->backend && area->backend->frame_free) {
+                        area->backend->frame_free(area,
                             b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                     }
                     page_mapping_remove(as, b + i*PAGE_SIZE);
                     page_table_unlock(as, false);
                 }
Line 409... Line 404...
 
             for (i = 0; i < (count_t) node->value[0]; i++) {
                 page_table_lock(as, false);
                 pte = page_mapping_find(as, b + i*PAGE_SIZE);
                 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                if (area->backend && area->backend->backend_frame_free) {
-                    area->backend->backend_frame_free(area,
+                if (area->backend && area->backend->frame_free) {
+                    area->backend->frame_free(area,
                         b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                 }
                 page_mapping_remove(as, b + i*PAGE_SIZE);
                 page_table_unlock(as, false);
             }
Line 450... Line 445...
     return 0;
 }
 
 /** Share address space area with another or the same address space.
  *
- * Address space area of anonymous memory is shared with a new address
- * space area. If the source address space area has not been shared so
- * far, a new sh_info is created and the original mapping is duplicated
- * in its pagemap B+tree. The new address space are simply gets the
- * sh_info of the source area.
+ * Address space area mapping is shared with a new address space area.
+ * If the source address space area has not been shared so far,
+ * a new sh_info is created. The new address space area simply gets the
+ * sh_info of the source area. The process of duplicating the
+ * mapping is done through the backend share function.
  *
  * @param src_as Pointer to source address space.
  * @param src_base Base address of the source address space area.
  * @param acc_size Expected size of the source area.
  * @param dst_base Target base address.
477
    ipl_t ipl;
472
    ipl_t ipl;
478
    int src_flags;
473
    int src_flags;
479
    size_t src_size;
474
    size_t src_size;
480
    as_area_t *src_area, *dst_area;
475
    as_area_t *src_area, *dst_area;
481
    share_info_t *sh_info;
476
    share_info_t *sh_info;
482
    link_t *cur;
477
    mem_backend_t *src_backend;
-
 
478
    mem_backend_data_t src_backend_data;
483
 
479
 
484
    ipl = interrupts_disable();
480
    ipl = interrupts_disable();
485
    mutex_lock(&src_as->lock);
481
    mutex_lock(&src_as->lock);
486
    src_area = find_area_and_lock(src_as, src_base);
482
    src_area = find_area_and_lock(src_as, src_base);
487
    if (!src_area) {
483
    if (!src_area) {
Line 491... Line 487...
         mutex_unlock(&src_as->lock);
         interrupts_restore(ipl);
         return ENOENT;
     }
 
-    if (!src_area->backend || src_area->backend != &anon_backend) {
+    if (!src_area->backend || !src_area->backend->share) {
         /*
-         * As of now, only anonymous address space areas can be shared.
+         * There is no backend or the backend does not
+         * know how to share the area.
          */
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
         interrupts_restore(ipl);
         return ENOTSUP;
     }
 
     src_size = src_area->pages * PAGE_SIZE;
     src_flags = src_area->flags;
+    src_backend = src_area->backend;
+    src_backend_data = src_area->backend_data;
 
     if (src_size != acc_size) {
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
         interrupts_restore(ipl);
Line 529... Line 528...
         mutex_lock(&sh_info->lock);
         sh_info->refcount++;
         mutex_unlock(&sh_info->lock);
     }
 
-    /*
-     * Copy used portions of the area to sh_info's page map.
-     */
-    mutex_lock(&sh_info->lock);
-    for (cur = src_area->used_space.leaf_head.next; cur != &src_area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        int i;
-        
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-        for (i = 0; i < node->keys; i++) {
-            __address base = node->key[i];
-            count_t count = (count_t) node->value[i];
-            int j;
-            
-            for (j = 0; j < count; j++) {
-                pte_t *pte;
-            
-                page_table_lock(src_as, false);
-                pte = page_mapping_find(src_as, base + j*PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                btree_insert(&sh_info->pagemap, (base + j*PAGE_SIZE) - src_area->base,
-                    (void *) PTE_GET_FRAME(pte), NULL);
-                page_table_unlock(src_as, false);
-            }
-            
-        }
-    }
-    mutex_unlock(&sh_info->lock);
+    src_area->backend->share(src_area);
 
     mutex_unlock(&src_area->lock);
     mutex_unlock(&src_as->lock);
 
     /*
Line 570... Line 542...
      * preliminary as_page_fault() calls.
      * The flags of the source area are masked against dst_flags_mask
      * to support sharing in less privileged mode.
      */
     dst_area = as_area_create(AS, src_flags & dst_flags_mask, src_size, dst_base,
-                  AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
+                  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
     if (!dst_area) {
         /*
          * Destination address space area could not be created.
          */
         sh_info_remove_reference(sh_info);
Line 596... Line 568...
     interrupts_restore(ipl);
 
     return 0;
 }
 
-/** Initialize mapping for one page of address space.
- *
- * This functions maps 'page' to 'frame' according
- * to attributes of the address space area to
- * wich 'page' belongs.
- *
- * @param as Target address space.
- * @param page Virtual page within the area.
- * @param frame Physical frame to which page will be mapped.
- */
-void as_set_mapping(as_t *as, __address page, __address frame)
-{
-    as_area_t *area;
-    ipl_t ipl;
-    
-    ipl = interrupts_disable();
-    page_table_lock(as, true);
-    
-    area = find_area_and_lock(as, page);
-    if (!area) {
-        panic("Page not part of any as_area.\n");
-    }
-
-    ASSERT(!area->backend);
-    
-    page_mapping_insert(as, page, frame, as_area_get_flags(area));
-    if (!used_space_insert(area, page, 1))
-        panic("Could not insert used space.\n");
-    
-    mutex_unlock(&area->lock);
-    page_table_unlock(as, true);
-    interrupts_restore(ipl);
-}
-
 /** Check access mode for address space area.
  *
  * The address space area must be locked prior to this call.
  *
  * @param area Address space area.
Line 700... Line 638...
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
         goto page_fault;
     }
 
-    if (!area->backend || !area->backend->backend_page_fault) {
+    if (!area->backend || !area->backend->page_fault) {
         /*
          * The address space area is not backed by any backend
          * or the backend cannot handle page faults.
          */
         mutex_unlock(&area->lock);
Line 733... Line 671...
     }
 
     /*
      * Resort to the backend page fault handler.
      */
-    if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
+    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
         page_table_unlock(AS, false);
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
         goto page_fault;
     }
Line 852... Line 790...
         flags |= PAGE_WRITE;
 
     if (aflags & AS_AREA_EXEC)
         flags |= PAGE_EXEC;
 
-    if (!(aflags & AS_AREA_DEVICE))
+    if (aflags & AS_AREA_CACHEABLE)
         flags |= PAGE_CACHEABLE;
 
     return flags;
 }
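Cacheability becomes opt-in here: rev 1423 treated every non-device area as cacheable, while rev 1424 requires an explicit AS_AREA_CACHEABLE flag. For orientation, a sketch of the whole conversion with the rev 1424 semantics; the function name, the base flags, and the read/write branches are assumptions inferred from the fragment above, not shown in this diff:

    /* Sketch of AS_AREA_* -> PAGE_* flag conversion (rev 1424 semantics). */
    int area_flags_to_page_flags(int aflags)
    {
        int flags = PAGE_USER | PAGE_PRESENT;    /* assumed base flags */

        if (aflags & AS_AREA_READ)
            flags |= PAGE_READ;
        if (aflags & AS_AREA_WRITE)
            flags |= PAGE_WRITE;
        if (aflags & AS_AREA_EXEC)
            flags |= PAGE_EXEC;
        if (aflags & AS_AREA_CACHEABLE)          /* explicit opt-in replaces !AS_AREA_DEVICE */
            flags |= PAGE_CACHEABLE;

        return flags;
    }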
 
Line 1495... Line 1433...
         btree_destroy(&sh_info->pagemap);
         free(sh_info);
     }
 }
 
-static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void anon_frame_free(as_area_t *area, __address page, __address frame);
-
-/*
- * Anonymous memory backend.
- */
-mem_backend_t anon_backend = {
-    .backend_page_fault = anon_page_fault,
-    .backend_frame_free = anon_frame_free
-};
-
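With the backend_ prefix dropped from the operation names and a share operation added, the backend interface presumably looks like the sketch below. The member signatures are taken from the call sites in this diff (area->backend->page_fault, ->frame_free, ->share), but the exact definition lives outside this file, so treat it as an assumption:

    /* Hypothetical sketch of the operations table after this revision:
     * shorter member names plus the new share hook. */
    typedef struct mem_backend {
        int (* page_fault)(as_area_t *area, __address addr, pf_access_t access);
        void (* frame_free)(as_area_t *area, __address page, __address frame);
        void (* share)(as_area_t *area);
    } mem_backend_t;

    /* The anonymous backend initializer removed above would then become: */
    mem_backend_t anon_backend = {
        .page_fault = anon_page_fault,
        .frame_free = anon_frame_free,
        .share = anon_share            /* assumed counterpart of the share hook */
    };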
-/** Service a page fault in the anonymous memory address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Pointer to the address space area.
- * @param addr Faulting virtual address.
- * @param access Access mode that caused the fault (i.e. read/write/exec).
- *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
- */
-int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
-{
-    __address frame;
-
-    if (!as_area_check_access(area, access))
-        return AS_PF_FAULT;
-
-    if (area->sh_info) {
-        btree_node_t *leaf;
-        
-        /*
-         * The area is shared, chances are that the mapping can be found
-         * in the pagemap of the address space area share info structure.
-         * In the case that the pagemap does not contain the respective
-         * mapping, a new frame is allocated and the mapping is created.
-         */
-        mutex_lock(&area->sh_info->lock);
-        frame = (__address) btree_search(&area->sh_info->pagemap,
-            ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
-        if (!frame) {
-            bool allocate = true;
-            int i;
-            
-            /*
-             * Zero can be returned as a valid frame address.
-             * Just a small workaround.
-             */
-            for (i = 0; i < leaf->keys; i++) {
-                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
-                    allocate = false;
-                    break;
-                }
-            }
-            if (allocate) {
-                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-                memsetb(PA2KA(frame), FRAME_SIZE, 0);
-                
-                /*
-                 * Insert the address of the newly allocated frame to the pagemap.
-                 */
-                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
-            }
-        }
-        mutex_unlock(&area->sh_info->lock);
-    } else {
-
-        /*
-         * In general, there can be several reasons that
-         * can have caused this fault.
-         *
-         * - non-existent mapping: the area is an anonymous
-         *   area (e.g. heap or stack) and so far has not been
-         *   allocated a frame for the faulting page
-         *
-         * - non-present mapping: another possibility,
-         *   currently not implemented, would be frame
-         *   reuse; when this becomes a possibility,
-         *   do not forget to distinguish between
-         *   the different causes
-         */
-        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-        memsetb(PA2KA(frame), FRAME_SIZE, 0);
-    }
-    
-    /*
-     * Map 'page' to 'frame'.
-     * Note that TLB shootdown is not attempted as only new information is being
-     * inserted into page tables.
-     */
-    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
-        panic("Could not insert used space.\n");
-        
-    return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the anonymous memory backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Ignored.
- * @param page Ignored.
- * @param frame Frame to be released.
- */
-void anon_frame_free(as_area_t *area, __address page, __address frame)
-{
-    frame_free(ADDR2PFN(frame));
-}
-
 /*
  * Address space related syscalls.
  */
 
 /** Wrapper for as_area_create(). */
 __native sys_as_area_create(__address address, size_t size, int flags)
 {
-    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
+    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
         return (__native) address;
     else
         return (__native) -1;
 }
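The syscall wrapper now ORs in AS_AREA_CACHEABLE, so areas created from userspace remain cacheable under the new opt-in scheme, while kernel callers that map devices through phys_backend can simply omit the flag. A hypothetical kernel-side contrast, with the flag combinations and backend_data members invented for illustration (see the mem_backend_data_t sketch earlier):

    /* Anonymous, cacheable area, as the syscall wrapper now creates it. */
    as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL);

    /* Physically-backed device area: no AS_AREA_CACHEABLE, so the pages
     * stay uncached; backend_data members are hypothetical. */
    mem_backend_data_t backend_data;
    as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE,
        size, address, AS_AREA_ATTR_NONE, &phys_backend, &backend_data);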