Subversion Repositories HelenOS-historic

--- Rev 1409
+++ Rev 1411

Line 373... Line 373...

 int as_area_destroy(as_t *as, __address address)
 {
     as_area_t *area;
     __address base;
     ipl_t ipl;
+    bool cond;
 
     ipl = interrupts_disable();
     mutex_lock(&as->lock);
 
     area = find_area_and_lock(as, address);

Line 385... Line 386...

         interrupts_restore(ipl);
         return ENOENT;
     }
 
     base = area->base;
-    if (!(area->flags & AS_AREA_DEVICE)) {
-        bool cond;
-
-        /*
-         * Releasing physical memory.
-         * Areas mapping memory-mapped devices are treated differently than
-         * areas backing frame_alloc()'ed memory.
-         */
-
-        /*
-         * Visit only the pages mapped by used_space B+tree.
-         * Note that we must be very careful when walking the tree
-         * leaf list and removing used space as the leaf list changes
-         * unpredictibly after each remove. The solution is to actually
-         * not walk the tree at all, but to remove items from the head
-         * of the leaf list until there are some keys left.
-         */
-        for (cond = true; cond;) {
-            btree_node_t *node;
-
-            ASSERT(!list_empty(&area->used_space.leaf_head));
-            node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
-            if ((cond = (bool) node->keys)) {
-                __address b = node->key[0];
-                count_t i;
-                pte_t *pte;
-
-                for (i = 0; i < (count_t) node->value[0]; i++) {
-                    page_table_lock(as, false);
-                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
-                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                    if (area->backend && area->backend->backend_frame_free) {
-                        area->backend->backend_frame_free(area,
-                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
-                    }
-                    page_mapping_remove(as, b + i*PAGE_SIZE);
-                    page_table_unlock(as, false);
-                }
-                if (!used_space_remove(area, b, i))
-                    panic("Could not remove used space.\n");
-            }
-        }
-    }
+
+    /*
+     * Visit only the pages mapped by used_space B+tree.
+     * Note that we must be very careful when walking the tree
+     * leaf list and removing used space as the leaf list changes
+     * unpredictibly after each remove. The solution is to actually
+     * not walk the tree at all, but to remove items from the head
+     * of the leaf list until there are some keys left.
+     */
+    for (cond = true; cond;) {
+        btree_node_t *node;
+
+        ASSERT(!list_empty(&area->used_space.leaf_head));
+        node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
+        if ((cond = (bool) node->keys)) {
+            __address b = node->key[0];
+            count_t i;
+            pte_t *pte;
+
+            for (i = 0; i < (count_t) node->value[0]; i++) {
+                page_table_lock(as, false);
+                pte = page_mapping_find(as, b + i*PAGE_SIZE);
+                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
+                if (area->backend && area->backend->backend_frame_free) {
+                    area->backend->backend_frame_free(area,
+                        b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
+                }
+                page_mapping_remove(as, b + i*PAGE_SIZE);
+                page_table_unlock(as, false);
+            }
+            if (!used_space_remove(area, b, i))
+                panic("Could not remove used space.\n");
+        }
+    }
     btree_destroy(&area->used_space);
 
     /*

Line 621... Line 613...

  * fault.
  *
  * Interrupts are assumed disabled.
  *
  * @param page Faulting page.
+ * @param access Access mode that caused the fault (i.e. read/write/exec).
  * @param istate Pointer to interrupted state.
  *
  * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
  *     fault was caused by copy_to_uspace() or copy_from_uspace().
  */
-int as_page_fault(__address page, istate_t *istate)
+int as_page_fault(__address page, pf_access_t access, istate_t *istate)
 {
     pte_t *pte;
     as_area_t *area;
 
     if (!THREAD)
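
Note: with the new signature, every architecture-specific fault handler has to classify the access and pass it down. A minimal caller sketch follows (illustrative only, not part of this revision): read_fault_address() and fault_was_write() are hypothetical helpers, and PF_ACCESS_READ/PF_ACCESS_WRITE are assumed values of the new pf_access_t type.

    /* Illustrative caller sketch; helpers and PF_ACCESS_* values assumed, see note above. */
    static void example_data_fault_handler(istate_t *istate)
    {
        __address badvaddr = read_fault_address();    /* hypothetical helper */
        pf_access_t access = fault_was_write() ? PF_ACCESS_WRITE : PF_ACCESS_READ;

        /* The generic handler now needs to know how the page was accessed. */
        if (as_page_fault(badvaddr, access, istate) == AS_PF_FAULT)
            panic("Unhandled page fault.\n");
    }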

Line 686... Line 679...

     }
 
     /*
      * Resort to the backend page fault handler.
      */
-    if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
+    if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
         page_table_unlock(AS, false);
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
         goto page_fault;
     }
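
Note: the call above implies that the backend callback itself now takes the access mode. A sketch of the backend descriptor as it presumably looks after this revision; the struct name mem_backend_t is an assumption, while the two callback fields and their parameters are inferred from the calls visible in this diff.

    typedef struct {
        /* Service a page fault in the area; expected to return AS_PF_OK or AS_PF_FAULT. */
        int (*backend_page_fault)(as_area_t *area, __address addr, pf_access_t access);
        /* Release the frame backing the given page of the area. */
        void (*backend_frame_free)(as_area_t *area, __address page, __address frame);
    } mem_backend_t;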

Line 1448... Line 1441...

         btree_destroy(&sh_info->pagemap);
         free(sh_info);
     }
 }
 
-static int anon_page_fault(as_area_t *area, __address addr);
+static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
 static void anon_frame_free(as_area_t *area, __address page, __address frame);
 
 /*
  * Anonymous memory backend.
  */

Line 1465... Line 1458...

  *
  * The address space area and page tables must be already locked.
  *
  * @param area Pointer to the address space area.
  * @param addr Faulting virtual address.
+ * @param access Access mode that caused the fault (i.e. read/write/exec).
  *
  * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
  */
-int anon_page_fault(as_area_t *area, __address addr)
+int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
 {
     __address frame;
 
     if (area->sh_info) {
         btree_node_t *leaf;