Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1380 → Rev 1379

/kernel/trunk/genarch/include/mm/page_ht.h
70,7 → 70,7
};
 
extern page_mapping_operations_t ht_mapping_operations;
-extern mutex_t page_ht_lock;
+extern spinlock_t page_ht_lock;
extern hash_table_t page_ht;
extern hash_table_operations_t ht_operations;
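
Both revisions share the same pattern here: the page hash table and its lock are globals, so every hash-table page-table operation funnels through page_ht_lock. A minimal usage sketch (not repository code), assuming the rev 1379 spinlock variant and the interrupts_disable()/interrupts_restore() calls used elsewhere in this diff:

/* Sketch only: serializing a lookup in the shared page hash table.
 * Spinlocks in this kernel are held with interrupts disabled. */
ipl_t ipl;

ipl = interrupts_disable();
spinlock_lock(&page_ht_lock);
/* ... search or modify page_ht through ht_operations ... */
spinlock_unlock(&page_ht_lock);
interrupts_restore(ipl);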
 
/kernel/trunk/genarch/src/mm/as_ht.c
39,7 → 39,7
#include <typedefs.h>
#include <memstr.h>
#include <adt/hash_table.h>
-#include <synch/mutex.h>
+#include <synch/spinlock.h>
 
static pte_t *ht_create(int flags);
 
66,7 → 66,6
{
if (flags & FLAG_AS_KERNEL) {
hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
-mutex_initialize(&page_ht_lock);
}
return NULL;
}
82,8 → 81,8
void ht_lock(as_t *as, bool lock)
{
if (lock)
-mutex_lock(&as->lock);
-mutex_lock(&page_ht_lock);
+spinlock_lock(&as->lock);
+spinlock_lock(&page_ht_lock);
}
 
/** Unlock page table.
96,7 → 95,7
*/
void ht_unlock(as_t *as, bool unlock)
{
-mutex_unlock(&page_ht_lock);
+spinlock_unlock(&page_ht_lock);
if (unlock)
-mutex_unlock(&as->lock);
+spinlock_unlock(&as->lock);
}
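
ht_lock() and ht_unlock() above are the hash-table implementations of the generic page table locking operations: locks are taken in a fixed order (the address space lock first, then page_ht_lock) and dropped in reverse. A hedged caller-side sketch, assuming the generic page_table_lock()/page_table_unlock() wrappers seen later in this diff dispatch here:

/* Sketch: how callers are expected to use these operations. */
page_table_lock(as, true);          /* as->lock, then page_ht_lock */
pte = page_mapping_find(as, page);  /* safe: both locks are held   */
page_table_unlock(as, true);        /* page_ht_lock, then as->lock */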
/kernel/trunk/genarch/src/mm/asid.c
57,7 → 57,6
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
-#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>
104,7 → 103,7
list_remove(tmp);
as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
-mutex_lock_active(&as->lock);
+spinlock_lock(&as->lock);
 
/*
* Steal the ASID.
118,7 → 117,7
* was stolen by invalidating its asid member.
*/
as->asid = ASID_INVALID;
-mutex_unlock(&as->lock);
+spinlock_unlock(&as->lock);
 
/*
* Get the system rid of the stolen ASID.
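
This hunk is the heart of ASID stealing: the victim address space is taken off the inactive list, locked just long enough to invalidate its asid member, and only then are its stale TLB entries purged. A condensed sketch of the rev 1380 protocol; the tlb_shootdown_start() arguments are an assumption, not verbatim repository code:

/* Sketch: steal an ASID from an inactive address space (rev 1380 style). */
tmp = inactive_as_with_asid_head.next;
list_remove(tmp);
as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);

mutex_lock_active(&as->lock);   /* busy-wait; this path must not sleep */
asid = as->asid;
as->asid = ASID_INVALID;        /* mark the ASID as stolen             */
mutex_unlock(&as->lock);

tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);  /* assumed signature   */
tlb_shootdown_finalize();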
/kernel/trunk/genarch/src/mm/as_pt.c
35,7 → 35,6
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
-#include <synch/mutex.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/types.h>
79,7 → 78,7
*/
ipl = interrupts_disable();
-mutex_lock(&AS_KERNEL->lock);
+spinlock_lock(&AS_KERNEL->lock);
src_ptl0 = (pte_t *) PA2KA((__address) AS_KERNEL->page_table);
 
src = (__address) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
87,7 → 86,7
 
memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
memcpy((void *) dst, (void *) src, PAGE_SIZE - (src - (__address) src_ptl0));
-mutex_unlock(&AS_KERNEL->lock);
+spinlock_unlock(&AS_KERNEL->lock);
interrupts_restore(ipl);
}
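
The copy above duplicates only the kernel half of PTL0 into the freshly allocated root table: the new table is zeroed, then everything from the entry covering KERNEL_ADDRESS_SPACE_START to the end of the page-sized table is copied. A commented restatement of that arithmetic (a sketch; dst is derived symmetrically to src, its definition lies outside this hunk):

/* Sketch restating the copy arithmetic above. */
src = (__address) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
dst = (__address) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];

memsetb((__address) dst_ptl0, PAGE_SIZE, 0);   /* user half stays empty  */
memcpy((void *) dst, (void *) src,
    PAGE_SIZE - (src - (__address) src_ptl0)); /* kernel half replicated */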
 
105,7 → 104,7
void pt_lock(as_t *as, bool lock)
{
if (lock)
-mutex_lock(&as->lock);
+spinlock_lock(&as->lock);
}
 
/** Unlock page tables.
119,5 → 118,5
void pt_unlock(as_t *as, bool unlock)
{
if (unlock)
-mutex_unlock(&as->lock);
+spinlock_unlock(&as->lock);
}
/kernel/trunk/genarch/src/mm/page_ht.c
61,7 → 61,7
* after address space lock and after any address space area
* locks.
*/
-mutex_t page_ht_lock;
+SPINLOCK_INITIALIZE(page_ht_lock);
 
/**
* Page hash table.
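
The comment preserved in this hunk pins down where page_ht_lock sits in the global locking order: it must be taken after the address space lock and after any address space area locks. A sketch of a compliant acquisition sequence, in the rev 1380 mutex flavour:

/* Sketch: the only safe acquisition order per the comment above. */
mutex_lock(&as->lock);       /* 1. address space          */
mutex_lock(&area->lock);     /* 2. address space area     */
mutex_lock(&page_ht_lock);   /* 3. page hash table, last  */
/* ... update mappings in page_ht ... */
mutex_unlock(&page_ht_lock);
mutex_unlock(&area->lock);
mutex_unlock(&as->lock);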
/kernel/trunk/generic/include/synch/mutex.h
44,9 → 44,7
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
#define mutex_lock_timeout(mtx,usec) \
_mutex_lock_timeout((mtx),(usec),SYNCH_NON_BLOCKING)
-#define mutex_lock_active(mtx) \
-while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
 
extern void mutex_initialize(mutex_t *mtx);
extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock);
extern void mutex_unlock(mutex_t *mtx);
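
mutex_lock_active(), present only on the rev 1380 side of this hunk, acquires a mutex by spinning on mutex_trylock() until it returns ESYNCH_OK_ATOMIC, instead of putting the caller to sleep. That is what makes the rev 1380 mutexes usable from as_switch() and asid_get(), which run with interrupts disabled on the scheduling path and must not block. A usage sketch mirroring as_switch() later in this diff:

/* Sketch: busy-wait acquisition where sleeping is forbidden. */
ipl = interrupts_disable();
mutex_lock_active(&as->lock);  /* polls mutex_trylock(); never sleeps */
as->asid = asid;
mutex_unlock(&as->lock);
interrupts_restore(ipl);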
/kernel/trunk/generic/include/mm/as.h
44,7 → 44,6
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
-#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
 
67,10 → 66,9
#define AS_AREA_ATTR_NONE 0
#define AS_AREA_ATTR_PARTIAL 1 /* Not fully initialized area. */
 
-#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
+#define AS_PF_FAULT 0 /**< The page fault was not resolved by asp_page_fault(). */
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
-#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
-or memcpy_to_uspace(). */
+#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace(). */
 
/** Address space area structure.
*
78,7 → 76,7
* In the future, it should not be difficult to support shared areas.
*/
struct as_area {
-mutex_t lock;
+SPINLOCK_DECLARE(lock);
int flags; /**< Flags related to the memory represented by the address space area. */
int attributes; /**< Attributes related to the address space area itself. */
count_t pages; /**< Size of this area in multiples of PAGE_SIZE. */
93,10 → 91,10
* set up during system initialization.
*/
struct as {
-/** Protected by asidlock. */
+/** Protected by asidlock. Must be acquired before as->lock. */
link_t inactive_as_with_asid_link;
 
-mutex_t lock;
+SPINLOCK_DECLARE(lock);
 
/** Number of processors on which this address space is active. */
count_t refcount;
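
Both sides of this hunk agree on the nesting: the lock guarding the inactive list (asidlock in rev 1379, the global as_lock in rev 1380) is taken before any per-address-space lock. A sketch of the resulting order as it plays out in as_switch() below (rev 1380 naming):

/* Sketch: inactive-list lock first, per-as lock second. */
spinlock_lock(&as_lock);           /* protects inactive_as_with_asid_head */
mutex_lock_active(&old->lock);
if ((--old->refcount == 0) && (old != AS_KERNEL))
	list_append(&old->inactive_as_with_asid_link,
	    &inactive_as_with_asid_head);
mutex_unlock(&old->lock);
spinlock_unlock(&as_lock);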
/kernel/trunk/generic/include/typedefs.h
63,14 → 63,19
typedef struct waitq waitq_t;
typedef struct futex futex_t;
 
+typedef struct chunk chunk_t;
+
typedef struct buddy_system buddy_system_t;
typedef struct buddy_system_operations buddy_system_operations_t;
 
typedef enum as_area_type as_area_type_t;
typedef struct as_area as_area_t;
typedef struct as as_t;
 
+typedef struct link link_t;
+
+typedef char *char_ptr;
 
typedef struct the the_t;
 
typedef struct chardev chardev_t;
/kernel/trunk/generic/src/proc/scheduler.c
421,6 → 421,8
 
relink_rq(priority);
 
+spinlock_lock(&THREAD->lock);
+
/*
* If both the old and the new task are the same, lots of work is avoided.
*/
452,7 → 454,6
before_task_runs();
}
 
-spinlock_lock(&THREAD->lock);
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
/kernel/trunk/generic/src/mm/as.c
54,7 → 54,6
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
-#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
75,7 → 74,7
 
as_operations_t *as_operations = NULL;
 
-/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
+/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);
 
/**
111,7 → 110,7
 
as = (as_t *) malloc(sizeof(as_t), 0);
link_initialize(&as->inactive_as_with_asid_link);
-mutex_initialize(&as->lock);
+spinlock_initialize(&as->lock, "as_lock");
btree_create(&as->as_area_btree);
if (flags & FLAG_AS_KERNEL)
163,10 → 162,10
return NULL;
ipl = interrupts_disable();
-mutex_lock(&as->lock);
+spinlock_lock(&as->lock);
if (!check_area_conflicts(as, base, size, NULL)) {
-mutex_unlock(&as->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
return NULL;
}
173,7 → 172,7
a = (as_area_t *) malloc(sizeof(as_area_t), 0);
 
-mutex_initialize(&a->lock);
+spinlock_initialize(&a->lock, "as_area_lock");
a->flags = flags;
a->attributes = attrs;
182,7 → 181,7
btree_insert(&as->as_area_btree, base, (void *) a, NULL);
 
-mutex_unlock(&as->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
 
return a;
204,7 → 203,7
size_t pages;
ipl = interrupts_disable();
-mutex_lock(&as->lock);
+spinlock_lock(&as->lock);
/*
* Locate the area.
211,7 → 210,7
*/
area = find_area_and_lock(as, address);
if (!area) {
-mutex_unlock(&as->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
return ENOENT;
}
221,8 → 220,8
* Remapping of address space areas associated
* with memory mapped devices is not supported.
*/
-mutex_unlock(&area->lock);
-mutex_unlock(&as->lock);
+spinlock_unlock(&area->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
232,8 → 231,8
/*
* Zero size address space areas are not allowed.
*/
-mutex_unlock(&area->lock);
-mutex_unlock(&as->lock);
+spinlock_unlock(&area->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
return EPERM;
}
279,8 → 278,8
* Check for overlaps with other address space areas.
*/
if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
-mutex_unlock(&area->lock);
-mutex_unlock(&as->lock);
+spinlock_unlock(&area->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
return EADDRNOTAVAIL;
}
288,8 → 287,8
 
area->pages = pages;
-mutex_unlock(&area->lock);
-mutex_unlock(&as->lock);
+spinlock_unlock(&area->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
 
return 0;
310,11 → 309,11
int i;
 
ipl = interrupts_disable();
-mutex_lock(&as->lock);
+spinlock_lock(&as->lock);
 
area = find_area_and_lock(as, address);
if (!area) {
-mutex_unlock(&as->lock);
+spinlock_unlock(&as->lock);
interrupts_restore(ipl);
return ENOENT;
}
351,7 → 350,7
tlb_shootdown_finalize();
 
area->attributes |= AS_AREA_ATTR_PARTIAL;
-mutex_unlock(&area->lock);
+spinlock_unlock(&area->lock);
 
/*
* Remove the empty area from address space.
360,7 → 359,7
free(area);
-mutex_unlock(&AS->lock);
+spinlock_unlock(&AS->lock);
interrupts_restore(ipl);
return 0;
}
398,7 → 397,7
spinlock_lock(&src_task->lock);
src_as = src_task->as;
-mutex_lock(&src_as->lock);
+spinlock_lock(&src_as->lock);
src_area = find_area_and_lock(src_as, src_base);
if (!src_area) {
/*
405,14 → 404,14
* Could not find the source address space area.
*/
spinlock_unlock(&src_task->lock);
-mutex_unlock(&src_as->lock);
+spinlock_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOENT;
}
src_size = src_area->pages * PAGE_SIZE;
src_flags = src_area->flags;
-mutex_unlock(&src_area->lock);
-mutex_unlock(&src_as->lock);
+spinlock_unlock(&src_area->lock);
+spinlock_unlock(&src_as->lock);
 
 
if (src_size != acc_size) {
442,11 → 441,11
* Avoid deadlock by first locking the address space with lower address.
*/
if (AS < src_as) {
-mutex_lock(&AS->lock);
-mutex_lock(&src_as->lock);
+spinlock_lock(&AS->lock);
+spinlock_lock(&src_as->lock);
} else {
-mutex_lock(&AS->lock);
-mutex_lock(&src_as->lock);
+spinlock_lock(&AS->lock);
+spinlock_lock(&src_as->lock);
}
for (i = 0; i < SIZE2FRAMES(src_size); i++) {
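
The comment in this hunk names the standard deadlock-avoidance rule: when two address spaces must be held at once, lock the one at the lower address first, so concurrent senders can never acquire the pair in opposite orders. Note that the two branches shown above are identical; a sketch of the ordering the comment actually describes (rev 1380 flavour):

/* Sketch: address-ordered acquisition of two as_t locks. */
if (AS < src_as) {
	mutex_lock(&AS->lock);
	mutex_lock(&src_as->lock);
} else {
	mutex_lock(&src_as->lock);
	mutex_lock(&AS->lock);
}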
476,12 → 475,12
* fully initialized. Clear the AS_AREA_ATTR_PARTIAL
* attribute.
*/
-mutex_lock(&dst_area->lock);
+spinlock_lock(&dst_area->lock);
dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
-mutex_unlock(&dst_area->lock);
+spinlock_unlock(&dst_area->lock);
-mutex_unlock(&AS->lock);
-mutex_unlock(&src_as->lock);
+spinlock_unlock(&AS->lock);
+spinlock_unlock(&src_as->lock);
interrupts_restore(ipl);
return 0;
512,7 → 511,7
 
page_mapping_insert(as, page, frame, get_area_flags(area));
-mutex_unlock(&area->lock);
+spinlock_unlock(&area->lock);
page_table_unlock(as, true);
interrupts_restore(ipl);
}
533,12 → 532,9
as_area_t *area;
__address frame;
-if (!THREAD)
-return 0;
-ASSERT(AS);
 
-mutex_lock(&AS->lock);
+spinlock_lock(&AS->lock);
area = find_area_and_lock(AS, page);
if (!area) {
/*
545,7 → 541,7
* No area contained mapping for 'page'.
* Signal page fault to low-level handler.
*/
-mutex_unlock(&AS->lock);
+spinlock_unlock(&AS->lock);
goto page_fault;
}
 
554,8 → 550,8
* The address space area is not fully initialized.
* Avoid possible race by returning error.
*/
-mutex_unlock(&area->lock);
-mutex_unlock(&AS->lock);
+spinlock_unlock(&area->lock);
+spinlock_unlock(&AS->lock);
goto page_fault;
}
 
571,8 → 567,8
if ((pte = page_mapping_find(AS, page))) {
if (PTE_PRESENT(pte)) {
page_table_unlock(AS, false);
-mutex_unlock(&area->lock);
-mutex_unlock(&AS->lock);
+spinlock_unlock(&area->lock);
+spinlock_unlock(&AS->lock);
return 1;
}
}
602,8 → 598,8
page_mapping_insert(AS, page, frame, get_area_flags(area));
page_table_unlock(AS, false);
-mutex_unlock(&area->lock);
-mutex_unlock(&AS->lock);
+spinlock_unlock(&area->lock);
+spinlock_unlock(&AS->lock);
return AS_PF_OK;
 
page_fault:
625,9 → 621,6
 
/** Switch address spaces.
*
-* Note that this function cannot sleep as it is essentially a part of
-* the scheduling. Sleeping here would lead to deadlock on wakeup.
-*
* @param old Old address space or NULL.
* @param new New address space.
*/
643,7 → 636,7
* First, take care of the old address space.
*/
if (old) {
-mutex_lock_active(&old->lock);
+spinlock_lock(&old->lock);
ASSERT(old->refcount);
if((--old->refcount == 0) && (old != AS_KERNEL)) {
/*
655,13 → 648,13
ASSERT(old->asid != ASID_INVALID);
list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
}
-mutex_unlock(&old->lock);
+spinlock_unlock(&old->lock);
}
 
/*
* Second, prepare the new address space.
*/
-mutex_lock_active(&new->lock);
+spinlock_lock(&new->lock);
if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
if (new->asid != ASID_INVALID)
list_remove(&new->inactive_as_with_asid_link);
669,7 → 662,7
needs_asid = true; /* defer call to asid_get() until new->lock is released */
}
SET_PTL0_ADDRESS(new->page_table);
-mutex_unlock(&new->lock);
+spinlock_unlock(&new->lock);
 
if (needs_asid) {
/*
679,9 → 672,9
asid_t asid;
asid = asid_get();
-mutex_lock_active(&new->lock);
+spinlock_lock(&new->lock);
new->asid = asid;
-mutex_unlock(&new->lock);
+spinlock_unlock(&new->lock);
}
spinlock_unlock(&as_lock);
interrupts_restore(ipl);
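
The needs_asid flag a few lines up implements deferred allocation: asid_get() may itself lock other address spaces while stealing an ASID, so it must not be called with new->lock held. A condensed sketch of the pattern (rev 1380 flavour; the refcount/ASID test is simplified relative to the code above):

/* Sketch: defer a potentially lock-taking call until our lock is dropped. */
mutex_lock_active(&new->lock);
if ((new->refcount++ == 0) && (new != AS_KERNEL) && (new->asid == ASID_INVALID))
	needs_asid = true;              /* don't call asid_get() under the lock */
mutex_unlock(&new->lock);

if (needs_asid) {
	asid_t asid = asid_get();       /* may lock/steal; no locks held here   */
	mutex_lock_active(&new->lock);
	new->asid = asid;
	mutex_unlock(&new->lock);
}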
805,7 → 798,7
a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
if (a) {
/* va is the base address of an address space area */
-mutex_lock(&a->lock);
+spinlock_lock(&a->lock);
return a;
}
818,11 → 811,11
/* First, search the leaf node itself. */
for (i = 0; i < leaf->keys; i++) {
a = (as_area_t *) leaf->value[i];
-mutex_lock(&a->lock);
+spinlock_lock(&a->lock);
if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
return a;
}
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
}
 
/*
831,11 → 824,11
*/
if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
a = (as_area_t *) lnode->value[lnode->keys - 1];
-mutex_lock(&a->lock);
+spinlock_lock(&a->lock);
if (va < a->base + a->pages * PAGE_SIZE) {
return a;
}
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
}
 
return NULL;
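
find_area_and_lock() exploits the B-tree layout: if va is not an exact key, the only area that can cover it is the one with the greatest base below va, so the leaf itself and then its left neighbour are scanned. The containment test used throughout, restated as a hypothetical helper (not repository code):

/* Sketch: 'a' covers 'va' iff va lies in [a->base, a->base + size). */
static bool area_contains(as_area_t *a, __address va)
{
	return (a->base <= va) && (va < a->base + a->pages * PAGE_SIZE);
}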
880,21 → 873,21
/* First, check the two border cases. */
if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
a = (as_area_t *) node->value[node->keys - 1];
-mutex_lock(&a->lock);
+spinlock_lock(&a->lock);
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
return false;
}
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
}
if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
a = (as_area_t *) node->value[0];
-mutex_lock(&a->lock);
+spinlock_lock(&a->lock);
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
return false;
}
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
}
/* Second, check the leaf node. */
904,12 → 897,12
if (a == avoid_area)
continue;
-mutex_lock(&a->lock);
+spinlock_lock(&a->lock);
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
return false;
}
-mutex_unlock(&a->lock);
+spinlock_unlock(&a->lock);
}
 
/*
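
check_area_conflicts() leans on an overlaps() predicate whose definition lies outside this diff. One standard formulation for half-open intervals, consistent with every call site above (a hypothetical restatement, not repository code):

/* Sketch: do [s1, s1+sz1) and [s2, s2+sz2) intersect? */
static bool overlaps(__address s1, size_t sz1, __address s2, size_t sz2)
{
	return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}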
924,7 → 917,7
return true;
}
 
-/** Return size of the address space area with given base. */
+/** Return size of address space of current task pointed to by base */
size_t as_get_size(__address base)
{
ipl_t ipl;
935,7 → 928,7
src_area = find_area_and_lock(AS, base);
if (src_area){
size = src_area->pages * PAGE_SIZE;
-mutex_unlock(&src_area->lock);
+spinlock_unlock(&src_area->lock);
} else {
size = 0;
}
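
A caller-side sketch of as_get_size(): pass the base address that as_area_create() returned earlier; a result of 0 means no area with that base exists in the current address space.

/* Sketch: querying an area's size by its base address. */
size_t sz = as_get_size(base);
if (sz == 0) {
	/* no such area in AS */
}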