Subversion Repositories: HelenOS-historic

Compare Revisions: Rev 1379 → Rev 1380

Rev 1380 converts the locks protecting address spaces (as_t), address space areas (as_area_t), and the global page hash table from spinlocks to mutexes. It introduces the mutex_lock_active() macro for code paths that must not sleep (ASID stealing and as_switch()), and moves the acquisition of THREAD->lock in scheduler() below the task-switch path, which now takes mutexes.

/kernel/trunk/genarch/include/mm/page_ht.h
70,7 → 70,7
};
 
extern page_mapping_operations_t ht_mapping_operations;
-extern spinlock_t page_ht_lock;
+extern mutex_t page_ht_lock;
extern hash_table_t page_ht;
extern hash_table_operations_t ht_operations;
 
/kernel/trunk/genarch/src/mm/as_ht.c
39,7 → 39,7
#include <typedefs.h>
#include <memstr.h>
#include <adt/hash_table.h>
-#include <synch/spinlock.h>
+#include <synch/mutex.h>
 
static pte_t *ht_create(int flags);
 
66,6 → 66,7
{
if (flags & FLAG_AS_KERNEL) {
hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
+mutex_initialize(&page_ht_lock);
}
return NULL;
}
81,8 → 82,8
void ht_lock(as_t *as, bool lock)
{
if (lock)
-spinlock_lock(&as->lock);
-spinlock_lock(&page_ht_lock);
+mutex_lock(&as->lock);
+mutex_lock(&page_ht_lock);
}
 
/** Unlock page table.
95,7 → 96,7
*/
void ht_unlock(as_t *as, bool unlock)
{
-spinlock_unlock(&page_ht_lock);
+mutex_unlock(&page_ht_lock);
if (unlock)
-spinlock_unlock(&as->lock);
+mutex_unlock(&as->lock);
}
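ht_lock() and ht_unlock() above are the hash-table implementations of the generic page-table locking operations reached through page_table_lock() and page_table_unlock(), whose call sites appear later in this revision; the dispatch through the operations structure is assumed from context, as it is not shown in this diff. A minimal sketch of the intended pairing:

    /* Paired use of the page-table locking operations on the hash-table
     * backend. With 'lock' true the address space mutex is taken as well;
     * page_ht_lock is taken unconditionally. */
    page_table_lock(as, true);              /* -> ht_lock(as, true) */
    pte_t *pte = page_mapping_find(as, page);
    if (pte && PTE_PRESENT(pte)) {
            /* inspect or update the mapping while both locks are held */
    }
    page_table_unlock(as, true);            /* -> ht_unlock(as, true) */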
/kernel/trunk/genarch/src/mm/asid.c
57,6 → 57,7
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
+#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>
103,7 → 104,7
list_remove(tmp);
as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
-spinlock_lock(&as->lock);
+mutex_lock_active(&as->lock);
 
/*
* Steal the ASID.
117,7 → 118,7
* was stolen by invalidating its asid member.
*/
as->asid = ASID_INVALID;
-spinlock_unlock(&as->lock);
+mutex_unlock(&as->lock);
 
/*
* Get the system rid of the stolen ASID.
/kernel/trunk/genarch/src/mm/as_pt.c
35,6 → 35,7
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
+#include <synch/mutex.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/types.h>
78,7 → 79,7
*/
ipl = interrupts_disable();
-spinlock_lock(&AS_KERNEL->lock);
+mutex_lock(&AS_KERNEL->lock);
src_ptl0 = (pte_t *) PA2KA((__address) AS_KERNEL->page_table);
 
src = (__address) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
86,7 → 87,7
 
memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
memcpy((void *) dst, (void *) src, PAGE_SIZE - (src - (__address) src_ptl0));
-spinlock_unlock(&AS_KERNEL->lock);
+mutex_unlock(&AS_KERNEL->lock);
interrupts_restore(ipl);
}
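The memcpy above copies only the kernel part of the source PTL0 into the freshly zeroed destination table, so every new address space starts out with the kernel mappings. A worked illustration with hypothetical numbers (a 4 KiB PTL0 of 1024 four-byte entries and KERNEL_ADDRESS_SPACE_START at 0x80000000; real values are architecture-dependent):

    /* Illustration only; entry size and address split are assumptions. */
    /* PTL0_INDEX(0x80000000) == 512, so                                 */
    /*   src - (__address) src_ptl0 == 512 * 4 == 2048                   */
    /*   PAGE_SIZE - 2048 == 2048 bytes copied                           */
    /* i.e. exactly the upper half of the table: the kernel's entries.   */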
 
104,7 → 105,7
void pt_lock(as_t *as, bool lock)
{
if (lock)
-spinlock_lock(&as->lock);
+mutex_lock(&as->lock);
}
 
/** Unlock page tables.
118,5 → 119,5
void pt_unlock(as_t *as, bool unlock)
{
if (unlock)
-spinlock_unlock(&as->lock);
+mutex_unlock(&as->lock);
}
/kernel/trunk/genarch/src/mm/page_ht.c
61,7 → 61,7
* after address space lock and after any address space area
* locks.
*/
-SPINLOCK_INITIALIZE(page_ht_lock);
+mutex_t page_ht_lock;
 
/**
* Page hash table.
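Combining the comment above (page_ht_lock is acquired after the address space lock and after any address space area locks) with the updated as_lock comment later in this revision, the acquisition order after this change appears to be: the as_lock spinlock first, then address space mutexes, then address space area mutexes, then page_ht_lock last. A schematic sketch of the inferred hierarchy:

    /* Inferred lock ordering; taking these out of order risks deadlock. */
    spinlock_lock(&as_lock);        /* 1. list of inactive AS with ASID */
    mutex_lock(&as->lock);          /* 2. the address space             */
    mutex_lock(&area->lock);        /* 3. an address space area         */
    mutex_lock(&page_ht_lock);      /* 4. the page hash table, last     */
    /* ... critical section ... */
    mutex_unlock(&page_ht_lock);
    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    spinlock_unlock(&as_lock);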
/kernel/trunk/generic/include/synch/mutex.h
44,7 → 44,9
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_NON_BLOCKING)
#define mutex_lock_timeout(mtx,usec) \
_mutex_lock_timeout((mtx),(usec),SYNCH_NON_BLOCKING)
+#define mutex_lock_active(mtx) \
+while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
 
extern void mutex_initialize(mutex_t *mtx);
extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock);
extern void mutex_unlock(mutex_t *mtx);
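mutex_lock_active() busy-waits instead of blocking: it keeps retrying the non-blocking mutex_trylock() until it succeeds with ESYNCH_OK_ATOMIC. That makes it usable in contexts that must not sleep, which is exactly where this revision employs it (the ASID-stealing loop and the address space switch in as_switch()). A call such as mutex_lock_active(&as->lock) expands roughly to:

    /* Spin on non-blocking acquisition attempts instead of being put
     * on the mutex wait queue. */
    while (mutex_trylock(&as->lock) != ESYNCH_OK_ATOMIC)
            ;       /* spin */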
/kernel/trunk/generic/include/mm/as.h
44,6 → 44,7
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
+#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
 
66,9 → 67,10
#define AS_AREA_ATTR_NONE 0
#define AS_AREA_ATTR_PARTIAL 1 /* Not fully initialized area. */
 
-#define AS_PF_FAULT 0 /**< The page fault was not resolved by asp_page_fault(). */
+#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
-#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace(). */
+#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
+or memcpy_to_uspace(). */
 
/** Address space area structure.
*
76,7 → 78,7
* In the future, it should not be difficult to support shared areas.
*/
struct as_area {
-SPINLOCK_DECLARE(lock);
+mutex_t lock;
int flags; /**< Flags related to the memory represented by the address space area. */
int attributes; /**< Attributes related to the address space area itself. */
count_t pages; /**< Size of this area in multiples of PAGE_SIZE. */
91,10 → 93,10
* set up during system initialization.
*/
struct as {
-/** Protected by asidlock. Must be acquired before as->lock. */
+/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
 
-SPINLOCK_DECLARE(lock);
+mutex_t lock;
 
/** Number of processors on which this address space is active. */
count_t refcount;
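The AS_PF_* codes let the architecture-level fault handler distinguish a resolved fault, an unresolved one, and a fault raised inside the kernel's user-space copy routines. A hypothetical dispatch on the as_page_fault() result (badvaddr and the two helpers are illustrative names, not part of this diff):

    switch (as_page_fault(badvaddr)) {
    case AS_PF_OK:
            return;         /* mapping installed; restart the instruction */
    case AS_PF_DEFER:
            /* fault came from memcpy_from_uspace()/memcpy_to_uspace();
             * unwind to the copy routine's error path */
            defer_to_copy_error_path();
            return;
    case AS_PF_FAULT:
    default:
            panic_on_unhandled_fault();
    }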
/kernel/trunk/generic/include/typedefs.h
63,19 → 63,14
typedef struct waitq waitq_t;
typedef struct futex futex_t;
 
-typedef struct chunk chunk_t;
 
typedef struct buddy_system buddy_system_t;
typedef struct buddy_system_operations buddy_system_operations_t;
 
typedef enum as_area_type as_area_type_t;
typedef struct as_area as_area_t;
typedef struct as as_t;
 
typedef struct link link_t;
 
-typedef char *char_ptr;
 
typedef struct the the_t;
 
typedef struct chardev chardev_t;
/kernel/trunk/generic/src/proc/scheduler.c
421,8 → 421,6
 
relink_rq(priority);
 
-spinlock_lock(&THREAD->lock);
 
/*
* If both the old and the new task are the same, lots of work is avoided.
*/
454,6 → 452,7
before_task_runs();
}
 
+spinlock_lock(&THREAD->lock);
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
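Moving spinlock_lock(&THREAD->lock) below this stretch of scheduler() follows from the mutex conversion: between relink_rq() and THREAD->state = Running the scheduler performs the task and address space switch, and as_switch() now spins on as_t mutexes via mutex_lock_active(). Acquiring THREAD->lock only afterwards avoids holding a spinlock while contending for mutexes whose holders may themselves need THREAD->lock. A condensed view of the resulting order (the surrounding scheduler() structure is assumed from context):

    relink_rq(priority);

    if (TASK != THREAD->task) {
            /* ... switch tasks; as_switch() may spin on as_t
             * mutexes via mutex_lock_active() ... */
            before_task_runs();
    }

    spinlock_lock(&THREAD->lock);   /* taken only after the mutex-taking path */
    THREAD->state = Running;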
/kernel/trunk/generic/src/mm/as.c
54,6 → 54,7
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
+#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
74,7 → 75,7
 
as_operations_t *as_operations = NULL;
 
-/** Address space lock. It protects inactive_as_with_asid_head. */
+/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
SPINLOCK_INITIALIZE(as_lock);
 
/**
110,7 → 111,7
 
as = (as_t *) malloc(sizeof(as_t), 0);
link_initialize(&as->inactive_as_with_asid_link);
-spinlock_initialize(&as->lock, "as_lock");
+mutex_initialize(&as->lock);
btree_create(&as->as_area_btree);
if (flags & FLAG_AS_KERNEL)
162,10 → 163,10
return NULL;
ipl = interrupts_disable();
-spinlock_lock(&as->lock);
+mutex_lock(&as->lock);
if (!check_area_conflicts(as, base, size, NULL)) {
-spinlock_unlock(&as->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
return NULL;
}
172,7 → 173,7
a = (as_area_t *) malloc(sizeof(as_area_t), 0);
 
-spinlock_initialize(&a->lock, "as_area_lock");
+mutex_initialize(&a->lock);
a->flags = flags;
a->attributes = attrs;
181,7 → 182,7
btree_insert(&as->as_area_btree, base, (void *) a, NULL);
 
-spinlock_unlock(&as->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
 
return a;
203,7 → 204,7
size_t pages;
ipl = interrupts_disable();
-spinlock_lock(&as->lock);
+mutex_lock(&as->lock);
/*
* Locate the area.
210,7 → 211,7
*/
area = find_area_and_lock(as, address);
if (!area) {
-spinlock_unlock(&as->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
return ENOENT;
}
220,8 → 221,8
* Remapping of address space areas associated
* with memory mapped devices is not supported.
*/
-spinlock_unlock(&area->lock);
-spinlock_unlock(&as->lock);
+mutex_unlock(&area->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
231,8 → 232,8
/*
* Zero size address space areas are not allowed.
*/
-spinlock_unlock(&area->lock);
-spinlock_unlock(&as->lock);
+mutex_unlock(&area->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
return EPERM;
}
278,8 → 279,8
* Check for overlaps with other address space areas.
*/
if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
-spinlock_unlock(&area->lock);
-spinlock_unlock(&as->lock);
+mutex_unlock(&area->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
return EADDRNOTAVAIL;
}
287,8 → 288,8
 
area->pages = pages;
-spinlock_unlock(&area->lock);
-spinlock_unlock(&as->lock);
+mutex_unlock(&area->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
 
return 0;
309,11 → 310,11
int i;
 
ipl = interrupts_disable();
-spinlock_lock(&as->lock);
+mutex_lock(&as->lock);
 
area = find_area_and_lock(as, address);
if (!area) {
-spinlock_unlock(&as->lock);
+mutex_unlock(&as->lock);
interrupts_restore(ipl);
return ENOENT;
}
350,7 → 351,7
tlb_shootdown_finalize();
 
area->attributes |= AS_AREA_ATTR_PARTIAL;
-spinlock_unlock(&area->lock);
+mutex_unlock(&area->lock);
 
/*
* Remove the empty area from address space.
359,7 → 360,7
free(area);
-spinlock_unlock(&AS->lock);
+mutex_unlock(&AS->lock);
interrupts_restore(ipl);
return 0;
}
397,7 → 398,7
spinlock_lock(&src_task->lock);
src_as = src_task->as;
-spinlock_lock(&src_as->lock);
+mutex_lock(&src_as->lock);
src_area = find_area_and_lock(src_as, src_base);
if (!src_area) {
/*
404,14 → 405,14
* Could not find the source address space area.
*/
spinlock_unlock(&src_task->lock);
-spinlock_unlock(&src_as->lock);
+mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOENT;
}
src_size = src_area->pages * PAGE_SIZE;
src_flags = src_area->flags;
-spinlock_unlock(&src_area->lock);
-spinlock_unlock(&src_as->lock);
+mutex_unlock(&src_area->lock);
+mutex_unlock(&src_as->lock);
 
 
if (src_size != acc_size) {
441,11 → 442,11
* Avoid deadlock by first locking the address space with lower address.
*/
if (AS < src_as) {
-spinlock_lock(&AS->lock);
-spinlock_lock(&src_as->lock);
+mutex_lock(&AS->lock);
+mutex_lock(&src_as->lock);
} else {
-spinlock_lock(&src_as->lock);
-spinlock_lock(&AS->lock);
+mutex_lock(&src_as->lock);
+mutex_lock(&AS->lock);
}
for (i = 0; i < SIZE2FRAMES(src_size); i++) {
475,12 → 476,12
* fully initialized. Clear the AS_AREA_ATTR_PARTIAL
* attribute.
*/
-spinlock_lock(&dst_area->lock);
+mutex_lock(&dst_area->lock);
dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
-spinlock_unlock(&dst_area->lock);
+mutex_unlock(&dst_area->lock);
-spinlock_unlock(&AS->lock);
-spinlock_unlock(&src_as->lock);
+mutex_unlock(&AS->lock);
+mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return 0;
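Locking the two address spaces in ascending order of their as_t addresses (mirroring the AS < src_as test above) is the classic deadlock-avoidance idiom: any two threads sharing between the same pair of address spaces contend on the same mutex first, so neither can hold one lock while waiting for the other in the reverse order. The pattern as a standalone sketch:

    /* Generic address-ordered acquisition of two mutex-protected
     * address spaces. */
    static void as_lock_pair(as_t *a, as_t *b)
    {
            if (a < b) {
                    mutex_lock(&a->lock);
                    mutex_lock(&b->lock);
            } else {
                    mutex_lock(&b->lock);
                    mutex_lock(&a->lock);
            }
    }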
511,7 → 512,7
 
page_mapping_insert(as, page, frame, get_area_flags(area));
-spinlock_unlock(&area->lock);
+mutex_unlock(&area->lock);
page_table_unlock(as, true);
interrupts_restore(ipl);
}
532,9 → 533,12
as_area_t *area;
__address frame;
if (!THREAD)
return 0;
+ASSERT(AS);
 
-spinlock_lock(&AS->lock);
+mutex_lock(&AS->lock);
area = find_area_and_lock(AS, page);
if (!area) {
/*
541,7 → 545,7
* No area contained mapping for 'page'.
* Signal page fault to low-level handler.
*/
-spinlock_unlock(&AS->lock);
+mutex_unlock(&AS->lock);
goto page_fault;
}
 
550,8 → 554,8
* The address space area is not fully initialized.
* Avoid possible race by returning error.
*/
-spinlock_unlock(&area->lock);
-spinlock_unlock(&AS->lock);
+mutex_unlock(&area->lock);
+mutex_unlock(&AS->lock);
goto page_fault;
}
 
567,8 → 571,8
if ((pte = page_mapping_find(AS, page))) {
if (PTE_PRESENT(pte)) {
page_table_unlock(AS, false);
-spinlock_unlock(&area->lock);
-spinlock_unlock(&AS->lock);
+mutex_unlock(&area->lock);
+mutex_unlock(&AS->lock);
return 1;
}
}
598,8 → 602,8
page_mapping_insert(AS, page, frame, get_area_flags(area));
page_table_unlock(AS, false);
-spinlock_unlock(&area->lock);
-spinlock_unlock(&AS->lock);
+mutex_unlock(&area->lock);
+mutex_unlock(&AS->lock);
return AS_PF_OK;
 
page_fault:
621,6 → 625,9
 
/** Switch address spaces.
*
+ * Note that this function cannot sleep as it is essentially a part of
+ * the scheduling. Sleeping here would lead to deadlock on wakeup.
*
* @param old Old address space or NULL.
* @param new New address space.
*/
636,7 → 643,7
* First, take care of the old address space.
*/
if (old) {
-spinlock_lock(&old->lock);
+mutex_lock_active(&old->lock);
ASSERT(old->refcount);
if((--old->refcount == 0) && (old != AS_KERNEL)) {
/*
648,13 → 655,13
ASSERT(old->asid != ASID_INVALID);
list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
}
-spinlock_unlock(&old->lock);
+mutex_unlock(&old->lock);
}
 
/*
* Second, prepare the new address space.
*/
-spinlock_lock(&new->lock);
+mutex_lock_active(&new->lock);
if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
if (new->asid != ASID_INVALID)
list_remove(&new->inactive_as_with_asid_link);
662,7 → 669,7
needs_asid = true; /* defer call to asid_get() until new->lock is released */
}
SET_PTL0_ADDRESS(new->page_table);
-spinlock_unlock(&new->lock);
+mutex_unlock(&new->lock);
 
if (needs_asid) {
/*
672,9 → 679,9
asid_t asid;
asid = asid_get();
-spinlock_lock(&new->lock);
+mutex_lock_active(&new->lock);
new->asid = asid;
-spinlock_unlock(&new->lock);
+mutex_unlock(&new->lock);
}
spinlock_unlock(&as_lock);
interrupts_restore(ipl);
798,7 → 805,7
a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
if (a) {
/* va is the base address of an address space area */
-spinlock_lock(&a->lock);
+mutex_lock(&a->lock);
return a;
}
811,11 → 818,11
/* First, search the leaf node itself. */
for (i = 0; i < leaf->keys; i++) {
a = (as_area_t *) leaf->value[i];
-spinlock_lock(&a->lock);
+mutex_lock(&a->lock);
if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
return a;
}
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
}
 
/*
824,11 → 831,11
*/
if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
a = (as_area_t *) lnode->value[lnode->keys - 1];
-spinlock_lock(&a->lock);
+mutex_lock(&a->lock);
if (va < a->base + a->pages * PAGE_SIZE) {
return a;
}
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
}
 
return NULL;
873,21 → 880,21
/* First, check the two border cases. */
if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
a = (as_area_t *) node->value[node->keys - 1];
-spinlock_lock(&a->lock);
+mutex_lock(&a->lock);
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
return false;
}
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
}
if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
a = (as_area_t *) node->value[0];
-spinlock_lock(&a->lock);
+mutex_lock(&a->lock);
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
return false;
}
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
}
/* Second, check the leaf node. */
897,12 → 904,12
if (a == avoid_area)
continue;
-spinlock_lock(&a->lock);
+mutex_lock(&a->lock);
if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
return false;
}
-spinlock_unlock(&a->lock);
+mutex_unlock(&a->lock);
}
 
/*
917,7 → 924,7
return true;
}
 
-/** Return size of address space of current task pointed to by base */
+/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
ipl_t ipl;
928,7 → 935,7
src_area = find_area_and_lock(AS, base);
if (src_area){
size = src_area->pages * PAGE_SIZE;
-spinlock_unlock(&src_area->lock);
+mutex_unlock(&src_area->lock);
} else {
size = 0;
}