/kernel/trunk/genarch/src/mm/asid.c |
---|
43,6 → 43,10 |
* ASID from an address space that has not been active for |
* a while. |
* |
* This code depends on the fact that ASIDS_ALLOCABLE |
* is greater than number of supported CPUs (i.e. the |
 * amount of concurrently active address spaces). |
* |
* Architectures that don't have hardware support for address |
* spaces do not compile with this file. |
*/ |
57,33 → 61,21 |
#include <debug.h> |
/** |
* asidlock protects both the asids_allocated counter |
* and the list of address spaces that were already |
* assigned ASID. |
* asidlock protects the asids_allocated counter. |
*/ |
SPINLOCK_INITIALIZE(asidlock); |
static count_t asids_allocated = 0; |
/** |
* List of address spaces with assigned ASID. |
* When the system runs short of allocable |
* ASIDS, inactive address spaces are guaranteed |
* to be at the beginning of the list. |
*/ |
LIST_INITIALIZE(as_with_asid_head); |
/** Allocate free address space identifier. |
* |
* This code depends on the fact that ASIDS_ALLOCABLE |
* is greater than number of supported CPUs. |
* Interrupts must be disabled and as_lock must be held |
* prior to this call |
* |
* @return New ASID. |
*/ |
asid_t asid_get(void) |
{ |
ipl_t ipl; |
asid_t asid; |
link_t *tmp; |
as_t *as; |
92,7 → 84,6 |
* Check if there is an unallocated ASID. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&asidlock); |
if (asids_allocated == ASIDS_ALLOCABLE) { |
106,11 → 97,11 |
* It is guaranteed to belong to an |
* inactive address space. |
*/ |
tmp = as_with_asid_head.next; |
ASSERT(tmp != &as_with_asid_head); |
ASSERT(!list_empty(&inactive_as_with_asid_head)); |
tmp = inactive_as_with_asid_head.next; |
list_remove(tmp); |
as = list_get_instance(tmp, as_t, as_with_asid_link); |
as = list_get_instance(tmp, as_t, inactive_as_with_asid_link); |
spinlock_lock(&as->lock); |
/* |
145,7 → 136,6 |
} |
spinlock_unlock(&asidlock); |
interrupts_restore(ipl); |
return asid; |
} |
170,57 → 160,3 |
spinlock_unlock(&asidlock); |
interrupts_restore(ipl); |
} |
/** Install ASID. |
* |
* This function is to be executed on each address space switch. |
* |
* @param as Address space. |
*/ |
void asid_install(as_t *as) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&asidlock); |
spinlock_lock(&as->lock); |
if (as->asid != ASID_KERNEL) { |
if (as->asid != ASID_INVALID) { |
/* |
* This address space has valid ASID. |
* Remove 'as' from the list of address spaces |
* with assigned ASID, so that it can be later |
* appended to the tail of the same list. |
* This is to prevent stealing of ASIDs from |
* recently installed address spaces. |
*/ |
list_remove(&as->as_with_asid_link); |
} else { |
spinlock_unlock(&as->lock); |
spinlock_unlock(&asidlock); |
/* |
* This address space doesn't have ASID assigned. |
* It was stolen or the address space is being |
* installed for the first time. |
* Allocate new ASID for it. |
*/ |
as->asid = asid_get(); |
spinlock_lock(&asidlock); |
spinlock_lock(&as->lock); |
} |
/* |
* Now it is sure that 'as' has ASID. |
* It is therefore appended to the list |
* of address spaces from which it can |
* be stolen. |
*/ |
list_append(&as->as_with_asid_link, &as_with_asid_head); |
} |
spinlock_unlock(&as->lock); |
spinlock_unlock(&asidlock); |
interrupts_restore(ipl); |
} |
/kernel/trunk/genarch/src/mm/page_ht.c |
---|
169,7 → 169,7 |
spinlock_lock(&page_ht_lock); |
if (!hash_table_find(&page_ht, key)) { |
t = (pte_t *) malloc(sizeof(pte_t)); |
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC); |
ASSERT(t != NULL); |
hash_table_insert(&page_ht, key, &t->link); |
/kernel/trunk/generic/include/mm/asid.h |
---|
45,9 → 45,10 |
#define ASIDS_ALLOCABLE ((ASID_MAX+1)-ASID_START) |
extern spinlock_t asidlock; |
extern link_t as_with_asid_head; |
#ifndef asid_get |
extern asid_t asid_get(void); |
#endif /* !def asid_get */ |
extern void asid_put(asid_t asid); |
#ifndef asid_install |
/kernel/trunk/generic/include/mm/as.h |
---|
76,9 → 76,13 |
*/ |
struct as { |
/** Protected by asidlock. Must be acquired before as->lock. */ |
link_t as_with_asid_link; |
link_t inactive_as_with_asid_link; |
SPINLOCK_DECLARE(lock); |
/** Number of processors on which this address space is active. */ |
count_t refcount; |
link_t as_area_head; |
/** Page table pointer. Constant on architectures that use global page hash table. */ |
96,12 → 100,15 |
extern as_t *AS_KERNEL; |
extern as_operations_t *as_operations; |
extern spinlock_t as_lock; |
extern link_t inactive_as_with_asid_head; |
extern void as_init(void); |
extern as_t *as_create(int flags); |
extern as_area_t *as_area_create(as_t *as, as_area_type_t type, size_t size, __address base); |
extern void as_set_mapping(as_t *as, __address page, __address frame); |
extern int as_page_fault(__address page); |
extern void as_install(as_t *m); |
extern void as_switch(as_t *old, as_t *new); |
/* Interface to be implemented by architectures. */ |
#ifndef as_install_arch |
/kernel/trunk/generic/src/proc/scheduler.c |
---|
327,7 → 327,7 |
* Both tasks and address spaces are different. |
* Replace the old one with the new one. |
*/ |
as_install(as2); |
as_switch(as1, as2); |
} |
TASK = THREAD->task; |
} |
335,7 → 335,7 |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy); |
printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy)); |
#endif |
/* |
/kernel/trunk/generic/src/mm/as.c |
---|
56,6 → 56,15 |
as_operations_t *as_operations = NULL; |
/** Address space lock. It protects inactive_as_with_asid_head. */ |
SPINLOCK_INITIALIZE(as_lock); |
/** |
* This list contains address spaces that are not active on any |
* processor and that have valid ASID. |
*/ |
LIST_INITIALIZE(inactive_as_with_asid_head); |
/** Kernel address space. */ |
as_t *AS_KERNEL = NULL; |
79,8 → 88,7 |
as_t *as; |
as = (as_t *) malloc(sizeof(as_t), 0); |
list_initialize(&as->as_with_asid_link); |
link_initialize(&as->inactive_as_with_asid_link); |
spinlock_initialize(&as->lock, "as_lock"); |
list_initialize(&as->as_area_head); |
89,6 → 97,7 |
else |
as->asid = ASID_INVALID; |
as->refcount = 0; |
as->page_table = page_table_create(flags); |
return as; |
267,29 → 276,73 |
return 1; |
} |
/** Install address space on CPU. |
/** Switch address spaces. |
* |
* @param as Address space. |
* @param old Old address space or NULL. |
* @param new New address space. |
*/ |
void as_install(as_t *as) |
void as_switch(as_t *old, as_t *new) |
{ |
ipl_t ipl; |
bool needs_asid = false; |
asid_install(as); |
ipl = interrupts_disable(); |
spinlock_lock(&as->lock); |
SET_PTL0_ADDRESS(as->page_table); |
spinlock_unlock(&as->lock); |
interrupts_restore(ipl); |
spinlock_lock(&as_lock); |
/* |
* First, take care of the old address space. |
*/ |
if (old) { |
spinlock_lock(&old->lock); |
ASSERT(old->refcount); |
if((--old->refcount == 0) && (old != AS_KERNEL)) { |
/* |
* The old address space is no longer active on |
* any processor. It can be appended to the |
* list of inactive address spaces with assigned |
* ASID. |
*/ |
ASSERT(old->asid != ASID_INVALID); |
list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head); |
} |
spinlock_unlock(&old->lock); |
} |
/* |
* Second, prepare the new address space. |
*/ |
spinlock_lock(&new->lock); |
if ((new->refcount++ == 0) && (new != AS_KERNEL)) { |
if (new->asid != ASID_INVALID) |
list_remove(&new->inactive_as_with_asid_link); |
else |
needs_asid = true; /* defer call to asid_get() until new->lock is released */ |
} |
SET_PTL0_ADDRESS(new->page_table); |
spinlock_unlock(&new->lock); |
if (needs_asid) { |
/* |
* Allocation of new ASID was deferred |
* until now in order to avoid deadlock. |
*/ |
asid_t asid; |
asid = asid_get(); |
spinlock_lock(&new->lock); |
new->asid = asid; |
spinlock_unlock(&new->lock); |
} |
spinlock_unlock(&as_lock); |
interrupts_restore(ipl); |
/* |
* Perform architecture-specific steps. |
* (e.g. write ASID to hardware register etc.) |
*/ |
as_install_arch(as); |
as_install_arch(new); |
AS = as; |
AS = new; |
} |
/** Compute flags for virtual address translation subsystem. |
/kernel/trunk/arch/sparc64/src/mm/frame.c |
---|
33,7 → 33,7 |
void frame_arch_init(void) |
{ |
zone_create(0, config.memory_size >> FRAME_WIDTH, ADDR2PFN(ALIGN_UP(config.base + config.kernel_size, FRAME_SIZE)), 0); |
zone_create(0, config.memory_size >> FRAME_WIDTH, 1, 0); |
/* |
* Workaround to prevent slab allocator from allocating frame 0. |
/kernel/trunk/arch/ia64/src/mm/frame.c |
---|
43,10 → 43,10 |
zone_create(0, config.memory_size >> FRAME_WIDTH, 1, 0); |
/* |
* Workaround to prevent slab allocator from allocating frame 0. |
* Remove the following statement when the kernel is no longer |
* Workaround to prevent slab allocator from allocating frame 0. |
* Remove the following statement when the kernel is no longer |
* identity mapped. |
*/ |
*/ |
frame_mark_unavailable(0, 1); |
/* |
/kernel/trunk/arch/ppc32/include/mm/asid.h |
---|
31,8 → 31,8 |
typedef int asid_t; |
#define ASID_MAX_ARCH 0 |
#define ASID_MAX_ARCH 3 |
#define asid_install(as) |
#define asid_get() (ASID_START+1) |
#endif |
/kernel/trunk/arch/ia32/include/mm/asid.h |
---|
37,8 → 37,8 |
typedef int asid_t; |
#define ASID_MAX_ARCH 0 |
#define ASID_MAX_ARCH 3 |
#define asid_install(as) |
#define asid_get() (ASID_START+1) |
#endif |