/trunk/kernel/genarch/src/mm/asid.c
---

62,15 → 62,9

#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>

/**
 * asidlock protects the asids_allocated counter.
 */
SPINLOCK_INITIALIZE(asidlock);
static count_t asids_allocated = 0;
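A minimal sketch of the rule stated in the comment above (illustrative only; the spinlock_locked() predicate is an assumption, not necessarily available in this tree): the counter may only be touched with asidlock held by the current path.

    /* Hypothetical guard: asids_allocated is valid only under asidlock. */
    ASSERT(spinlock_locked(&asidlock));
    asids_allocated++;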
/** Allocate free address space identifier.

90,7 → 84,6

     * Check if there is an unallocated ASID.
     */
    spinlock_lock(&asidlock);
    if (asids_allocated == ASIDS_ALLOCABLE) {
        /*

108,7 → 101,6

        list_remove(tmp);
        as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
        mutex_lock_active(&as->lock);

        /*
         * Steal the ASID.

130,8 → 122,6

         */
        as_invalidate_translation_cache(as, 0, (count_t) -1);
        mutex_unlock(&as->lock);

        /*
         * Rid the system of the stolen ASID.
         */

156,8 → 146,6

        tlb_shootdown_finalize();
    }
    spinlock_unlock(&asidlock);

    return asid;
}
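The hunk sizes above, together with the fact that as_switch() below calls asid_get() while already holding asidlock, imply the new calling convention: the caller, not asid_get() itself, owns the lock across the whole allocation. A usage sketch under that assumption:

    spinlock_lock(&asidlock);
    if (new_as->asid == ASID_INVALID)
        new_as->asid = asid_get();    /* asidlock already held */
    spinlock_unlock(&asidlock);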
170,16 → 158,8

 */
void asid_put(asid_t asid)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&asidlock);

    asids_allocated--;
    asid_put_arch(asid);

    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);
}
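The shrink of this hunk (16 → 8 lines) indicates that the interrupt and spinlock handling shown here is what gets dropped; the caller supplies it instead. A sketch of the caller side, mirroring as_destroy() in as.c below:

    ipl = interrupts_disable();
    spinlock_lock(&asidlock);
    if (as->asid != ASID_INVALID)
        asid_put(as->asid);    /* runs with asidlock held */
    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);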
/** @}
/trunk/kernel/generic/include/mm/as.h
---

89,6 → 89,17

@public
    /** Protected by asidlock. */
    link_t inactive_as_with_asid_link;
    /**
     * Number of processors on which this address space is active.
     * Protected by asidlock.
     */
    count_t cpu_refcount;
    /**
     * Address space identifier.
     * Constant on architectures that do not support ASIDs.
     * Protected by asidlock.
     */
    asid_t asid;

    mutex_t lock;

95,18 → 106,9

    /** Number of references (i.e., tasks that reference this as). */
    count_t refcount;

    /** Number of processors on which this address space is active. */
    count_t cpu_refcount;

    /** B+tree of address space areas. */
    btree_t as_area_btree;

    /**
     * Address space identifier.
     * Constant on architectures that do not support ASIDs.
     */
    asid_t asid;

    /** Non-generic content. */
    as_genarch_t genarch;
133,6 → 135,17

typedef struct as {
    /** Protected by asidlock. */
    link_t inactive_as_with_asid_link;
    /**
     * Number of processors on which this address space is active.
     * Protected by asidlock.
     */
    count_t cpu_refcount;
    /**
     * Address space identifier.
     * Constant on architectures that do not support ASIDs.
     * Protected by asidlock.
     */
    asid_t asid;

    mutex_t lock;

139,18 → 152,9

    /** Number of references (i.e., tasks that reference this as). */
    count_t refcount;

    /** Number of processors on which this address space is active. */
    count_t cpu_refcount;

    /** B+tree of address space areas. */
    btree_t as_area_btree;

    /**
     * Address space identifier.
     * Constant on architectures that do not support ASIDs.
     */
    asid_t asid;

    /** Non-generic content. */
    as_genarch_t genarch;
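The access rule the new comments encode, as a sketch (illustrative, not tree code): fields annotated "Protected by asidlock" are touched only inside an asidlock critical section, while the remaining as_t fields stay behind the mutex.

    spinlock_lock(&asidlock);
    as->cpu_refcount++;        /* asidlock-protected field */
    spinlock_unlock(&asidlock);

    mutex_lock(&as->lock);
    /* ... as_area_btree and other mutex-protected fields ... */
    mutex_unlock(&as->lock);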
249,7 → 253,6

extern as_operations_t *as_operations;
#endif

SPINLOCK_EXTERN(inactive_as_with_asid_lock);
extern link_t inactive_as_with_asid_head;

extern void as_init(void);
/trunk/kernel/generic/src/mm/as.c
---

95,10 → 95,13

#endif

/**
 * This lock protects inactive_as_with_asid_head list. It must be acquired
 * before as_t mutex.
 * This lock serializes access to the ASID subsystem.
 * It protects:
 * - inactive_as_with_asid_head list
 * - as->asid for each as_t
 * - asids_allocated counter
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
SPINLOCK_INITIALIZE(asidlock);
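A sketch of a complete critical section under the consolidated lock (illustrative; it combines accesses that as_destroy() below and asid_get() perform), showing all three protected items covered by a single acquisition:

    spinlock_lock(&asidlock);
    list_remove(&as->inactive_as_with_asid_link);    /* the list */
    asid_put(as->asid);                              /* asids_allocated */
    as->asid = ASID_INVALID;                         /* as->asid */
    spinlock_unlock(&asidlock);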
/**
 * This list contains address spaces that are not active on any

205,14 → 208,15

     * Since there is no reference to this area,
     * it is safe not to lock its mutex.
     */

    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);
    spinlock_lock(&asidlock);
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    spinlock_unlock(&asidlock);

    /*
     * Destroy address space areas of the address space.
860,7 → 864,8

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 * scheduling. Sleeping here would lead to deadlock on wakeup. Moreover,
 * locking the address space mutex is forbidden in this context as well.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
867,17 → 872,12

 */
void as_switch(as_t *old_as, as_t *new_as)
{
    ipl_t ipl;
    bool needs_asid = false;

    spinlock_lock(&asidlock);
    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        mutex_lock_active(&old_as->lock);
        ASSERT(old_as->cpu_refcount);
        if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*

890,7 → 890,6

            list_append(&old_as->inactive_as_with_asid_link,
                &inactive_as_with_asid_head);
        }
        mutex_unlock(&old_as->lock);

        /*
         * Perform architecture-specific tasks when the address space

902,43 → 901,24

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new_as->lock);
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID) {
        if (new_as->asid != ASID_INVALID)
            list_remove(&new_as->inactive_as_with_asid_link);
        } else {
            /*
             * Defer call to asid_get() until new_as->lock is released.
             */
            needs_asid = true;
        else
            new_as->asid = asid_get();
        }
    }

#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
    mutex_unlock(&new_as->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new_as->lock);
        new_as->asid = asid;
        mutex_unlock(&new_as->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);
    spinlock_unlock(&asidlock);

    AS = new_as;
}
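Reduced to its locking skeleton, the new as_switch() looks roughly like this (a sketch, assuming asidlock is the only lock taken on this path): one non-sleeping critical section covers the list manipulation, the ASID assignment, and the hardware install, which is what makes the address space mutex and the deferred-ASID dance above unnecessary.

    void as_switch(as_t *old_as, as_t *new_as)
    {
        spinlock_lock(&asidlock);
        /* move old_as to the inactive list if it lost its last cpu */
        /* take new_as off the list, or give it an ASID via asid_get() */
        as_install_arch(new_as);    /* program hardware under asidlock */
        spinlock_unlock(&asidlock);
        AS = new_as;
    }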
/trunk/kernel/arch/sparc64/src/mm/as.c
---

42,7 → 42,6

#ifdef CONFIG_TSB

#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>

100,13 → 99,7

int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock_active(&as->lock);    /* completely unnecessary, but polite */
    tsb_invalidate(as, 0, (count_t) -1);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
#endif
    return 0;
}
123,18 → 116,17

    tlb_context_reg_t ctx;

    /*
     * Note that we don't lock the address space.
     * That's correct - we can afford it here
     * because we only read members that are
     * currently read-only.
     * Note that we don't and may not lock the address space. That's OK
     * since we only read members that are currently read-only.
     *
     * Moreover, as->asid is protected by asidlock, which is held here.
     */

    /*
     * Write ASID to secondary context register.
     * The primary context register has to be set
     * from TL>0 so it will be filled from the
     * secondary context register from the TL=1
     * code just before switch to userspace.
     * Write ASID to secondary context register. The primary context
     * register has to be set from TL>0 so it will be filled from the
     * secondary context register from the TL=1 code just before switch to
     * userspace.
     */
    ctx.v = 0;
    ctx.context = as->asid;

184,10 → 176,10

{
    /*
     * Note that we don't lock the address space.
     * That's correct - we can afford it here
     * because we only read members that are
     * currently read-only.
     * Note that we don't and may not lock the address space. That's OK
     * since we only read members that are currently read-only.
     *
     * Moreover, as->asid is protected by asidlock, which is held here.
     */
#ifdef CONFIG_TSB
/trunk/kernel/arch/ia64/src/mm/as.c
---

36,11 → 36,10

#include <arch/mm/asid.h>
#include <arch/mm/page.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/page_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <mm/asid.h>
#include <arch.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>

/** Architecture dependent address space init. */
void as_arch_init(void)

55,13 → 54,9

 */
void as_install_arch(as_t *as)
{
    ipl_t ipl;
    region_register rr;
    int i;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    ASSERT(as->asid != ASID_INVALID);

    /*

80,9 → 75,6

    }
    srlz_d();
    srlz_i();

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);
}

/** @}
/trunk/kernel/arch/ppc32/src/mm/as.c
---

54,12 → 54,8

void as_install_arch(as_t *as)
{
    asid_t asid;
    ipl_t ipl;
    uint32_t sr;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    asid = as->asid;

    /* Lower 2 GB, user and supervisor access */

79,9 → 75,6

            : "r" ((0x4000 << 16) + (asid << 4) + sr), "r" (sr << 28)
        );
    }

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);
}

/** @}
/trunk/kernel/arch/mips32/src/mm/as.c
---

34,12 → 34,12

#include <arch/mm/as.h>
#include <genarch/mm/as_pt.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/asid_fifo.h>
#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <arch.h>

/** Architecture dependent address space init. */
void as_arch_init(void)

57,7 → 57,6

void as_install_arch(as_t *as)
{
    entry_hi_t hi;
    ipl_t ipl;

    /*
     * Install ASID.

64,12 → 63,8

     */
    hi.value = cp0_entry_hi_read();

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    hi.asid = as->asid;
    cp0_entry_hi_write(hi.value);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);
}

/** @}
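The same simplification repeats across sparc64, ia64, ppc32, and mips32: drop the local ipl and as->lock handling and read as->asid directly. The common shape afterwards, using the mips32 flavor from the hunk above as the example:

    void as_install_arch(as_t *as)
    {
        entry_hi_t hi;

        hi.value = cp0_entry_hi_read();
        hi.asid = as->asid;    /* stable: as_switch() holds asidlock */
        cp0_entry_hi_write(hi.value);
    }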