Subversion Repositories HelenOS-historic

Compare Revisions


Rev 726 → Rev 727

/kernel/trunk/genarch/Makefile.inc
45,3 → 45,7
GENARCH_SOURCES += \
	genarch/src/mm/page_ht.c
endif
ifeq ($(CONFIG_ASID),y)
GENARCH_SOURCES += \
	genarch/src/mm/asid.c
endif
/kernel/trunk/genarch/src/mm/asid.c
0,0 → 1,221
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/*
 * ASID management.
 *
 * Modern processor architectures optimize TLB utilization
 * by using ASIDs (a.k.a. memory contexts on sparc64 and
 * region identifiers on ia64). These ASIDs help to associate
 * each TLB item with an address space, thus making
 * finer-grained TLB invalidation possible.
 *
 * Unfortunately, there are usually fewer ASIDs available than
 * there can be unique as_t structures (i.e. address spaces
 * recognized by the kernel).
 *
 * When the system runs short of ASIDs, it will attempt to steal
 * an ASID from an address space that has not been active for
 * a while.
 *
 * Architectures that don't have hardware support for address
 * spaces do not compile this file.
 */
 
#include <mm/asid.h>
#include <mm/as.h>
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <list.h>
#include <debug.h>
 
/**
 * asidlock protects both the asids_allocated counter
 * and the list of address spaces that have already
 * been assigned an ASID.
 */
SPINLOCK_INITIALIZE(asidlock);
 
static count_t asids_allocated = 0;
 
/**
 * List of address spaces with an assigned ASID.
 * When the system runs short of allocable
 * ASIDs, inactive address spaces are guaranteed
 * to be at the beginning of the list.
 */
LIST_INITIALIZE(as_with_asid_head);
 
 
/** Allocate free address space identifier.
 *
 * This code depends on the fact that ASIDS_ALLOCABLE
 * is greater than the number of supported CPUs. This
 * guarantees that the list of address spaces with
 * assigned ASIDs always contains at least one inactive
 * address space when stealing becomes necessary.
 *
 * @return New ASID.
 */
asid_t asid_get(void)
{
	ipl_t ipl;
	asid_t asid;
	link_t *tmp;
	as_t *as;

	/*
	 * Check if there is an unallocated ASID.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&asidlock);
	if (asids_allocated == ASIDS_ALLOCABLE) {

		/*
		 * All ASIDs are already allocated.
		 * Resort to stealing.
		 */

		/*
		 * Remove the first item on the list.
		 * It is guaranteed to belong to an
		 * inactive address space.
		 */
		tmp = as_with_asid_head.next;
		ASSERT(tmp != &as_with_asid_head);
		list_remove(tmp);
		as = list_get_instance(tmp, as_t, as_with_asid_link);
		spinlock_lock(&as->lock);

		/*
		 * Steal the ASID.
		 * Note that the stolen ASID is not active.
		 */
		asid = as->asid;
		ASSERT(asid != ASID_INVALID);

		/*
		 * Rid the system of the stolen ASID.
		 */
		tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
		tlb_shootdown_finalize();
		as->asid = ASID_INVALID;
		spinlock_unlock(&as->lock);
	} else {

		/*
		 * There is at least one unallocated ASID.
		 * Find it and assign it.
		 */
		asid = asid_find_free();
		asids_allocated++;
	}
	spinlock_unlock(&asidlock);
	interrupts_restore(ipl);

	return asid;
}
 
/** Release address space identifier.
 *
 * This code relies on architecture-dependent
 * functionality.
 *
 * @param asid ASID to be released.
 */
void asid_put(asid_t asid)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&asidlock);

	asids_allocated--;
	asid_put_arch(asid);

	spinlock_unlock(&asidlock);
	interrupts_restore(ipl);
}
 
/** Install ASID.
*
* This function is to be executed on each address space switch.
*
* @param as Address space.
*/
void asid_install(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&asidlock);
	spinlock_lock(&as->lock);

	if (as->asid != ASID_KERNEL) {
		if (as->asid != ASID_INVALID) {
			/*
			 * This address space has a valid ASID.
			 * Remove 'as' from the list of address spaces
			 * with assigned ASID, so that it can be later
			 * appended to the tail of the same list.
			 * This is to prevent stealing of ASIDs from
			 * recently installed address spaces.
			 */
			list_remove(&as->as_with_asid_link);
		} else {
			spinlock_unlock(&as->lock);
			spinlock_unlock(&asidlock);

			/*
			 * This address space doesn't have an ASID assigned.
			 * Either it was stolen or the address space is being
			 * installed for the first time.
			 * Allocate a new ASID for it.
			 */
			as->asid = asid_get();

			spinlock_lock(&asidlock);
			spinlock_lock(&as->lock);
		}

		/*
		 * Now 'as' is guaranteed to have an ASID.
		 * It is therefore appended to the list
		 * of address spaces from which ASIDs
		 * can be stolen.
		 */
		list_append(&as->as_with_asid_link, &as_with_asid_head);
	}

	spinlock_unlock(&as->lock);
	spinlock_unlock(&asidlock);
	interrupts_restore(ipl);
}
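
The stealing path in asid_get() is only correct because of the list discipline that asid_install() maintains: every install moves the address space to the tail of as_with_asid_head, so the head always holds the least recently installed one, and since ASIDS_ALLOCABLE exceeds the number of CPUs, that head entry is guaranteed to be inactive. A minimal userspace sketch of that discipline follows; list_initialize/list_remove/list_append and toy_as_t are local stand-ins written for this illustration, not the kernel's <list.h> API or as_t.

#include <stdio.h>

/* Stand-ins for HelenOS's circular doubly-linked list primitives. */
typedef struct link {
	struct link *prev, *next;
} link_t;

static void list_initialize(link_t *head) { head->prev = head->next = head; }

static void list_remove(link_t *l)
{
	l->prev->next = l->next;
	l->next->prev = l->prev;
}

static void list_append(link_t *l, link_t *head)
{
	l->prev = head->prev;
	l->next = head;
	head->prev->next = l;
	head->prev = l;
}

/* Toy address space: the link comes first so the cast below is valid. */
typedef struct {
	link_t as_with_asid_link;
	int asid;
} toy_as_t;

int main(void)
{
	link_t as_with_asid_head;
	toy_as_t a = { .asid = 2 }, b = { .asid = 3 }, c = { .asid = 4 };

	list_initialize(&as_with_asid_head);
	list_append(&a.as_with_asid_link, &as_with_asid_head);
	list_append(&b.as_with_asid_link, &as_with_asid_head);
	list_append(&c.as_with_asid_link, &as_with_asid_head);

	/* 'a' gets installed again: as in asid_install(), remove it and
	 * re-append it to the tail, protecting it from theft. */
	list_remove(&a.as_with_asid_link);
	list_append(&a.as_with_asid_link, &as_with_asid_head);

	/* asid_get() steals from the head: now 'b', the least
	 * recently installed address space. */
	toy_as_t *victim = (toy_as_t *) as_with_asid_head.next;
	printf("steal victim: asid %d\n", victim->asid);	/* prints 3 */
	return 0;
}

Running it prints "steal victim: asid 3": re-installing 'a' protected it, and 'b' became the oldest entry on the list.
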
/kernel/trunk/generic/include/mm/asid.h
26,9 → 26,35
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/*
 * This is the generic interface for managing
 * Address Space IDentifiers (ASIDs).
 */
 
#ifndef __ASID_H__
#define __ASID_H__
 
#include <arch/mm/asid.h>
#include <typedefs.h>
 
#define ASID_KERNEL 0
#define ASID_INVALID 1
#define ASID_START 2
#define ASID_MAX ASID_MAX_ARCH
 
#define ASIDS_ALLOCABLE ((ASID_MAX+1)-ASID_START)
 
extern spinlock_t asidlock;
extern link_t as_with_asid_head;
 
extern asid_t asid_get(void);
extern void asid_put(asid_t asid);
 
#ifndef asid_install
extern void asid_install(as_t *as);
#endif /* !def asid_install */
 
#define asid_find_free() ASID_START
#define asid_put_arch(x)
 
#endif
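
To make ASIDS_ALLOCABLE concrete, the following throwaway program evaluates it with the ASID_MAX_ARCH values defined by the architecture headers later in this changeset (mips32: 255, sparc64: 4095, ia64: 16777215). The computation mirrors the macro above; the program itself is illustration only, not kernel code.

#include <stdio.h>

#define ASID_START 2	/* from the header above: 0 is ASID_KERNEL, 1 is ASID_INVALID */

int main(void)
{
	/* ASIDS_ALLOCABLE == (ASID_MAX + 1) - ASID_START */
	printf("mips32:  %8d\n", (255 + 1) - ASID_START);	/* 254 */
	printf("sparc64: %8d\n", (4095 + 1) - ASID_START);	/* 4094 */
	printf("ia64:    %8d\n", (16777215 + 1) - ASID_START);	/* 16777214 */
	return 0;
}
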
/kernel/trunk/generic/include/mm/tlb.h
31,25 → 31,14
 
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
 
extern void tlb_init(void);
 
#ifdef CONFIG_SMP
extern void tlb_shootdown_start(void);
extern void tlb_shootdown_finalize(void);
extern void tlb_shootdown_ipi_recv(void);
#else
# define tlb_shootdown_start() ;
# define tlb_shootdown_finalize() ;
# define tlb_shootdown_ipi_recv() ;
#endif /* CONFIG_SMP */
 
/** Type of TLB shootdown message. */
enum tlb_invalidate_type {
	TLB_INVL_INVALID = 0,	/**< Invalid type. */
	TLB_INVL_ALL,		/**< Invalidate all entries in TLB. */
	TLB_INVL_ASID,		/**< Invalidate all entries belonging to one address space. */
	TLB_INVL_PAGE		/**< Invalidate one entry for specified page. */
	TLB_INVL_PAGES		/**< Invalidate specified page range belonging to one address space. */
};
 
typedef enum tlb_invalidate_type tlb_invalidate_type_t;
63,14 → 52,25
 
typedef struct tlb_shootdown_msg tlb_shootdown_msg_t;
 
extern void tlb_init(void);
 
#ifdef CONFIG_SMP
extern void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, __address page, count_t cnt);
extern void tlb_shootdown_finalize(void);
extern void tlb_shootdown_ipi_recv(void);
#else
# define tlb_shootdown_start(w, x, y, z)
# define tlb_shootdown_finalize()
# define tlb_shootdown_ipi_recv()
#endif /* CONFIG_SMP */
 
 
/* Export TLB interface that each architecture must implement. */
extern void tlb_arch_init(void);
extern void tlb_print(void);
extern void tlb_invalidate(asid_t asid);
extern void tlb_shootdown_ipi_send(void);
 
extern void tlb_invalidate_all(void);
extern void tlb_invalidate_asid(asid_t asid);
extern void tlb_invalidate_page(asid_t asid, __address page);
 
extern void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt);
#endif
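
The intended call sequence for the new four-argument interface is visible in asid_get() above: stealing an ASID issues tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0) followed by tlb_shootdown_finalize(). The sketch below models only the dispatch on the message type that the new tlb_shootdown_start() performs; the typedefs and printf bodies are stand-ins for the kernel types and the real invalidation calls.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mirrors enum tlb_invalidate_type above (TLB_INVL_PAGE became TLB_INVL_PAGES). */
typedef enum {
	TLB_INVL_INVALID = 0,
	TLB_INVL_ALL,
	TLB_INVL_ASID,
	TLB_INVL_PAGES
} tlb_invalidate_type_t;

typedef uint32_t asid_t;	/* stand-in; the real width is per-architecture */
typedef uintptr_t address_t;	/* stand-in for __address */
typedef size_t count_t;		/* stand-in for count_t */

static void shootdown_dispatch(tlb_invalidate_type_t type, asid_t asid,
			       address_t page, count_t cnt)
{
	switch (type) {
	case TLB_INVL_ALL:
		printf("invalidate all TLB entries\n");
		break;
	case TLB_INVL_ASID:
		printf("invalidate all entries of ASID %u\n", (unsigned) asid);
		break;
	case TLB_INVL_PAGES:
		printf("invalidate %zu page(s) from %#lx in ASID %u\n",
		       (size_t) cnt, (unsigned long) page, (unsigned) asid);
		break;
	default:
		printf("unknown message type %d\n", (int) type);
	}
}

int main(void)
{
	shootdown_dispatch(TLB_INVL_ASID, 42, 0, 0);	/* what asid_get() issues when stealing */
	shootdown_dispatch(TLB_INVL_PAGES, 42, 0x4000, 3);
	return 0;
}
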
/kernel/trunk/generic/include/mm/as.h
48,6 → 48,8
#define USTACK_ADDRESS USTACK_ADDRESS_ARCH
#define UDATA_ADDRESS UDATA_ADDRESS_ARCH
 
#define AS_KERNEL (1<<0) /**< Kernel address space. */
 
enum as_area_type {
AS_AREA_TEXT = 1, AS_AREA_DATA, AS_AREA_STACK
};
61,7 → 63,7
	SPINLOCK_DECLARE(lock);
	link_t link;
	as_area_type_t type;
	size_t size;		/**< Size of this area. */
	size_t size;		/**< Size of this area in multiples of PAGE_SIZE. */
	__address base;		/**< Base address of this area. */
	index_t *mapping;	/**< Map of physical frame numbers mapped to virtual page numbers in this area. */
};
74,6 → 76,9
* set up during system initialization.
*/
struct as {
	/** Protected by asidlock. Must be acquired before as->lock. */
	link_t as_with_asid_link;

	SPINLOCK_DECLARE(lock);
	link_t as_area_head;
	pte_t *ptl0;
80,20 → 85,13
asid_t asid; /**< Address space identifier. */
};
 
extern as_t * as_create(pte_t *ptl0);
extern as_t * as_create(pte_t *ptl0, int flags);
extern as_area_t *as_area_create(as_t *as, as_area_type_t type, size_t size, __address base);
extern void as_area_set_mapping(as_area_t *a, index_t vpn, index_t pfn);
extern int as_page_fault(__address page);
extern void as_install(as_t *m);
 
/*
* Each architecture should implement this function.
* Its main purpose is to do TLB purges according
* to architecture's requirements. Note that
* some architectures invalidate their TLB automatically
* on hardware address space switch (e.g. ia32 and
* amd64).
*/
/* Interface to be implemented by architectures. */
#ifndef as_install_arch
extern void as_install_arch(as_t *as);
#endif /* !def as_install_arch */
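
The lock-ordering comment on as_with_asid_link deserves emphasis: taking as->lock before asidlock could deadlock against asid_install(). A toy model of the documented order follows, with pthread mutexes standing in for the kernel spinlocks; asidlock and as->lock keep their real names, while toy_as_t and install() are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Global ordering rule: asidlock first, then any as->lock. */
static pthread_mutex_t asidlock = PTHREAD_MUTEX_INITIALIZER;

typedef struct {
	pthread_mutex_t lock;
	int asid;
} toy_as_t;

static void install(toy_as_t *as)
{
	pthread_mutex_lock(&asidlock);	/* global lock first... */
	pthread_mutex_lock(&as->lock);	/* ...then the per-address-space lock */

	/* ...update as->asid and the ASID list here... */

	pthread_mutex_unlock(&as->lock);
	pthread_mutex_unlock(&asidlock);
}

int main(void)
{
	toy_as_t as = { PTHREAD_MUTEX_INITIALIZER, 0 };

	install(&as);
	puts("locks taken and released in the documented order");
	return 0;
}
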
/kernel/trunk/generic/src/main/kinit.c
146,7 → 146,7
	if (KA2PA(config.init_addr) % FRAME_SIZE)
		panic("config.init_addr is not frame aligned");
	as = as_create(NULL);
	as = as_create(NULL, 0);
	if (!as)
		panic("as_create\n");
	u = task_create(as);
/kernel/trunk/generic/src/main/main.c
185,7 → 185,7
	/*
	 * Create kernel address space.
	 */
	as = as_create(GET_PTL0_ADDRESS());
	as = as_create(GET_PTL0_ADDRESS(), AS_KERNEL);
	if (!as)
		panic("can't create kernel address space\n");
 
/kernel/trunk/generic/src/mm/tlb.c
46,14 → 46,32
 
#ifdef CONFIG_SMP
/* must be called with interrupts disabled */
void tlb_shootdown_start(void)
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, __address page, count_t cnt)
{
	int i;

	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);

	/*
	 * TODO: assemble shootdown message.
	 */
	tlb_shootdown_ipi_send();
	tlb_invalidate(0); /* TODO: use valid ASID */

	switch (type) {
	case TLB_INVL_ALL:
		tlb_invalidate_all();
		break;
	case TLB_INVL_ASID:
		tlb_invalidate_asid(asid);
		break;
	case TLB_INVL_PAGES:
		tlb_invalidate_pages(asid, page, cnt);
		break;
	default:
		panic("unknown tlb_invalidate_type_t value: %d\n", type);
		break;
	}

busy_wait:
	for (i = 0; i < config.cpu_count; i++)
77,7 → 95,7
	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);
	spinlock_unlock(&tlblock);
	tlb_invalidate(0); /* TODO: use valid ASID */
	tlb_invalidate_all();
	CPU->tlb_active = 1;
}
#endif /* CONFIG_SMP */
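
The hunks above show only fragments of the SMP rendezvous, but its shape is recognizable: the initiator parks every CPU before TLB state is changed, and the parked CPUs wait on tlblock until the initiator finalizes. Below is a rough userspace model using pthreads and C11 atomics. It is a deliberately simplified sketch, not the kernel implementation: a spawned thread stands in for the IPI, a mutex for the tlblock spinlock, and an atomic flag array for the per-CPU tlb_active field.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NCPU 2

static atomic_bool tlb_active[NCPU] = { true, true };
static pthread_mutex_t tlblock = PTHREAD_MUTEX_INITIALIZER;

static pthread_t remote_thread;
static int remote_cpu = 1;

/* Remote side, modeled on tlb_shootdown_ipi_recv(). */
static void *ipi_recv(void *arg)
{
	int cpu = *(int *) arg;

	atomic_store(&tlb_active[cpu], false);
	pthread_mutex_lock(&tlblock);	/* blocks until the initiator finalizes */
	pthread_mutex_unlock(&tlblock);
	/* ...invalidate the local TLB here... */
	atomic_store(&tlb_active[cpu], true);
	return NULL;
}

/* Initiator side, modeled on tlb_shootdown_start()/_finalize(). */
static void shootdown_start(int cpu)
{
	int i;

	atomic_store(&tlb_active[cpu], false);
	pthread_mutex_lock(&tlblock);

	/* "Send the IPI" only after tlblock is held, as the kernel does. */
	pthread_create(&remote_thread, NULL, ipi_recv, &remote_cpu);

	/* ...perform own invalidation here... */

	for (i = 0; i < NCPU; i++)
		while (atomic_load(&tlb_active[i]))	/* busy-wait until every CPU parks */
			;
}

static void shootdown_finalize(int cpu)
{
	pthread_mutex_unlock(&tlblock);
	atomic_store(&tlb_active[cpu], true);
}

int main(void)
{
	shootdown_start(0);
	/* every CPU is now parked; TLB-visible state can be changed consistently */
	shootdown_finalize(0);
	pthread_join(remote_thread, NULL);
	puts("shootdown complete");
	return 0;
}
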
/kernel/trunk/generic/src/mm/as.c
33,6 → 33,7
*/
 
#include <mm/as.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/tlb.h>
39,6 → 40,7
#include <mm/heap.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/mm/as.h>
#include <arch/types.h>
70,16 → 72,20
* FIXME: this interface must be meaningful for all possible VAT
* (Virtual Address Translation) mechanisms.
*/
as_t *as_create(pte_t *ptl0)
as_t *as_create(pte_t *ptl0, int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t));
	if (as) {
		list_initialize(&as->as_with_asid_link);
		spinlock_initialize(&as->lock, "as_lock");
		list_initialize(&as->as_area_head);

		as->asid = asid_get();
		if (flags & AS_KERNEL)
			as->asid = ASID_KERNEL;
		else
			as->asid = ASID_INVALID;

		as->ptl0 = ptl0;
		if (!as->ptl0) {
289,6 → 295,8
{
	ipl_t ipl;

	asid_install(as);

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);
	ASSERT(as->ptl0);
298,7 → 306,7
 
	/*
	 * Perform architecture-specific steps.
	 * (e.g. invalidate TLB, install ASID etc.)
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(as);
/kernel/trunk/arch/sparc64/include/mm/asid.h
36,6 → 36,6
*/
typedef __u16 asid_t;
 
#define asid_get() 0
#define ASID_MAX_ARCH 4095 /* 2^12 - 1 */
 
#endif
/kernel/trunk/arch/sparc64/Makefile.inc
51,6 → 51,12
 
CONFIG_PAGE_HT = y
 
## Compile with support for address space identifiers.
#
 
CONFIG_ASID = y
 
 
ARCH_SOURCES = \
	arch/$(ARCH)/src/cpu/cpu.c \
	arch/$(ARCH)/src/asm.S \
/kernel/trunk/arch/sparc64/src/mm/tlb.c
165,14 → 165,19
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0);
}
 
/** Invalidate all ITLB and DLTB entries for specified page in specified address space.
/** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
 *
 * @param asid Address Space ID.
 * @param page Page which to sweep out from ITLB and DTLB.
 * @param page First page to sweep out of ITLB and DTLB.
 * @param cnt Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_page(asid_t asid, __address page)
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	/* TODO: write asid to some Context register and encode the register in second parameter below. */
	itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, page);
	dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, page);

	int i;

	for (i = 0; i < cnt; i++) {
		/* TODO: write asid to some Context register and encode the register in second parameter below. */
		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, page + i * PAGE_SIZE);
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, page + i * PAGE_SIZE);
	}
}
/kernel/trunk/arch/ia64/include/mm/asid.h
29,8 → 29,14
#ifndef __ia64_ASID_H__
#define __ia64_ASID_H__
 
typedef int asid_t;
#include <arch/types.h>
 
#define asid_get() 0
typedef __u32 asid_t;
 
/*
 * ASID_MAX can range from 2^18 - 1 to 2^24 - 1,
 * depending on the architecture implementation.
 */
#define ASID_MAX_ARCH 16777215 /* 2^24 - 1 */
 
#endif
/kernel/trunk/arch/ia64/Makefile.inc
47,6 → 47,12
 
CONFIG_PAGE_HT = y
 
## Compile with support for address space identifiers.
#
 
CONFIG_ASID = y
 
 
ARCH_SOURCES = \
	arch/$(ARCH)/src/start.S \
	arch/$(ARCH)/src/asm.S \
/kernel/trunk/arch/ppc32/include/mm/asid.h
31,6 → 31,8
 
typedef int asid_t;
 
#define asid_get() 0
#define ASID_MAX_ARCH 0
 
#define asid_install(as)
 
#endif
/kernel/trunk/arch/amd64/include/mm/asid.h
1,36 → 1,0
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __amd64_ASID_H__
#define __amd64_ASID_H__
 
typedef int asid_t;
 
#define asid_get() 0
 
#endif
link ../../../ia32/include/mm/asid.h
Property changes:
Added: svn:special
+*
\ No newline at end of property
/kernel/trunk/arch/mips32/include/mm/asid.h
30,16 → 30,9
#define __mips32_ASID_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
#define ASIDS 256
#define ASID_INVALID 0
#define ASID_START 1
#define ASID_MAX_ARCH 255
 
typedef __u8 asid_t;
 
extern asid_t asid_get(void);
extern void asid_put(asid_t asid);
extern bool asid_has_conflicts(asid_t asid);
 
#endif
/kernel/trunk/arch/mips32/Makefile.inc
51,7 → 51,12
 
CONFIG_PAGE_PT = y
 
## Compile with support for address space identifiers.
#
 
CONFIG_ASID = y
 
 
## Accepted MACHINEs
#
 
/kernel/trunk/arch/mips32/src/mm/asid.c
28,93 → 28,5
*/
 
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
#include <typedefs.h>
 
SPINLOCK_INITIALIZE(asid_usage_lock);
static count_t asid_usage[ASIDS]; /**< Usage tracking array for ASIDs */
 
/** Get ASID
*
* Get the least used ASID.
*
* @return ASID
*/
asid_t asid_get(void)
{
	ipl_t ipl;
	int i, j;
	count_t min;

	min = (unsigned) -1;

	ipl = interrupts_disable();
	spinlock_lock(&asid_usage_lock);

	for (i = ASID_START, j = ASID_START; i < ASIDS; i++) {
		if (asid_usage[i] < min) {
			j = i;
			min = asid_usage[i];
			if (!min)
				break;
		}
	}

	asid_usage[j]++;

	spinlock_unlock(&asid_usage_lock);
	interrupts_restore(ipl);

	return j;	/* 'j' holds the index of the least used ASID */
}
 
/** Release ASID
*
* Release ASID by decrementing its usage count.
*
* @param asid ASID.
*/
void asid_put(asid_t asid)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&asid_usage_lock);

	ASSERT(asid != ASID_INVALID);
	ASSERT(asid_usage[asid] > 0);

	asid_usage[asid]--;

	spinlock_unlock(&asid_usage_lock);
	interrupts_restore(ipl);
}
 
/** Find out whether ASID is used by more than one address space
 *
 * Find out whether ASID is used by more than one address space.
 *
 * @param asid ASID in question.
 *
 * @return True if 'asid' is used by more than one address space, false otherwise.
 */
bool asid_has_conflicts(asid_t asid)
{
	bool has_conflicts = false;
	ipl_t ipl;

	ASSERT(asid != ASID_INVALID);

	ipl = interrupts_disable();
	spinlock_lock(&asid_usage_lock);

	if (asid_usage[asid] > 1)
		has_conflicts = true;

	spinlock_unlock(&asid_usage_lock);
	interrupts_restore(ipl);

	return has_conflicts;
}
/kernel/trunk/arch/mips32/src/mm/tlb.c
27,7 → 27,7
*/
 
#include <arch/mm/tlb.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>
#include <mm/tlb.h>
#include <mm/page.h>
#include <mm/as.h>
494,13 → 494,15
	cp0_entry_hi_write(hi_save.value);
}
 
/** Invalidate TLB entry for specified page belonging to specified address space.
/** Invalidate TLB entries for specified page range belonging to specified address space.
*
* @param asid Address space identifier.
* @param page Page whose TLB entry is to be invalidated.
* @param page First page whose TLB entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_page(asid_t asid, __address page)
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
int i;
ipl_t ipl;
entry_lo_t lo0, lo1;
entry_hi_t hi, hi_save;
511,27 → 513,29
hi_save.value = cp0_entry_hi_read();
ipl = interrupts_disable();
 
hi.value = 0;
prepare_entry_hi(&hi, asid, page);
cp0_entry_hi_write(hi.value);
for (i = 0; i < cnt; i++) {
hi.value = 0;
prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
cp0_entry_hi_write(hi.value);
 
tlbp();
index.value = cp0_index_read();
tlbp();
index.value = cp0_index_read();
 
if (!index.p) {
/* Entry was found, index register contains valid index. */
tlbr();
if (!index.p) {
/* Entry was found, index register contains valid index. */
tlbr();
 
lo0.value = cp0_entry_lo0_read();
lo1.value = cp0_entry_lo1_read();
lo0.value = cp0_entry_lo0_read();
lo1.value = cp0_entry_lo1_read();
 
lo0.v = 0;
lo1.v = 0;
lo0.v = 0;
lo1.v = 0;
 
cp0_entry_lo0_write(lo0.value);
cp0_entry_lo1_write(lo1.value);
cp0_entry_lo0_write(lo0.value);
cp0_entry_lo1_write(lo1.value);
 
tlbwi();
tlbwi();
}
}
interrupts_restore(ipl);
/kernel/trunk/arch/mips32/src/mm/as.c
35,7 → 35,7
 
/** Install address space.
*
* Install ASID and if necessary, purge TLB.
* Install ASID.
*
* @param as Address space structure.
*/
45,11 → 45,6
	ipl_t ipl;

	/*
	 * If necessary, purge TLB.
	 */
	tlb_invalidate_asid(as->asid); /* TODO: do it only if necessary */

	/*
	 * Install ASID.
	 */
	hi.value = cp0_entry_hi_read();
/kernel/trunk/arch/ia32/include/mm/asid.h
26,11 → 26,19
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/*
 * ia32 has no hardware support for address space identifiers.
 * This file provides a no-op implementation of the mm/asid.h
 * interface.
 */
 
#ifndef __ia32_ASID_H__
#define __ia32_ASID_H__
 
typedef int asid_t;
 
#define asid_get() 0
#define ASID_MAX_ARCH 0
 
#define asid_install(as)
 
#endif
/kernel/trunk/arch/ia32/src/mm/tlb.c
31,17 → 31,6
#include <arch/asm.h>
#include <arch/types.h>
 
/** Invalidate all TLB entries
*
* Invalidate all TLB entries.
*
* @param asid This argument is ignored.
*/
void tlb_invalidate(asid_t asid)
{
	write_cr3(read_cr3());
}
 
/** Invalidate all entries in TLB. */
void tlb_invalidate_all(void)
{
57,12 → 46,16
	tlb_invalidate_all();
}
 
/** Invalidate TLB entry for specified page belongs to specified address space.
/** Invalidate TLB entry for specified page range belonging to specified address space.
*
* @param asid This parameter is ignored as the architecture doesn't support it.
* @param page Address of the page whose entry is to be invalidated.
* @param page Address of the first page whose entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_page(asid_t asid, __address page)
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	invlpg(page);
	int i;

	for (i = 0; i < cnt; i++)
		invlpg(page + i * PAGE_SIZE);
}