Subversion Repositories HelenOS-historic

Compare Revisions

Ignore whitespace Rev 395 → Rev 396

/SPARTAN/trunk/test/mm/mapping1/test.c
0,0 → 1,79
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <print.h>
#include <test.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <debug.h>
 
#define PAGE0 0x10000000
#define PAGE1 (PAGE0+PAGE_SIZE)
 
#define VALUE0 0x01234567
#define VALUE1 0x89abcdef
 
/** Memory management mapping test #1
 *
 * Allocate two frames, map two virtual pages onto them, and verify that
 * reads and writes through the mappings reach the underlying frames.
 */
void test(void)
{
	__address first_frame, second_frame;
	__u32 val0, val1;

	printf("Memory management test mapping #1\n");

	/* Allocate two frames and tag each with a distinct magic value. */
	first_frame = frame_alloc(FRAME_KA);
	second_frame = frame_alloc(FRAME_KA);
	*((__u32 *) first_frame) = VALUE0;
	*((__u32 *) second_frame) = VALUE1;

	/* Map both test pages onto the freshly tagged frames. */
	printf("Mapping %X to %X.\n", PAGE0, KA2PA(first_frame));
	map_page_to_frame(PAGE0, KA2PA(first_frame), PAGE_PRESENT, 0);
	printf("Mapping %X to %X.\n", PAGE1, KA2PA(second_frame));
	map_page_to_frame(PAGE1, KA2PA(second_frame), PAGE_PRESENT, 0);

	/* Reads through the mappings must return the tagged values. */
	val0 = *((__u32 *) PAGE0);
	printf("Value at %X is %X.\n", PAGE0, val0);
	val1 = *((__u32 *) PAGE1);
	printf("Value at %X is %X.\n", PAGE1, val1);
	ASSERT(val0 == VALUE0);
	ASSERT(val1 == VALUE1);

	/* Writes through the mappings must be visible on read-back. */
	printf("Writing 0 to %X.\n", PAGE0);
	*((__u32 *) PAGE0) = 0;
	printf("Writing 0 to %X.\n", PAGE1);
	*((__u32 *) PAGE1) = 0;
	val0 = *((__u32 *) PAGE0);
	printf("Value at %X is %X.\n", PAGE0, val0);
	val1 = *((__u32 *) PAGE1);
	printf("Value at %X is %X.\n", PAGE1, val1);

	ASSERT(val0 == 0);
	ASSERT(val1 == 0);
	printf("Test passed.\n");
}
/SPARTAN/trunk/Makefile.config
40,3 → 40,4
#TEST_DIR=fpu/mips1/
#TEST_DIR=print/print1/
#TEST_DIR=thread/thread1/
#TEST_DIR=mm/mapping1/
/SPARTAN/trunk/include/mm/tlb.h
29,6 → 29,8
#ifndef __TLB_H__
#define __TLB_H__
 
#include <arch/mm/asid.h>
 
extern void tlb_init(void);
 
#ifdef __SMP__
43,7 → 45,7
 
/* Export TLB interface that each architecture must implement. */
extern void tlb_init_arch(void);
extern void tlb_invalidate(asid_t asid);
extern void tlb_shootdown_ipi_send(void);
 
#endif
/SPARTAN/trunk/arch/mips32/include/mm/asid.h
30,10 → 30,16
#define __mips32_ASID_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
#define ASIDS 256
#define ASID_INVALID 0
#define ASID_START 1
 
typedef __u8 asid_t;
 
extern asid_t asid_get(void);
extern void asid_put(asid_t asid);
extern bool asid_has_conflicts(asid_t asid);
 
#endif
/SPARTAN/trunk/arch/mips32/include/mm/tlb.h
41,14 → 41,17
#define PAGE_UNCACHED 2
#define PAGE_CACHEABLE_EXC_WRITE 5
 
/*
 * EntryLo CP0 register.
 * The anonymous bitfield struct is overlaid with a raw 32-bit view so the
 * whole register can be read/written in one piece via .value (no pointer
 * type punning needed).
 */
union entry_lo {
	struct {
		unsigned g : 1; /* global bit */
		unsigned v : 1; /* valid bit */
		unsigned d : 1; /* dirty/write-protect bit */
		unsigned c : 3; /* cache coherency attribute */
		unsigned pfn : 24; /* frame number */
		unsigned zero: 2; /* zero */
	} __attribute__ ((packed));
	__u32 value;
};
 
struct pte {
unsigned g : 1; /* global bit */
60,24 → 63,38
unsigned a : 1; /* accessed */
} __attribute__ ((packed));
 
/*
 * EntryHi CP0 register (ASID + virtual page number pair).
 * .value gives raw 32-bit access to the whole register.
 */
union entry_hi {
	struct {
		unsigned asid : 8;
		unsigned : 5;
		unsigned vpn2 : 19;
	} __attribute__ ((packed));
	__u32 value;
};
 
/*
 * PageMask CP0 register (page size selection).
 * .value gives raw 32-bit access to the whole register.
 */
union page_mask {
	struct {
		unsigned : 13;
		unsigned mask : 12;
		unsigned : 7;
	} __attribute__ ((packed));
	__u32 value;
};
 
/*
 * Index CP0 register; p is the probe-failure bit set by TLBP when no
 * matching entry is found. .value gives raw 32-bit access.
 */
union index {
	struct {
		unsigned index : 4;
		unsigned : 27;
		unsigned p : 1;
	} __attribute__ ((packed));
	__u32 value;
};
 
typedef union entry_lo entry_lo_t;
typedef union entry_hi entry_hi_t;
typedef union page_mask page_mask_t;
typedef union index tlb_index_t;
 
/** Probe TLB for Matching Entry
*
* Probe TLB for Matching Entry.
/SPARTAN/trunk/arch/mips32/src/mm/asid.c
1,5 → 1,6
/*
* Copyright (C) 2005 Martin Decky
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
30,9 → 31,8
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
#include <typedefs.h>
 
#define ASIDS 256
 
static spinlock_t asid_usage_lock;
static count_t asid_usage[ASIDS]; /**< Usage tracking array for ASIDs */
 
53,7 → 53,7
pri = cpu_priority_high();
spinlock_lock(&asid_usage_lock);
for (i=0, j = 0; (i<ASIDS); i++) {
for (i = ASID_START, j = ASID_START; i < ASIDS; i++) {
if (asid_usage[i] < min) {
j = i;
min = asid_usage[i];
62,7 → 62,7
}
}
 
asid_usage[i]++;
asid_usage[j]++;
 
spinlock_unlock(&asid_usage_lock);
cpu_priority_restore(pri);
83,6 → 83,8
pri = cpu_priority_high();
spinlock_lock(&asid_usage_lock);
 
ASSERT(asid != ASID_INVALID);
ASSERT(asid_usage[asid] > 0);
asid_usage[asid]--;
 
89,3 → 91,30
spinlock_unlock(&asid_usage_lock);
cpu_priority_restore(pri);
}
 
/** Find out whether ASID is used by more address spaces
 *
 * Find out whether ASID is used by more address spaces.
 *
 * @param asid ASID in question.
 *
 * @return True if 'asid' is used by more address spaces, false otherwise.
 */
bool asid_has_conflicts(asid_t asid)
{
	pri_t pri;
	bool conflicts;

	ASSERT(asid != ASID_INVALID);

	pri = cpu_priority_high();
	spinlock_lock(&asid_usage_lock);

	/* More than one address space holding the ASID means a conflict. */
	conflicts = (asid_usage[asid] > 1);

	spinlock_unlock(&asid_usage_lock);
	cpu_priority_restore(pri);

	return conflicts;
}
/SPARTAN/trunk/arch/mips32/src/mm/tlb.c
37,6 → 37,7
#include <symtab.h>
#include <synch/spinlock.h>
#include <print.h>
#include <debug.h>
 
static void tlb_refill_fail(struct exception_regdump *pstate);
static void tlb_invalid_fail(struct exception_regdump *pstate);
43,7 → 44,7
static void tlb_modified_fail(struct exception_regdump *pstate);
 
static pte_t *find_mapping_and_check(__address badvaddr);
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, int c, __address pfn);
 
/** Initialize TLB
*
82,7 → 83,7
*/
void tlb_refill(struct exception_regdump *pstate)
{
struct entry_lo lo;
entry_lo_t lo;
__address badvaddr;
pte_t *pte;
104,12 → 105,12
* New entry is to be inserted into TLB
*/
if ((badvaddr/PAGE_SIZE) % 2 == 0) {
cp0_entry_lo0_write(*((__u32 *) &lo));
cp0_entry_lo0_write(lo.value);
cp0_entry_lo1_write(0);
}
else {
cp0_entry_lo0_write(0);
cp0_entry_lo1_write(*((__u32 *) &lo));
cp0_entry_lo1_write(lo.value);
}
tlbwr();
 
129,9 → 130,9
*/
void tlb_invalid(struct exception_regdump *pstate)
{
struct index index;
tlb_index_t index;
__address badvaddr;
struct entry_lo lo;
entry_lo_t lo;
pte_t *pte;
 
badvaddr = cp0_badvaddr_read();
140,7 → 141,7
* Locate the faulting entry in TLB.
*/
tlbp();
*((__u32 *) &index) = cp0_index_read();
index.value = cp0_index_read();
spinlock_lock(&VM->lock);
147,8 → 148,10
/*
* Fail if the entry is not in TLB.
*/
if (index.p)
if (index.p) {
printf("TLB entry not found.\n");
goto fail;
}
 
pte = find_mapping_and_check(badvaddr);
if (!pte)
170,9 → 173,9
* The entry is to be updated in TLB.
*/
if ((badvaddr/PAGE_SIZE) % 2 == 0)
cp0_entry_lo0_write(*((__u32 *) &lo));
cp0_entry_lo0_write(lo.value);
else
cp0_entry_lo1_write(*((__u32 *) &lo));
cp0_entry_lo1_write(lo.value);
tlbwi();
 
spinlock_unlock(&VM->lock);
189,12 → 192,11
*
* @param pstate Interrupted register context.
*/
 
void tlb_modified(struct exception_regdump *pstate)
{
struct index index;
tlb_index_t index;
__address badvaddr;
struct entry_lo lo;
entry_lo_t lo;
pte_t *pte;
 
badvaddr = cp0_badvaddr_read();
203,7 → 205,7
* Locate the faulting entry in TLB.
*/
tlbp();
*((__u32 *) &index) = cp0_index_read();
index.value = cp0_index_read();
spinlock_lock(&VM->lock);
210,8 → 212,10
/*
* Fail if the entry is not in TLB.
*/
if (index.p)
if (index.p) {
printf("TLB entry not found.\n");
goto fail;
}
 
pte = find_mapping_and_check(badvaddr);
if (!pte)
240,9 → 244,9
* The entry is to be updated in TLB.
*/
if ((badvaddr/PAGE_SIZE) % 2 == 0)
cp0_entry_lo0_write(*((__u32 *) &lo));
cp0_entry_lo0_write(lo.value);
else
cp0_entry_lo1_write(*((__u32 *) &lo));
cp0_entry_lo1_write(lo.value);
tlbwi();
 
spinlock_unlock(&VM->lock);
288,14 → 292,35
panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
}
 
 
/** Invalidate TLB entries with specified ASID
 *
 * Invalidate TLB entries with specified ASID. Each TLB slot is read back
 * and, when its EntryHi ASID matches, the slot is overwritten with a
 * zeroed entry.
 *
 * @param asid ASID whose entries are to be invalidated; must not be
 *             ASID_INVALID.
 */
void tlb_invalidate(asid_t asid)
{
	entry_hi_t hi;
	pri_t pri;
	int i;

	ASSERT(asid != ASID_INVALID);

	pri = cpu_priority_high();
	/* Walk every TLB slot and wipe those tagged with the matching ASID. */
	for (i = 0; i < TLB_SIZE; i++) {
		cp0_index_write(i);
		tlbr();
		hi.value = cp0_entry_hi_read();
		if (hi.asid == asid) {
			/* Clear the slot and write the zeroed entry back. */
			cp0_pagemask_write(TLB_PAGE_MASK_16K);
			cp0_entry_hi_write(0);
			cp0_entry_lo0_write(0);
			cp0_entry_lo1_write(0);
			tlbwi();
		}
	}
	cpu_priority_restore(pri);
}
311,34 → 336,40
*/
/** Check TLB miss prerequisites and locate the mapping for badvaddr.
 *
 * Reads EntryHi (filled in by the CPU on the fault) and verifies that the
 * faulting ASID belongs to the current address space, that a mapping for
 * badvaddr exists, and that the mapping is valid. Prints a diagnostic and
 * returns NULL on any failed check.
 *
 * @param badvaddr Faulting virtual address.
 *
 * @return PTE for badvaddr, or NULL if the fault cannot be serviced.
 */
pte_t *find_mapping_and_check(__address badvaddr)
{
	entry_hi_t hi;
	pte_t *pte;

	hi.value = cp0_entry_hi_read();

	/*
	 * Handler cannot succeed if the ASIDs don't match.
	 */
	if (hi.asid != VM->asid) {
		printf("EntryHi.asid=%d, VM->asid=%d\n", hi.asid, VM->asid);
		return NULL;
	}

	/*
	 * Handler cannot succeed if badvaddr has no mapping.
	 */
	pte = find_mapping(badvaddr, 0);
	if (!pte) {
		printf("No such mapping.\n");
		return NULL;
	}

	/*
	 * Handler cannot succeed if the mapping is marked as invalid.
	 */
	if (!pte->v) {
		printf("Invalid mapping.\n");
		return NULL;
	}

	return pte;
}
 
void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, int c, __address pfn)
void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, int c, __address pfn)
{
lo->g = g;
lo->v = v;
/SPARTAN/trunk/arch/mips32/src/mm/vm.c
31,7 → 31,6
#include <mm/vm.h>
#include <arch/cp0.h>
#include <arch.h>
#include <print.h>
 
/** Install ASID of the current VM
*
41,10 → 40,10
*/
void vm_install_arch(vm_t *vm)
{
struct entry_hi hi;
entry_hi_t hi;
pri_t pri;
*((__u32 *) &hi) = cp0_entry_hi_read();
hi.value = cp0_entry_hi_read();
 
pri = cpu_priority_high();
spinlock_lock(&vm->lock);
/SPARTAN/trunk/arch/ia32/src/mm/tlb.c
27,9 → 27,16
*/
 
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <arch/asm.h>
 
/** Invalidate all TLB entries
 *
 * Invalidate all TLB entries. ia32 cannot invalidate by ASID, so the
 * whole TLB is flushed by reloading CR3.
 *
 * @param asid This argument is ignored.
 */
void tlb_invalidate(asid_t asid)
{
	/* Writing CR3 flushes all non-global TLB entries. */
	write_cr3(read_cr3());
}