/kernel/trunk/arch/ppc32/include/interrupt.h |
---|
58,4 → 58,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/exception.h |
---|
102,4 → 102,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/types.h |
---|
67,4 → 67,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/boot/boot.h |
---|
82,8 → 82,8 |
} keyboard_t; |
typedef struct { |
memmap_t memmap; |
taskmap_t taskmap; |
memmap_t memmap; |
screen_t screen; |
keyboard_t keyboard; |
} bootinfo_t; |
96,4 → 96,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/arch.h |
---|
41,4 → 41,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/mm/frame.h |
---|
54,4 → 54,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/mm/memory_init.h |
---|
45,4 → 45,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/mm/page.h |
---|
132,24 → 132,6 |
extern void page_arch_init(void); |
/** Page Hash Table entry (32-bit PowerPC hashed PTE, two 32-bit words).
 *
 * NOTE(review): the declaration relies on the compiler allocating
 * bitfields MSB-first so the fields line up with the hardware PTE
 * layout -- confirm for the target ABI.
 */
typedef struct {
	unsigned v : 1;          /**< Valid */
	unsigned vsid : 24;      /**< Virtual Segment ID */
	unsigned h : 1;          /**< Primary/secondary hash */
	unsigned api : 6;        /**< Abbreviated Page Index */
	unsigned rpn : 20;       /**< Real Page Number */
	unsigned reserved0 : 3;
	unsigned r : 1;          /**< Reference */
	unsigned c : 1;          /**< Change */
	unsigned wimg : 4;       /**< Access control (Write-through, cache-Inhibited, Memory coherent, Guarded) */
	unsigned reserved1 : 1;
	unsigned pp : 2;         /**< Page protection */
} phte_t;
extern void pht_refill(int n, istate_t *istate); |
extern bool pht_real_refill(int n, istate_t *istate) __attribute__ ((section("K_UNMAPPED_TEXT_START"))); |
extern void pht_init(void); |
#endif /* __ASM__ */ |
#endif /* KERNEL */ |
/kernel/trunk/arch/ppc32/include/mm/asid.h |
---|
35,15 → 35,13 |
#ifndef __ppc32_ASID_H__ |
#define __ppc32_ASID_H__ |
typedef int asid_t; |
#include <arch/types.h> |
#define ASID_MAX_ARCH 3 |
#define ASID_MAX_ARCH 4096 |
#define asid_get() (ASID_START+1) |
#define asid_put(asid) |
typedef __u32 asid_t; |
#endif |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/mm/tlb.h |
---|
35,9 → 35,25 |
#ifndef __ppc32_TLB_H__ |
#define __ppc32_TLB_H__ |
/** Page Hash Table entry (32-bit PowerPC hashed PTE, two 32-bit words).
 *
 * NOTE(review): the declaration relies on the compiler allocating
 * bitfields MSB-first so the fields line up with the hardware PTE
 * layout -- confirm for the target ABI.
 */
typedef struct {
	unsigned v : 1;          /**< Valid */
	unsigned vsid : 24;      /**< Virtual Segment ID */
	unsigned h : 1;          /**< Primary/secondary hash */
	unsigned api : 6;        /**< Abbreviated Page Index */
	unsigned rpn : 20;       /**< Real Page Number */
	unsigned reserved0 : 3;
	unsigned r : 1;          /**< Reference */
	unsigned c : 1;          /**< Change */
	unsigned wimg : 4;       /**< Access control (Write-through, cache-Inhibited, Memory coherent, Guarded) */
	unsigned reserved1 : 1;
	unsigned pp : 2;         /**< Page protection */
} phte_t;
extern void pht_refill(int n, istate_t *istate); |
extern bool pht_real_refill(int n, istate_t *istate) __attribute__ ((section("K_UNMAPPED_TEXT_START"))); |
extern void pht_init(void); |
#endif |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/include/mm/as.h |
---|
44,8 → 44,6 |
#define USTACK_ADDRESS_ARCH (0x7fffffff-(PAGE_SIZE-1)) |
#define as_install_arch(as) |
extern void as_arch_init(void); |
#endif |
52,4 → 50,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/Makefile.inc |
---|
55,6 → 55,12 |
CONFIG_PAGE_PT = y |
DEFS += -DCONFIG_PAGE_PT |
## Compile with support for address space identifiers. |
# |
CONFIG_ASID = y |
CONFIG_ASID_FIFO = y |
## Compile with support for software integer division. |
# |
/kernel/trunk/arch/ppc32/src/exception.S |
---|
137,13 → 137,13 |
# Data Storage exception (vector 2): try a fast PHT refill in real mode;
# fall back to the full kernel page-fault path only if that fails.
exc_data_storage:
	CONTEXT_STORE

	# li r3, 2
	# mr r4, sp
	# addi r4, r4, 8
	# bl pht_real_refill
	li r3, 2                # arg0: exception vector number
	mr r4, sp
	addi r4, r4, 8          # arg1: pointer to the saved istate on the stack
	bl pht_real_refill

	# cmpwi r3, 0
	# bne iret_real
	cmpwi r3, 0
	bne iret_real           # nonzero: refill succeeded, return without entering kernel

	li r3, 2
	b jump_to_kernel
153,13 → 153,13 |
# Instruction Storage exception (vector 3): try a fast PHT refill in real
# mode; fall back to the full kernel page-fault path only if that fails.
exc_instruction_storage:
	CONTEXT_STORE

	# li r3, 3
	# mr r4, sp
	# addi r4, r4, 8
	# bl pht_real_refill
	li r3, 3                # arg0: exception vector number
	mr r4, sp
	addi r4, r4, 8          # arg1: pointer to the saved istate on the stack
	bl pht_real_refill

	# cmpwi r3, 0
	# bne iret_real
	cmpwi r3, 0
	bne iret_real           # nonzero: refill succeeded, return without entering kernel

	li r3, 3
	b jump_to_kernel
/kernel/trunk/arch/ppc32/src/proc/scheduler.c |
---|
46,7 → 46,6 |
/** Perform ppc32 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
pht_init(); |
tlb_invalidate_all(); |
asm volatile ( |
"mtsprg0 %0\n" |
61,4 → 60,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/src/mm/tlb.c |
---|
33,13 → 33,319 |
*/ |
#include <mm/tlb.h> |
#include <arch/mm/tlb.h> |
#include <arch/interrupt.h> |
#include <mm/as.h> |
#include <arch.h> |
#include <print.h> |
#include <symtab.h> |
/** Initialize Page Hash Table. |
/** Try to find PTE for faulting address |
* |
* Setup the Page Hash Table with no entries. |
* Try to find PTE for faulting address. |
* The as->lock must be held on entry to this function |
* if lock is true. |
* |
* @param as Address space. |
* @param lock Lock/unlock the address space. |
* @param badvaddr Faulting virtual address. |
* @param access Access mode that caused the fault. |
* @param istate Pointer to interrupted state. |
* @param pfrc Pointer to variable where as_page_fault() return code will be stored. |
* @return PTE on success, NULL otherwise. |
* |
*/ |
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, istate_t *istate, int *pfrc)
{
	/*
	 * Check if the mapping exists in page tables.
	 */
	pte_t *pte = page_mapping_find(as, badvaddr);
	if ((pte) && (pte->p)) {
		/*
		 * Mapping found in page tables.
		 * Immediately succeed.
		 */
		return pte;
	} else {
		int rc;

		/*
		 * Mapping not found in page tables.
		 * Resort to higher-level page fault handler.
		 * The page table lock must be dropped first because
		 * as_page_fault() may block.
		 */
		page_table_unlock(as, lock);
		switch (rc = as_page_fault(badvaddr, access, istate)) {
		case AS_PF_OK:
			/*
			 * The higher-level page fault handler succeeded,
			 * The mapping ought to be in place.
			 */
			page_table_lock(as, lock);
			pte = page_mapping_find(as, badvaddr);
			ASSERT((pte) && (pte->p));
			return pte;
		case AS_PF_DEFER:
			/* Fault deferred (copy_{from,to}_uspace); caller bails out. */
			page_table_lock(as, lock);
			*pfrc = rc;
			return NULL;
		case AS_PF_FAULT:
			page_table_lock(as, lock);
			printf("Page fault.\n");
			*pfrc = rc;
			return NULL;
		default:
			/* panic() does not return. */
			panic("unexpected rc (%d)\n", rc);
		}
	}
}
/** Report an unrecoverable PHT refill fault and halt.
 *
 * Resolves the faulting PC and the link register to symbol names
 * (when available) before panicking.
 */
static void pht_refill_fail(__address badvaddr, istate_t *istate)
{
	char *pc_name = get_symtab_entry(istate->pc);
	char *lr_name = get_symtab_entry(istate->lr);

	if (!pc_name)
		pc_name = "";
	if (!lr_name)
		lr_name = "";

	panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, pc_name, lr_name);
}
static void pht_insert(const __address vaddr, const pfn_t pfn) |
{ |
__u32 page = (vaddr >> 12) & 0xffff; |
__u32 api = (vaddr >> 22) & 0x3f; |
__u32 vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
: "=r" (vsid) |
: "r" (vaddr) |
); |
__u32 sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
); |
phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000); |
/* Primary hash (xor) */ |
__u32 h = 0; |
__u32 hash = vsid ^ page; |
__u32 base = (hash & 0x3ff) << 3; |
__u32 i; |
bool found = false; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) { |
found = true; |
break; |
} |
} |
if (!found) { |
/* Secondary hash (not) */ |
__u32 base2 = (~hash & 0x3ff) << 3; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) { |
found = true; |
base = base2; |
h = 1; |
break; |
} |
} |
if (!found) { |
// TODO: A/C precedence groups |
i = page % 8; |
} |
} |
phte[base + i].v = 1; |
phte[base + i].vsid = vsid; |
phte[base + i].h = h; |
phte[base + i].api = api; |
phte[base + i].rpn = pfn; |
phte[base + i].r = 0; |
phte[base + i].c = 0; |
phte[base + i].pp = 2; // FIXME |
} |
static void pht_real_insert(const __address vaddr, const pfn_t pfn) |
{ |
__u32 page = (vaddr >> 12) & 0xffff; |
__u32 api = (vaddr >> 22) & 0x3f; |
__u32 vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
: "=r" (vsid) |
: "r" (vaddr) |
); |
__u32 sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
); |
phte_t *phte_physical = (phte_t *) (sdr1 & 0xffff0000); |
/* Primary hash (xor) */ |
__u32 h = 0; |
__u32 hash = vsid ^ page; |
__u32 base = (hash & 0x3ff) << 3; |
__u32 i; |
bool found = false; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte_physical[base + i].v) || ((phte_physical[base + i].vsid == vsid) && (phte_physical[base + i].api == api))) { |
found = true; |
break; |
} |
} |
if (!found) { |
/* Secondary hash (not) */ |
__u32 base2 = (~hash & 0x3ff) << 3; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte_physical[base2 + i].v) || ((phte_physical[base2 + i].vsid == vsid) && (phte_physical[base2 + i].api == api))) { |
found = true; |
base = base2; |
h = 1; |
break; |
} |
} |
if (!found) { |
// TODO: A/C precedence groups |
i = page % 8; |
} |
} |
phte_physical[base + i].v = 1; |
phte_physical[base + i].vsid = vsid; |
phte_physical[base + i].h = h; |
phte_physical[base + i].api = api; |
phte_physical[base + i].rpn = pfn; |
phte_physical[base + i].r = 0; |
phte_physical[base + i].c = 0; |
phte_physical[base + i].pp = 2; // FIXME |
} |
/** Process Instruction/Data Storage Interrupt |
* |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* |
*/ |
/** Process Instruction/Data Storage Interrupt.
 *
 * Resolve the faulting address through the page tables (invoking the
 * generic page-fault handler if needed) and load the resulting
 * translation into the Page Hash Table. Panics on an unresolvable
 * fault.
 *
 * Fix over the original: the unreachable `break` after `goto fail`
 * was removed.
 *
 * @param n      Interrupt vector number.
 * @param istate Interrupted register context.
 */
void pht_refill(int n, istate_t *istate)
{
	__address badvaddr;
	pte_t *pte;
	int pfrc;
	as_t *as;
	bool lock;

	if (AS == NULL) {
		/* Fault before any address space is installed: use the
		   kernel address space, no locking required yet. */
		as = AS_KERNEL;
		lock = false;
	} else {
		as = AS;
		lock = true;
	}

	if (n == VECTOR_DATA_STORAGE) {
		/* Data faults report the address in DAR. */
		asm volatile (
			"mfdar %0\n"
			: "=r" (badvaddr)
		);
	} else
		badvaddr = istate->pc;	/* Instruction faults: the PC itself. */

	page_table_lock(as, lock);

	pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc);
	if (!pte) {
		switch (pfrc) {
		case AS_PF_FAULT:
			goto fail;
		case AS_PF_DEFER:
			/*
			 * The page fault came during copy_from_uspace()
			 * or copy_to_uspace().
			 */
			page_table_unlock(as, lock);
			return;
		default:
			panic("Unexpected pfrc (%d)\n", pfrc);
		}
	}

	pte->a = 1; /* Record access to PTE */
	pht_insert(badvaddr, pte->pfn);

	page_table_unlock(as, lock);
	return;

fail:
	page_table_unlock(as, lock);
	pht_refill_fail(badvaddr, istate);
}
/** Process Instruction/Data Storage Interrupt in Real Mode |
* |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* |
*/ |
/** Process Instruction/Data Storage Interrupt in Real Mode.
 *
 * Fast path executed with translation disabled: if the faulting
 * address lies inside the kernel's identity-mapped physical memory
 * window, install the 1:1 mapping directly and report success.
 *
 * @param n      Interrupt vector number.
 * @param istate Interrupted register context.
 * @return true if the fault was handled here, false to fall back to
 *         the full kernel page-fault path.
 */
bool pht_real_refill(int n, istate_t *istate)
{
	__address fault_addr;

	if (n == VECTOR_DATA_STORAGE) {
		asm volatile (
			"mfdar %0\n"
			: "=r" (fault_addr)
		);
	} else {
		fault_addr = istate->pc;
	}

	/* Physical memory size was stashed in SPRG3 by early boot code. */
	__u32 physmem;
	asm volatile (
		"mfsprg3 %0\n"
		: "=r" (physmem)
	);

	/* Outside the kernel identity-mapped window: not ours to handle. */
	if ((fault_addr < PA2KA(0)) || (fault_addr >= PA2KA(physmem)))
		return false;

	pht_real_insert(fault_addr, KA2PA(fault_addr) >> 12);
	return true;
}
void tlb_arch_init(void) |
{ |
tlb_invalidate_all(); |
55,33 → 361,24 |
} |
/** Invalidate all entries in TLB that belong to specified address space.
 *
 * @param asid This parameter is ignored as the architecture doesn't support it.
 */
void tlb_invalidate_asid(asid_t asid)
{
	// TODO: flush only entries belonging to asid instead of everything
	tlb_invalidate_all();
}
/** Invalidate TLB entries for specified page range belonging to specified address space.
 *
 * @param asid This parameter is ignored as the architecture doesn't support it.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	// TODO: invalidate only the [page, page + cnt * PAGE_SIZE) range
	tlb_invalidate_all();
}
/** Print contents of Page Hash Table. */
void tlb_print(void)
{
	// TODO: walk the PHT pointed to by SDR1 and print valid entries
}
/** @} |
*/ |
/kernel/trunk/arch/ppc32/src/mm/as.c |
---|
34,12 → 34,55 |
#include <arch/mm/as.h> |
#include <genarch/mm/as_pt.h> |
#include <genarch/mm/asid_fifo.h> |
#include <arch.h> |
/** Architecture dependent address space init. */
void as_arch_init(void)
{
	/* ppc32 manages the in-memory mapping with hierarchical page tables. */
	as_operations = &as_pt_operations;
	/* ASIDs are handed out by the generic FIFO allocator. */
	asid_fifo_init();
}
/** Install address space. |
* |
* Install ASID. |
* |
* @param as Address space structure. |
* |
*/ |
void as_install_arch(as_t *as) |
{ |
asid_t asid; |
ipl_t ipl; |
__u8 sr; |
ipl = interrupts_disable(); |
spinlock_lock(&as->lock); |
asid = as->asid; |
/* Lower 2 GB, user and supervisor access */ |
for (sr = 0; sr < 8; sr++) { |
asm volatile ( |
"mtsrin %0, %1\n" |
: |
: "r" (0x6000 + (asid << 4) + sr), "r" (sr * 0x1000) |
); |
} |
/* Upper 2 GB, only supervisor access */ |
for (sr = 8; sr < 16; sr++) { |
asm volatile ( |
"mtsrin %0, %1\n" |
: |
: "r" (0x4000 + (asid << 4) + sr), "r" (sr * 0x1000) |
); |
} |
spinlock_unlock(&as->lock); |
interrupts_restore(ipl); |
} |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/src/mm/frame.c |
---|
66,6 → 66,13 |
third and fourth is reserved, other contain real mode code */ |
frame_mark_unavailable(0, 8); |
/* Mark the Page Hash Table frames as unavailable */ |
__u32 sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
); |
frame_mark_unavailable(ADDR2PFN(sdr1 & 0xffff000), 16); // FIXME |
} |
/** @} |
/kernel/trunk/arch/ppc32/src/mm/memory_init.c |
---|
54,4 → 54,3 |
/** @} |
*/ |
/kernel/trunk/arch/ppc32/src/mm/page.c |
---|
32,343 → 32,12 |
/** @file |
*/ |
#include <arch/mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <arch/mm/frame.h> |
#include <arch/asm.h> |
#include <arch/interrupt.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/as.h> |
#include <arch.h> |
#include <arch/types.h> |
#include <arch/exception.h> |
#include <align.h> |
#include <config.h> |
#include <print.h> |
#include <symtab.h> |
/** Try to find PTE for faulting address |
* |
* Try to find PTE for faulting address. |
* The as->lock must be held on entry to this function |
* if lock is true. |
* |
* @param as Address space. |
* @param lock Lock/unlock the address space. |
* @param badvaddr Faulting virtual address. |
* @param access Access mode that caused the fault. |
* @param istate Pointer to interrupted state. |
* @param pfrc Pointer to variable where as_page_fault() return code will be stored. |
* @return PTE on success, NULL otherwise. |
* |
*/ |
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, istate_t *istate, int *pfrc) |
{ |
/* |
* Check if the mapping exists in page tables. |
*/ |
pte_t *pte = page_mapping_find(as, badvaddr); |
if ((pte) && (pte->p)) { |
/* |
* Mapping found in page tables. |
* Immediately succeed. |
*/ |
return pte; |
} else { |
int rc; |
/* |
* Mapping not found in page tables. |
* Resort to higher-level page fault handler. |
*/ |
page_table_unlock(as, lock); |
switch (rc = as_page_fault(badvaddr, access, istate)) { |
case AS_PF_OK: |
/* |
* The higher-level page fault handler succeeded, |
* The mapping ought to be in place. |
*/ |
page_table_lock(as, lock); |
pte = page_mapping_find(as, badvaddr); |
ASSERT((pte) && (pte->p)); |
return pte; |
case AS_PF_DEFER: |
page_table_lock(as, lock); |
*pfrc = rc; |
return NULL; |
case AS_PF_FAULT: |
page_table_lock(as, lock); |
printf("Page fault.\n"); |
*pfrc = rc; |
return NULL; |
default: |
panic("unexpected rc (%d)\n", rc); |
} |
} |
} |
static void pht_refill_fail(__address badvaddr, istate_t *istate) |
{ |
char *symbol = ""; |
char *sym2 = ""; |
char *s = get_symtab_entry(istate->pc); |
if (s) |
symbol = s; |
s = get_symtab_entry(istate->lr); |
if (s) |
sym2 = s; |
panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2); |
} |
static void pht_insert(const __address vaddr, const pfn_t pfn) |
{ |
__u32 page = (vaddr >> 12) & 0xffff; |
__u32 api = (vaddr >> 22) & 0x3f; |
__u32 vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
: "=r" (vsid) |
: "r" (vaddr) |
); |
__u32 sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
); |
phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000); |
/* Primary hash (xor) */ |
__u32 h = 0; |
__u32 hash = vsid ^ page; |
__u32 base = (hash & 0x3ff) << 3; |
__u32 i; |
bool found = false; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) { |
found = true; |
break; |
} |
} |
if (!found) { |
/* Secondary hash (not) */ |
__u32 base2 = (~hash & 0x3ff) << 3; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) { |
found = true; |
base = base2; |
h = 1; |
break; |
} |
} |
if (!found) { |
// TODO: A/C precedence groups |
i = page % 8; |
} |
} |
phte[base + i].v = 1; |
phte[base + i].vsid = vsid; |
phte[base + i].h = h; |
phte[base + i].api = api; |
phte[base + i].rpn = pfn; |
phte[base + i].r = 0; |
phte[base + i].c = 0; |
phte[base + i].pp = 2; // FIXME |
} |
static void pht_real_insert(const __address vaddr, const pfn_t pfn) |
{ |
__u32 page = (vaddr >> 12) & 0xffff; |
__u32 api = (vaddr >> 22) & 0x3f; |
__u32 vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
: "=r" (vsid) |
: "r" (vaddr) |
); |
__u32 sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
); |
phte_t *phte_physical = (phte_t *) (sdr1 & 0xffff0000); |
/* Primary hash (xor) */ |
__u32 h = 0; |
__u32 hash = vsid ^ page; |
__u32 base = (hash & 0x3ff) << 3; |
__u32 i; |
bool found = false; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte_physical[base + i].v) || ((phte_physical[base + i].vsid == vsid) && (phte_physical[base + i].api == api))) { |
found = true; |
break; |
} |
} |
if (!found) { |
/* Secondary hash (not) */ |
__u32 base2 = (~hash & 0x3ff) << 3; |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte_physical[base2 + i].v) || ((phte_physical[base2 + i].vsid == vsid) && (phte_physical[base2 + i].api == api))) { |
found = true; |
base = base2; |
h = 1; |
break; |
} |
} |
if (!found) { |
// TODO: A/C precedence groups |
i = page % 8; |
} |
} |
phte_physical[base + i].v = 1; |
phte_physical[base + i].vsid = vsid; |
phte_physical[base + i].h = h; |
phte_physical[base + i].api = api; |
phte_physical[base + i].rpn = pfn; |
phte_physical[base + i].r = 0; |
phte_physical[base + i].c = 0; |
phte_physical[base + i].pp = 2; // FIXME |
} |
/** Process Instruction/Data Storage Interrupt |
* |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* |
*/ |
void pht_refill(int n, istate_t *istate) |
{ |
__address badvaddr; |
pte_t *pte; |
int pfrc; |
as_t *as; |
bool lock; |
if (AS == NULL) { |
as = AS_KERNEL; |
lock = false; |
} else { |
as = AS; |
lock = true; |
} |
if (n == VECTOR_DATA_STORAGE) { |
asm volatile ( |
"mfdar %0\n" |
: "=r" (badvaddr) |
); |
} else |
badvaddr = istate->pc; |
page_table_lock(as, lock); |
pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc); |
if (!pte) { |
switch (pfrc) { |
case AS_PF_FAULT: |
goto fail; |
break; |
case AS_PF_DEFER: |
/* |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(as, lock); |
return; |
default: |
panic("Unexpected pfrc (%d)\n", pfrc); |
} |
} |
pte->a = 1; /* Record access to PTE */ |
pht_insert(badvaddr, pte->pfn); |
page_table_unlock(as, lock); |
return; |
fail: |
page_table_unlock(as, lock); |
pht_refill_fail(badvaddr, istate); |
} |
/** Process Instruction/Data Storage Interrupt in Real Mode |
* |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* |
*/ |
/** Process Instruction/Data Storage Interrupt in Real Mode.
 *
 * Fix over the original: the upper bound used `<=`, accepting the
 * address PA2KA(physmem), which is one byte PAST the last identity-
 * mapped byte; the comparison must be strict (`<`), as in the sibling
 * copy of this routine.
 *
 * @param n      Interrupt vector number.
 * @param istate Interrupted register context.
 * @return true if the fault was handled here, false otherwise.
 */
bool pht_real_refill(int n, istate_t *istate)
{
	__address badvaddr;

	if (n == VECTOR_DATA_STORAGE) {
		/* Data faults report the address in DAR. */
		asm volatile (
			"mfdar %0\n"
			: "=r" (badvaddr)
		);
	} else
		badvaddr = istate->pc;

	/* Physical memory size was stashed in SPRG3 by early boot code. */
	__u32 physmem;
	asm volatile (
		"mfsprg3 %0\n"
		: "=r" (physmem)
	);

	if ((badvaddr >= PA2KA(0)) && (badvaddr < PA2KA(physmem))) {
		pht_real_insert(badvaddr, KA2PA(badvaddr) >> 12);
		return true;
	}

	return false;
}
/** Initialize the Page Hash Table.
 *
 * Zero the PHT so translation starts with no valid entries.
 */
void pht_init(void)
{
	// FIXME
	__u32 sdr1;
	asm volatile (
		"mfsdr1 %0\n"
		: "=r" (sdr1)
	);

	/* PHT base (HTABORG) lives in the upper 16 bits of SDR1. */
	phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);
	/* 65536 = minimal 64 KB PHT. NOTE(review): assumes HTABMASK == 0 -- confirm. */
	memsetb((__address) phte, 65536, 0);
}
void page_arch_init(void) |
{ |
if (config.cpu_active == 1) |
/kernel/trunk/arch/ppc32/src/interrupt.c |
---|
39,6 → 39,7 |
#include <time/clock.h> |
#include <ipc/sysipc.h> |
#include <arch/drivers/pic.h> |
#include <arch/mm/tlb.h> |
void start_decrementer(void) |
94,4 → 95,3 |
/** @} |
*/ |