Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1725 → Rev 1726

/kernel/trunk/arch/ppc32/include/mm/page.h
132,9 → 132,6
 
extern void page_arch_init(void);
 
#define PHT_BITS 16
#define PHT_ORDER 4
 
typedef struct {
	unsigned v : 1;       /**< Valid */
	unsigned vsid : 24;   /**< Virtual Segment ID */
150,6 → 147,7
} phte_t;
 
extern void pht_refill(int n, istate_t *istate);
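/* Real-mode counterpart of pht_refill(); placed in the unmapped text section
   (K_UNMAPPED_TEXT_START) so it can be reached while address translation is
   still disabled. */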
extern bool pht_real_refill(int n, istate_t *istate) __attribute__ ((section("K_UNMAPPED_TEXT_START")));
extern void pht_init(void);
 
#endif /* __ASM__ */
160,4 → 158,3
 
/** @}
*/
 
/kernel/trunk/arch/ppc32/src/exception.S
121,10 → 121,6
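# (The lis/addi/mtsrr0 sequence that loads exc_dispatch into SRR0, shown in the
#  handlers below, is dropped from each handler; it now lives in jump_to_kernel.)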
exc_system_reset:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 0
	b jump_to_kernel
 
133,10 → 129,6
exc_machine_check:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 1
	b jump_to_kernel
 
145,10 → 137,14
exc_data_storage:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
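	# Real-mode PHT refill (pht_real_refill) is disabled here; the fault
	# falls through to the C-level handler via jump_to_kernel instead.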
	# li r3, 2
	# mr r4, sp
	# addi r4, r4, 8
	# bl pht_real_refill
	# cmpwi r3, 0
	# bne iret_real
	li r3, 2
	b jump_to_kernel
 
157,10 → 153,14
exc_instruction_storage:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
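	# Same disabled real-mode refill path as in exc_data_storage above.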
	# li r3, 3
	# mr r4, sp
	# addi r4, r4, 8
	# bl pht_real_refill
	# cmpwi r3, 0
	# bne iret_real
	
	li r3, 3
	b jump_to_kernel
 
169,10 → 169,6
exc_external:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 4
	b jump_to_kernel
 
181,10 → 177,6
exc_alignment:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 5
	b jump_to_kernel
 
193,10 → 185,6
exc_program:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 6
	b jump_to_kernel
 
205,10 → 193,6
exc_fp_unavailable:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 7
	b jump_to_kernel
 
217,10 → 201,6
exc_decrementer:
	CONTEXT_STORE
	
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 8
	b jump_to_kernel
 
229,10 → 209,6
exc_reserved0:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 9
	b jump_to_kernel
 
241,10 → 217,6
exc_reserved1:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 10
	b jump_to_kernel
 
260,10 → 232,6
exc_trace:
	CONTEXT_STORE
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	li r3, 12
	b jump_to_kernel
 
273,6 → 241,10
	addi r12, r12, iret@l
	mtlr r12
 
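	# Point SRR0 at exc_dispatch and set IR/DR in SRR1, so the rfi below
	# enters the dispatcher with address translation enabled.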
	lis r12, exc_dispatch@ha
	addi r12, r12, exc_dispatch@l
	mtsrr0 r12
	mfmsr r12
	ori r12, r12, (msr_ir | msr_dr)@l
	mtsrr1 r12
298,3 → 270,59
	addis sp, sp, 0x8000
	rfi
 
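# Restore the context saved by CONTEXT_STORE and return to the interrupted
# code with rfi; used by the (currently disabled) real-mode refill path above.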
iret_real:
	lwz r0, 8(sp)
	lwz r2, 12(sp)
	lwz r3, 16(sp)
	lwz r4, 20(sp)
	lwz r5, 24(sp)
	lwz r6, 28(sp)
	lwz r7, 32(sp)
	lwz r8, 36(sp)
	lwz r9, 40(sp)
	lwz r10, 44(sp)
	lwz r11, 48(sp)
	lwz r13, 52(sp)
	lwz r14, 56(sp)
	lwz r15, 60(sp)
	lwz r16, 64(sp)
	lwz r17, 68(sp)
	lwz r18, 72(sp)
	lwz r19, 76(sp)
	lwz r20, 80(sp)
	lwz r21, 84(sp)
	lwz r22, 88(sp)
	lwz r23, 92(sp)
	lwz r24, 96(sp)
	lwz r25, 100(sp)
	lwz r26, 104(sp)
	lwz r27, 108(sp)
	lwz r28, 112(sp)
	lwz r29, 116(sp)
	lwz r30, 120(sp)
	lwz r31, 124(sp)
	lwz r12, 128(sp)
	mtcr r12
	lwz r12, 132(sp)
	mtsrr0 r12
	lwz r12, 136(sp)
	mtsrr1 r12
	lwz r12, 140(sp)
	mtlr r12
	lwz r12, 144(sp)
	mtctr r12
	lwz r12, 148(sp)
	mtxer r12
	lwz r12, 152(sp)
	lwz sp, 156(sp)
	rfi
/kernel/trunk/arch/ppc32/src/mm/as.c
43,4 → 43,3
 
/** @}
*/
 
/kernel/trunk/arch/ppc32/src/mm/frame.c
70,4 → 70,3
 
/** @}
*/
 
/kernel/trunk/arch/ppc32/src/mm/page.c
48,9 → 48,7
#include <print.h>
#include <symtab.h>
 
static phte_t *phte;
 
 
/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
66,8 → 64,7
 * @return PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
		istate_t *istate, int *pfrc)
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, istate_t *istate, int *pfrc)
{
	/*
	 * Check if the mapping exists in page tables.
132,8 → 129,8
{
	__u32 page = (vaddr >> 12) & 0xffff;
	__u32 api = (vaddr >> 22) & 0x3f;
	__u32 vsid;
	asm volatile (
		"mfsrin %0, %1\n"
		: "=r" (vsid)
140,6 → 137,13
		: "r" (vaddr)
	);
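	/* The PHT base is now taken from SDR1 on each call
	   (the file-level static phte pointer was removed). */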
	__u32 sdr1;
	asm volatile (
		"mfsdr1 %0\n"
		: "=r" (sdr1)
	);
	phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);
	/* Primary hash (xor) */
	__u32 h = 0;
	__u32 hash = vsid ^ page;
188,6 → 192,73
}
 
 
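/** Insert a PHT entry (real mode)
 *
 * Counterpart of the mapped-mode insert above: the PHT is accessed through
 * its physical base address from SDR1, so this can run with translation off.
 *
 */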
static void pht_real_insert(const __address vaddr, const pfn_t pfn)
{
	__u32 page = (vaddr >> 12) & 0xffff;
	__u32 api = (vaddr >> 22) & 0x3f;
	__u32 vsid;
	asm volatile (
		"mfsrin %0, %1\n"
		: "=r" (vsid)
		: "r" (vaddr)
	);
	__u32 sdr1;
	asm volatile (
		"mfsdr1 %0\n"
		: "=r" (sdr1)
	);
	phte_t *phte_physical = (phte_t *) (sdr1 & 0xffff0000);
	/* Primary hash (xor) */
	__u32 h = 0;
	__u32 hash = vsid ^ page;
	__u32 base = (hash & 0x3ff) << 3;
	__u32 i;
	bool found = false;
	/* Find unused or colliding
	   PTE in PTEG */
	for (i = 0; i < 8; i++) {
		if ((!phte_physical[base + i].v) || ((phte_physical[base + i].vsid == vsid) && (phte_physical[base + i].api == api))) {
			found = true;
			break;
		}
	}
	if (!found) {
		/* Secondary hash (not) */
		__u32 base2 = (~hash & 0x3ff) << 3;
		/* Find unused or colliding
		   PTE in PTEG */
		for (i = 0; i < 8; i++) {
			if ((!phte_physical[base2 + i].v) || ((phte_physical[base2 + i].vsid == vsid) && (phte_physical[base2 + i].api == api))) {
				found = true;
				base = base2;
				h = 1;
				break;
			}
		}
		if (!found) {
			// TODO: A/C precedence groups
			i = page % 8;
		}
	}
	phte_physical[base + i].v = 1;
	phte_physical[base + i].vsid = vsid;
	phte_physical[base + i].h = h;
	phte_physical[base + i].api = api;
	phte_physical[base + i].rpn = pfn;
	phte_physical[base + i].r = 0;
	phte_physical[base + i].c = 0;
	phte_physical[base + i].pp = 2; // FIXME
}
 
 
/** Process Instruction/Data Storage Interrupt
*
* @param n Interrupt vector number.
250,42 → 321,58
}
 
 
void pht_init(void)
{
	memsetb((__address) phte, 1 << PHT_BITS, 0);
}


/** Process Instruction/Data Storage Interrupt in Real Mode
 *
 * @param n Interrupt vector number.
 * @param istate Interrupted register context.
 *
 */
bool pht_real_refill(int n, istate_t *istate)
{
	__address badvaddr;
	
	if (n == VECTOR_DATA_STORAGE) {
		asm volatile (
			"mfdar %0\n"
			: "=r" (badvaddr)
		);
	} else
		badvaddr = istate->pc;
	
	__u32 physmem;
	asm volatile (
		"mfsprg3 %0\n"
		: "=r" (physmem)
	);
	
	if ((badvaddr >= PA2KA(0)) && (badvaddr <= PA2KA(physmem))) {
		pht_real_insert(badvaddr, KA2PA(badvaddr) >> 12);
		return true;
	}
	
	return false;
}


void page_arch_init(void)
{
	if (config.cpu_active == 1) {
		page_mapping_operations = &pt_mapping_operations;
		
		__address cur;
		int flags;
		
		/* Frames below 128 MB are mapped using BAT,
		   map rest of the physical memory */
		for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
			flags = PAGE_CACHEABLE;
			if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
				flags |= PAGE_GLOBAL;
			page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
		}
		
		/* Allocate page hash table */
		phte_t *physical_phte = (phte_t *) PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
		phte = (phte_t *) PA2KA((__address) physical_phte);
		ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
		pht_init();
		
		asm volatile (
			"mtsdr1 %0\n"
			:
			: "r" ((__address) physical_phte)
		);
	}
}


void pht_init(void)
{
	// FIXME
	__u32 sdr1;
	asm volatile (
		"mfsdr1 %0\n"
		: "=r" (sdr1)
	);
	phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);
	memsetb((__address) phte, 65536, 0);
}


void page_arch_init(void)
{
	if (config.cpu_active == 1)
		page_mapping_operations = &pt_mapping_operations;
}
 
 
306,4 → 393,3
 
/** @}
*/