Subversion Repositories HelenOS

Compare Revisions: Rev 2131 → Rev 2422

/branches/fs/kernel/arch/sparc64/include/types.h
62,6 → 62,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/sparc64/include/stack.h
43,7 → 43,7
/**
* 16-extended-word save area for %i[0-7] and %l[0-7] registers.
*/
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE)
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE)
 
/**
* By convention, the actual top of the stack is %sp + STACK_BIAS.
/branches/fs/kernel/arch/sparc64/include/trap/mmu.h
129,7 → 129,21
wrpr %g0, 1, %tl
.endif
 
/*
* Switch from the MM globals.
*/
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
 
/*
* Read the Tag Access register for the higher-level handler.
* This is necessary to survive nested DTLB misses.
*/
mov VA_DMMU_TAG_ACCESS, %g2
ldxa [%g2] ASI_DMMU, %g2
 
/*
* g2 will be passed as an argument to fast_data_access_mmu_miss().
*/
PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
.endm
 
142,7 → 156,21
wrpr %g0, 1, %tl
.endif
 
/*
* Switch from the MM globals.
*/
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
 
/*
* Read the Tag Access register for the higher-level handler.
* This is necessary to survive nested DTLB misses.
*/
mov VA_DMMU_TAG_ACCESS, %g2
ldxa [%g2] ASI_DMMU, %g2
 
/*
* g2 will be passed as an argument to fast_data_access_mmu_miss().
*/
PREEMPTIBLE_HANDLER fast_data_access_protection
.endm
 
/branches/fs/kernel/arch/sparc64/include/mm/frame.h
35,7 → 35,20
#ifndef KERN_sparc64_FRAME_H_
#define KERN_sparc64_FRAME_H_
 
#define FRAME_WIDTH 13 /* 8K */
/*
* Page size supported by the MMU.
* For 8K there is the nasty illegal virtual aliasing problem.
* Therefore, the kernel uses 8K only internally on the TLB and TSB levels.
*/
#define MMU_FRAME_WIDTH 13 /* 8K */
#define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH)
 
/*
* Page size exported to the generic memory management subsystems.
* This page size is not directly supported by the MMU, but we can emulate
* each 16K page with a pair of adjacent 8K pages.
*/
#define FRAME_WIDTH 14 /* 16K */
#define FRAME_SIZE (1 << FRAME_WIDTH)
 
#ifdef KERNEL
/branches/fs/kernel/arch/sparc64/include/mm/page.h
37,11 → 37,27
 
#include <arch/mm/frame.h>
 
/*
* On the TLB and TSB level, we still use 8K pages, which are supported by the
* MMU.
*/
#define MMU_PAGE_WIDTH MMU_FRAME_WIDTH
#define MMU_PAGE_SIZE MMU_FRAME_SIZE
 
/*
* On the page table level, we use 16K pages. 16K pages are not supported by
* the MMU but we emulate them with pairs of 8K pages.
*/
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))
 
/*
* With 16K pages, there is only one page color.
*/
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */
 
#ifdef KERNEL
 
#ifndef __ASM__
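
An illustrative sketch of the 16K-page emulation arithmetic defined above (a hypothetical standalone program, not part of the change set; the same decomposition is used by the tlb.c handlers further down):

#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_WIDTH     13    /* 8K, supported by the MMU */
#define PAGE_WIDTH         14    /* 16K, emulated by 8K pairs */
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))

int main(void)
{
	/* Hypothetical faulting virtual address. */
	uint64_t va = 0x40007000;

	/* Base of the emulated 16K page. */
	uint64_t page = va & ~((1ULL << PAGE_WIDTH) - 1);

	/* Which of the two 8K MMU subpages the address hits. */
	unsigned index = (va >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;

	/* Prints: page=0x40004000, subpage index=1 */
	printf("page=%#llx, subpage index=%u\n",
	    (unsigned long long) page, index);
	return 0;
}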
/branches/fs/kernel/arch/sparc64/include/mm/tlb.h
428,9 → 428,9
membar();
}
 
extern void fast_instruction_access_mmu_miss(int n, istate_t *istate);
extern void fast_data_access_mmu_miss(int n, istate_t *istate);
extern void fast_data_access_protection(int n, istate_t *istate);
extern void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate);
extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate);
extern void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate);
 
extern void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable);
 
/branches/fs/kernel/arch/sparc64/include/mm/as.h
81,10 → 81,11
#include <genarch/mm/as_ht.h>
 
#ifdef CONFIG_TSB
# include <arch/mm/tsb.h>
# define as_invalidate_translation_cache(as, page, cnt) tsb_invalidate(as, page, cnt)
#include <arch/mm/tsb.h>
#define as_invalidate_translation_cache(as, page, cnt) \
tsb_invalidate((as), (page), (cnt))
#else
# define as_invalidate_translation_cache(as, page, cnt)
#define as_invalidate_translation_cache(as, page, cnt)
#endif
 
extern void as_arch_init(void);
/branches/fs/kernel/arch/sparc64/include/mm/tsb.h
112,8 → 112,8
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t);
extern void dtsb_pte_copy(struct pte *t, bool ro);
extern void itsb_pte_copy(struct pte *t, index_t index);
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro);
 
#endif /* !def __ASM__ */
 
/branches/fs/kernel/arch/sparc64/include/barrier.h
39,12 → 39,12
* Our critical section barriers are prepared for the weakest RMO memory model.
*/
#define CS_ENTER_BARRIER() \
asm volatile ( \
asm volatile ( \
"membar #LoadLoad | #LoadStore\n" \
::: "memory" \
)
#define CS_LEAVE_BARRIER() \
asm volatile ( \
asm volatile ( \
"membar #StoreStore\n" \
"membar #LoadStore\n" \
::: "memory" \
/branches/fs/kernel/arch/sparc64/include/cpu.h
39,6 → 39,10
#include <arch/register.h>
#include <arch/asm.h>
 
#ifdef CONFIG_SMP
#include <arch/mm/cache.h>
#endif
 
#define MANUF_FUJITSU 0x04
#define MANUF_ULTRASPARC 0x17 /**< UltraSPARC I, UltraSPARC II */
#define MANUF_SUN 0x3e
53,12 → 57,13
#define IMPL_SPARC64V 0x5
 
typedef struct {
uint32_t mid; /**< Processor ID as read from UPA_CONFIG. */
uint32_t mid; /**< Processor ID as read from
UPA_CONFIG. */
ver_reg_t ver;
uint32_t clock_frequency; /**< Processor frequency in Hz. */
uint64_t next_tick_cmpr; /**< Next clock interrupt should be
generated when the TICK register
matches this value. */
generated when the TICK register
matches this value. */
} cpu_arch_t;
#endif
/branches/fs/kernel/arch/sparc64/Makefile.inc
83,8 → 83,7
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/cache.c \
arch/$(ARCH)/src/mm/cache_asm.S \
arch/$(ARCH)/src/mm/cache.S \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
/branches/fs/kernel/arch/sparc64/src/smp/smp.c
100,7 → 100,7
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
printf("%s: waiting for processor (mid = %d) timed out\n",
__FUNCTION__, mid);
__FUNCTION__, mid);
}
}
 
/branches/fs/kernel/arch/sparc64/src/smp/ipi.c
74,13 → 74,13
panic("Interrupt Dispatch Status busy bit set\n");
do {
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_0, (uintptr_t)
func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_0,
(uintptr_t) func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_1, 0);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_2, 0);
asi_u64_write(ASI_UDB_INTR_W, (mid <<
INTR_VEC_DISPATCH_MID_SHIFT) | ASI_UDB_INTR_W_DISPATCH,
0);
asi_u64_write(ASI_UDB_INTR_W,
(mid << INTR_VEC_DISPATCH_MID_SHIFT) |
ASI_UDB_INTR_W_DISPATCH, 0);
membar();
/branches/fs/kernel/arch/sparc64/src/asm.S
273,7 → 273,7
flushw
wrpr %g0, 0, %cleanwin ! avoid information leak
 
mov %i3, %o0 ! uarg
mov %i2, %o0 ! uarg
 
clr %i2
clr %i3
/branches/fs/kernel/arch/sparc64/src/proc/scheduler.c
62,9 → 62,8
* - preemptible trap handler switches to alternate globals
* before it explicitly uses %g7.
*/
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
- (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE,
STACK_ALIGNMENT));
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE -
(STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
write_to_ig_g6(sp);
write_to_ag_g6(sp);
write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
/branches/fs/kernel/arch/sparc64/src/proc/thread.c
55,7 → 55,7
* belonging to a killed thread.
*/
frame_free(KA2PA(ALIGN_DOWN((uintptr_t)
t->arch.uspace_window_buffer, PAGE_SIZE)));
t->arch.uspace_window_buffer, PAGE_SIZE)));
}
}
 
75,8 → 75,8
* Mind the possible alignment of the userspace window buffer
* belonging to a killed thread.
*/
t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
PAGE_SIZE);
t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
PAGE_SIZE);
}
}
 
/branches/fs/kernel/arch/sparc64/src/sparc64.c
155,5 → 155,11
/* not reached */
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/sparc64/src/trap/interrupt.c
83,9 → 83,9
} else if (data0 > config.base) {
/*
* This is a cross-call.
* data0 contains address of kernel function.
* data0 contains address of the kernel function.
* We call the function only after we verify
* it is on of the supported ones.
* it is one of the supported ones.
*/
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
/branches/fs/kernel/arch/sparc64/src/cpu/cpu.c
63,10 → 63,10
mid = *((uint32_t *) prop->value);
if (mid == CPU->arch.mid) {
prop = ofw_tree_getprop(node,
"clock-frequency");
"clock-frequency");
if (prop && prop->value)
clock_frequency = *((uint32_t *)
prop->value);
prop->value);
}
}
node = ofw_tree_find_peer_by_device_type(node, "cpu");
/branches/fs/kernel/arch/sparc64/src/mm/cache_asm.S
File deleted
/branches/fs/kernel/arch/sparc64/src/mm/cache.c
File deleted
/branches/fs/kernel/arch/sparc64/src/mm/tlb.c
54,14 → 54,14
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
char *str);
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
static void itlb_pte_copy(pte_t *t, index_t index);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
 
char *context_encoding[] = {
"Primary",
92,8 → 92,8
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
locked, bool cacheable)
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
126,11 → 126,12
 
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param ro If true, the entry will be created read-only, regardless of its w
* field.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the entry will be created read-only, regardless of its
* w field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
137,15 → 138,15
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
tag.vpn = pg.vpn;
 
dtlb_tag_access_write(tag.value);
 
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
158,15 → 159,16
data.p = t->k; /* p like privileged */
data.w = ro ? false : t->w;
data.g = t->g;
 
dtlb_data_in_write(data.value);
}
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itlb_pte_copy(pte_t *t)
void itlb_pte_copy(pte_t *t, index_t index)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
173,8 → 175,8
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
196,9 → 198,10
}
 
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
209,9 → 212,9
* Insert it into ITLB.
*/
t->a = true;
itlb_pte_copy(t);
itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
itsb_pte_copy(t);
itsb_pte_copy(t, index);
#endif
page_table_unlock(AS, true);
} else {
222,7 → 225,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate,
__FUNCTION__);
__FUNCTION__);
}
}
}
231,24 → 234,29
*
* Note that some faults (e.g. kernel faults) were already resolved by the
* low-level, assembly language part of the fast_data_access_mmu_miss handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This is to prevent confusion created by clobbered
* Tag Access register during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_mmu_miss(int n, istate_t *istate)
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
 
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
"kernel page fault.");
}
 
page_table_lock(AS, true);
259,32 → 267,39
* Insert it into DTLB.
*/
t->a = true;
dtlb_pte_copy(t, true);
dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, true);
dtsb_pte_copy(t, index, true);
#endif
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
 
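For illustration, a worked example of the va/index derivation above, under the assumption that the trap saved tag.vpn == 0x20003 (a VPN in 8K units, hypothetical value):

	va    = ALIGN_DOWN((uint64_t) 0x20003 << MMU_PAGE_WIDTH, PAGE_SIZE);
	        /* 0x20003 << 13 == 0x40006000, aligned down to 16K: 0x40004000 */
	index = 0x20003 % MMU_PAGES_PER_PAGE;    /* == 1, the higher subpage */

so the handler installs the higher 8K half of the emulated 16K page, while as_page_fault() still sees the 16K-aligned address.
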
/** DTLB protection fault handler. */
void fast_data_access_protection(int n, istate_t *istate)
/** DTLB protection fault handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This is to prevent confusion created by clobbered
* Tag Access register during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
296,10 → 311,11
*/
t->a = true;
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
dtlb_pte_copy(t, false);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
va + index * MMU_PAGE_SIZE);
dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, false);
dtsb_pte_copy(t, index, false);
#endif
page_table_unlock(AS, true);
} else {
310,7 → 326,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
328,10 → 344,10
t.value = itlb_tag_read_read(i);
 
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
printf("D-TLB contents:\n");
340,16 → 356,16
t.value = dtlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
*str)
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str)
{
fault_if_from_uspace(istate, "%s\n", str);
dump_istate(istate);
356,29 → 372,32
panic("%s\n", str);
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
va = tag.vpn << MMU_PAGE_WIDTH;
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
}
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
}
 
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
}
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
393,8 → 412,8
sfar = dtlb_sfar_read();
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
481,11 → 500,11
ctx.context = asid;
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
/branches/fs/kernel/arch/sparc64/src/mm/as.c
42,7 → 42,6
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
61,8 → 60,13
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
/*
* The order must be calculated with respect to the emulated
* 16K page size.
*/
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
 
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
if (!tsb)
70,9 → 74,10
 
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
* sizeof(tsb_entry_t), 0);
sizeof(tsb_entry_t));
 
memsetb((uintptr_t) as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
return 0;
}
80,8 → 85,12
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
/*
* The count must be calculated with respect to the emulated 16K page
* size.
*/
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
#else
92,13 → 101,7
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
ipl_t ipl;
 
ipl = interrupts_disable();
mutex_lock_active(&as->lock); /* completely unnecessary, but polite */
tsb_invalidate(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
interrupts_restore(ipl);
#endif
return 0;
}
115,18 → 118,17
tlb_context_reg_t ctx;
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
* Note that we don't and may not lock the address space. That's ok
* since we only read members that are currently read-only.
*
* Moreover, the as->asid is protected by asidlock, which is being held.
*/
/*
* Write ASID to secondary context register.
* The primary context register has to be set
* from TL>0 so it will be filled from the
* secondary context register from the TL=1
* code just before switch to userspace.
* Write ASID to secondary context register. The primary context
* register has to be set from TL>0 so it will be filled from the
* secondary context register from the TL=1 code just before switch to
* userspace.
*/
ctx.v = 0;
ctx.context = as->asid;
139,7 → 141,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
158,9 → 160,9
tsb_base.size = TSB_SIZE;
tsb_base.split = 0;
 
tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
itsb_base_write(tsb_base.value);
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
}
176,10 → 178,10
{
 
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
* Note that we don't and may not lock the address space. That's ok
* since we only read members that are currently read-only.
*
* Moreover, the as->asid is protected by asidlock, which is being held.
*/
 
#ifdef CONFIG_TSB
189,7 → 191,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
/branches/fs/kernel/arch/sparc64/src/mm/cache.S
0,0 → 1,93
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/arch.h>
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define DCACHE_TAG_SHIFT 2
 
.register %g2, #scratch
.register %g3, #scratch
 
/** Flush the whole D-cache. */
.global dcache_flush
dcache_flush:
set (DCACHE_SIZE - DCACHE_LINE_SIZE), %g1
stxa %g0, [%g1] ASI_DCACHE_TAG
0: membar #Sync
subcc %g1, DCACHE_LINE_SIZE, %g1
bnz,pt %xcc, 0b
stxa %g0, [%g1] ASI_DCACHE_TAG
membar #Sync
retl
! beware SF Erratum #51, do not put the MEMBAR here
nop
 
/** Flush only D-cache lines of one virtual color.
*
* @param o0 Virtual color to be flushed.
*/
.global dcache_flush_color
dcache_flush_color:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
 
/** Flush only D-cache lines of one virtual color and one tag.
*
* @param o0 Virtual color to lookup the tag.
* @param o1 Tag of the cachelines to be flushed.
*/
.global dcache_flush_tag
dcache_flush_tag:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: ldxa [%g2] ASI_DCACHE_TAG, %g3
srlx %g3, DCACHE_TAG_SHIFT, %g3
cmp %g3, %o1
bnz 1f
nop
stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
1: subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
/branches/fs/kernel/arch/sparc64/src/mm/tsb.c
34,6 → 34,7
 
#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
40,7 → 41,7
#include <macros.h>
#include <debug.h>
 
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
 
/** Invalidate portion of TSB.
*
59,28 → 60,39
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages, ITSB_ENTRY_COUNT);
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
 
if (pages == (count_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
cnt = ITSB_ENTRY_COUNT;
else
cnt = pages * 2;
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
}
}
 
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K subpage.
*/
void itsb_pte_copy(pte_t *t)
void itsb_pte_copy(pte_t *t, index_t index)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
 
ASSERT(index <= 1);
as = t->as;
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < ITSB_ENTRY_COUNT);
tsb = &as->arch.itsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
95,10 → 107,11
write_barrier();
 
tsb->tag.context = as->asid;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
110,16 → 123,22
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param ro If true, the mapping is copied read-only.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, bool ro)
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
ASSERT(index <= 1);
 
as = t->as;
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < DTSB_ENTRY_COUNT);
tsb = &as->arch.dtsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
134,10 → 153,11
write_barrier();
 
tsb->tag.context = as->asid;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
tsb->data.cv = t->c;
148,7 → 168,7
write_barrier();
tsb->tag.invalid = true; /* mark the entry as valid */
tsb->tag.invalid = false; /* mark the entry as valid */
}
 
/** @}
/branches/fs/kernel/arch/sparc64/src/mm/frame.c
65,10 → 65,10
if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
zone_create(ADDR2PFN(start),
SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
confdata, 0);
SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
confdata, 0);
last_frame = max(last_frame, start + ALIGN_UP(size,
FRAME_SIZE));
FRAME_SIZE));
}
 
/*
/branches/fs/kernel/arch/sparc64/src/mm/page.c
73,9 → 73,9
*/
for (i = 0; i < bsp_locked_dtlb_entries; i++) {
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
}
#endif
 
107,26 → 107,26
size_t increment;
count_t count;
} sizemap[] = {
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 } /* 8M */
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 } /* 8M */
};
ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
ASSERT(size <= 8 * 1024 * 1024);
if (size <= FRAME_SIZE)
if (size <= MMU_FRAME_SIZE)
order = 0;
else
order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;
order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
 
/*
* Use virtual addresses that are beyond the limit of physical memory.
134,8 → 134,10
* by frame_alloc().
*/
ASSERT(PA2KA(last_frame));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
1 << (order + FRAME_WIDTH));
for (i = 0; i < sizemap[order].count; i++) {
/*
142,8 → 144,8
* First, insert the mapping into DTLB.
*/
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
#ifdef CONFIG_SMP
/*
150,11 → 152,11
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i * sizemap[order].increment;
virtaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i * sizemap[order].increment;
physaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
#endif
}
/branches/fs/kernel/arch/ia64/include/types.h
70,6 → 70,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ia64/src/ia64.c
176,5 → 176,11
#endif
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia64/src/mm/as.c
36,11 → 36,10
#include <arch/mm/asid.h>
#include <arch/mm/page.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/page_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <mm/asid.h>
#include <arch.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
55,13 → 54,9
*/
void as_install_arch(as_t *as)
{
ipl_t ipl;
region_register rr;
int i;
ipl = interrupts_disable();
spinlock_lock(&as->lock);
ASSERT(as->asid != ASID_INVALID);
/*
80,9 → 75,6
}
srlz_d();
srlz_i();
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/fs/kernel/arch/arm32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/arm32/src/arm32.c
82,5 → 82,11
/* TODO */
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/ppc32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ppc32/src/mm/as.c
54,12 → 54,8
void as_install_arch(as_t *as)
{
asid_t asid;
ipl_t ipl;
uint32_t sr;
 
ipl = interrupts_disable();
spinlock_lock(&as->lock);
asid = as->asid;
/* Lower 2 GB, user and supervisor access */
79,9 → 75,6
: "r" ((0x4000 << 16) + (asid << 4) + sr), "r" (sr << 28)
);
}
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/fs/kernel/arch/ppc32/src/mm/page.c
53,7 → 53,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
/branches/fs/kernel/arch/ppc32/src/interrupt.c
60,11 → 60,19
int inum;
while ((inum = pic_get_pending()) != -1) {
bool ack = false;
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Acknowledge the interrupt before processing */
pic_ack_interrupt(inum);
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
75,7 → 83,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
pic_ack_interrupt(inum);
if (!ack)
pic_ack_interrupt(inum);
}
}
 
/branches/fs/kernel/arch/ppc32/src/drivers/cuda.c
48,7 → 48,8
#define PACKET_ADB 0x00
#define PACKET_CUDA 0x01
 
#define CUDA_POWERDOWN 0x0a
#define CUDA_POWERDOWN 0x0a
#define CUDA_RESET 0x11
 
#define RS 0x200
#define B (0 * RS)
191,9 → 192,6
};
 
 
void send_packet(const uint8_t kind, index_t count, ...);
 
 
static void receive_packet(uint8_t *kind, index_t count, uint8_t data[])
{
cuda[B] = cuda[B] & ~TIP;
316,7 → 314,7
}
 
 
void send_packet(const uint8_t kind, index_t count, ...)
static void send_packet(const uint8_t kind, count_t count, ...)
{
index_t i;
va_list va;
341,13 → 339,17
 
 
void cpu_halt(void) {
#ifdef CONFIG_POWEROFF
send_packet(PACKET_CUDA, 1, CUDA_POWERDOWN);
#endif
asm volatile (
"b 0\n"
);
}
 
void arch_reboot(void) {
send_packet(PACKET_CUDA, 1, CUDA_RESET);
asm volatile (
"b 0\n"
);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia32xen/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ia32xen/src/ia32xen.c
211,5 → 211,11
{
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia32xen/src/smp/smp.c
80,13 → 80,13
 
if (config.cpu_count > 1) {
page_mapping_insert(AS_KERNEL, l_apic_address, (uintptr_t) l_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, io_apic_address, (uintptr_t) io_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
l_apic = (uint32_t *) l_apic_address;
io_apic = (uint32_t *) io_apic_address;
}
}
}
 
/*
/branches/fs/kernel/arch/ia32xen/src/interrupt.c
176,14 → 176,21
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
194,7 → 201,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
/branches/fs/kernel/arch/amd64/include/types.h
62,6 → 62,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/amd64/include/asm.h
55,10 → 55,17
return v;
}
 
static inline void cpu_sleep(void) { __asm__ volatile ("hlt\n"); };
static inline void cpu_halt(void) { __asm__ volatile ("hlt\n"); };
static inline void cpu_sleep(void)
{
asm volatile ("hlt\n");
};
 
static inline void cpu_halt(void)
{
asm volatile ("hlt\n");
};
 
 
/** Byte from port
*
* Get byte from port
/branches/fs/kernel/arch/amd64/src/pm.c
33,6 → 33,7
/** @file
*/
 
#include <arch.h>
#include <arch/pm.h>
#include <arch/asm.h>
#include <mm/as.h>
227,5 → 228,24
tr_load(gdtselector(TSS_DES));
}
 
/*
 * Reboot the machine by initiating a triple fault: with the IDT
 * zeroed out, the int $0x03 below cannot be dispatched and escalates
 * into a triple fault, which resets the processor.
 */
void arch_reboot(void)
{
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
idtr_load(&idtr);
interrupts_restore(ipl);
asm volatile (
"int $0x03\n"
"cli\n"
"hlt\n"
);
}
 
/** @}
*/
/branches/fs/kernel/arch/amd64/src/boot/vga323.pal
0,0 → 1,0
link ../../../ia32/src/boot/vga323.pal
Property changes:
Added: svn:special
+*
\ No newline at end of property
/branches/fs/kernel/arch/amd64/src/boot/boot.S
72,8 → 72,26
movl %eax, grub_eax # save parameters from GRUB
movl %ebx, grub_ebx
# Protected 32-bit. We want to reuse the code-seg descriptor,
# the Default operand size must not be 1 when entering long mode
movl $0x80000000, %eax
cpuid
cmp $0x80000000, %eax # any function > 80000000h?
jbe long_mode_unsupported
movl $(AMD_CPUID_EXTENDED), %eax # Extended function code 80000001
cpuid
bt $29, %edx # Test if long mode is supported.
jc long_mode_supported
 
long_mode_unsupported:
movl $long_mode_msg, %esi
jmp error_halt
long_mode_supported:
#ifdef CONFIG_FB
mov $vesa_init, %esi;
mov $vesa_init, %esi
mov $VESA_INIT_SEGMENT << 4, %edi
mov $e_vesa_init - vesa_init, %ecx
cld
92,25 → 110,7
shr $16, %ebx
mov %bx, KA2PA(vesa_bpp)
#endif
 
# Protected 32-bit. We want to reuse the code-seg descriptor,
# the Default operand size must not be 1 when entering long mode
movl $0x80000000, %eax
cpuid
cmp $0x80000000, %eax # any function > 80000000h?
jbe long_mode_unsupported
movl $(AMD_CPUID_EXTENDED), %eax # Extended function code 80000001
cpuid
bt $29, %edx # Test if long mode is supported.
jc long_mode_supported
 
long_mode_unsupported:
cli
hlt
long_mode_supported:
# Enable 64-bit page translation entries - CR4.PAE = 1.
# Paging is not enabled until after long mode is enabled
303,11 → 303,12
#define VESA_INFO_SIZE 1024
 
#define VESA_MODE_ATTRIBUTES_OFFSET 0
#define VESA_MODE_LIST_PTR_OFFSET 14
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_WIDTH_OFFSET 18
#define VESA_MODE_HEIGHT_OFFSET 20
#define VESA_MODE_BPP_OFFSET 25
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_PHADDR_OFFSET 40
 
#define VESA_END_OF_MODES 0xffff
317,14 → 318,14
#define VESA_GET_INFO 0x4f00
#define VESA_GET_MODE_INFO 0x4f01
#define VESA_SET_MODE 0x4f02
#define VESA_SET_PALETTE 0x4f09
 
#define CONFIG_VESA_BPP_a 255
 
#if CONFIG_VESA_BPP == 24
#undef CONFIG_VESA_BPP_a
#define CONFIG_VESA_BPP_a 32
#define CONFIG_VESA_BPP_VARIANT 32
#endif
 
mov $VESA_GET_INFO, %ax
mov $e_vesa_init - vesa_init, %di
push %di
339,7 → 340,7
mov VESA_MODE_LIST_PTR_OFFSET(%di), %si
add $VESA_INFO_SIZE, %di
 
1:# Try next mode
mov %gs:(%si), %cx
cmp $VESA_END_OF_MODES, %cx
369,10 → 370,13
mov $CONFIG_VESA_BPP, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
 
#ifdef CONFIG_VESA_BPP_VARIANT
jz 2f
mov $CONFIG_VESA_BPP_a, %al
mov $CONFIG_VESA_BPP_VARIANT, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
#endif
jnz 1b
2:
386,7 → 390,60
pop %di
cmp $VESA_OK, %al
jnz 0f
 
#if CONFIG_VESA_BPP == 8
# Set 3:2:3 VGA palette
mov VESA_MODE_ATTRIBUTES_OFFSET(%di), %ax
push %di
mov $vga323 - vesa_init, %di
mov $0x100, %ecx
bt $5, %ax # Test if VGA compatible registers are present
jnc vga_compat
# Try VESA routine to set palette
mov $VESA_SET_PALETTE, %ax
xor %bl, %bl
xor %dx, %dx
int $0x10
jmp vga_not_compat
vga_compat:
# Try VGA registers to set palette
movw $0x3c6, %dx # Set palette mask
movb $0xff, %al
outb %al, %dx
movw $0x3c8, %dx # First index to set
xor %al, %al
outb %al, %dx
movw $0x3c9, %dx # Data port
vga_loop:
movb %es:2(%di), %al
outb %al, %dx
movb %es:1(%di), %al
outb %al, %dx
movb %es:(%di), %al
outb %al, %dx
addw $4, %di
loop vga_loop
vga_not_compat:
pop %di
#endif
mov VESA_MODE_PHADDR_OFFSET(%di), %esi
mov VESA_MODE_WIDTH_OFFSET(%di), %ax
shl $16, %eax
427,8 → 484,10
mov $0xffffffff, %edi # EGA text mode used, because of problems with VESA
xor %ax, %ax
jz 8b # Force relative jump
 
vga323:
#include "vga323.pal"
.code32
vesa_init_protect:
movw $gdtselector(KDATA_DES), %cx
441,11 → 500,73
movw %cx, %fs
movw %cx, %gs
movl $START_STACK, %esp # initialize stack pointer
jmpl $gdtselector(KTEXT32_DES), $vesa_meeting_point
.align 4
e_vesa_init:
#endif
#endif
 
# Print string from %esi to EGA display (in red) and halt
error_halt:
movl $0xb8000, %edi # base of EGA text mode memory
xorl %eax, %eax
movw $0x3d4, %dx # read bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
shl $8, %ax
movw $0x3d4, %dx # read bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
cmp $1920, %ax
jbe cursor_ok
movw $1920, %ax # sanity check for the cursor on the last line
cursor_ok:
movw %ax, %bx
shl $1, %eax
addl %eax, %edi
movw $0x0c00, %ax # black background, light red foreground
cld
ploop:
lodsb
cmp $0, %al
je ploop_end
stosw
inc %bx
jmp ploop
ploop_end:
movw $0x3d4, %dx # write bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bh, %al
outb %al, %dx
movw $0x3d4, %dx # write bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bl, %al
outb %al, %dx
cli
hlt
.section K_DATA_START, "aw", @progbits
.align 4096
513,3 → 634,6
 
grub_ebx:
.long 0
 
long_mode_msg:
.asciz "64 bit long mode not supported. System halted."
/branches/fs/kernel/arch/amd64/src/mm/page.c
83,7 → 83,7
{
uintptr_t cur;
int i;
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL;
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
 
if (config.cpu_active == 1) {
page_mapping_operations = &pt_mapping_operations;
112,10 → 112,8
 
exc_register(14, "page_fault", (iroutine) page_fault);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
else {
} else
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
}
 
 
208,7 → 206,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
/branches/fs/kernel/arch/amd64/src/interrupt.c
156,14 → 156,21
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
174,7 → 181,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
/branches/fs/kernel/arch/ppc64/include/types.h
62,6 → 62,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ppc64/src/mm/as.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc64mm
/** @addtogroup ppc64mm
* @{
*/
/** @file
41,6 → 41,6
as_operations = &as_pt_operations;
}
 
/** @}
/** @}
*/
 
/branches/fs/kernel/arch/ppc64/src/mm/page.c
264,10 → 264,8
uintptr_t cur;
int flags;
/* Frames below 128 MB are mapped using BAT,
map rest of the physical memory */
for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
flags = PAGE_CACHEABLE;
flags = PAGE_CACHEABLE | PAGE_WRITE;
if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
flags |= PAGE_GLOBAL;
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
296,7 → 294,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
/branches/fs/kernel/arch/ppc64/src/interrupt.c
60,11 → 60,19
int inum;
while ((inum = pic_get_pending()) != -1) {
bool ack = false;
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Acknowledge the interrupt before processing */
pic_ack_interrupt(inum);
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
75,7 → 83,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
pic_ack_interrupt(inum);
if (!ack)
pic_ack_interrupt(inum);
}
}
 
/branches/fs/kernel/arch/ppc64/src/ppc64.c
128,6 → 128,7
void arch_grab_console(void)
{
}
 
/** Return console to userspace
*
*/
135,5 → 136,11
{
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/mips32/_link.ld.in
10,7 → 10,7
 
OUTPUT_ARCH(mips)
 
ENTRY(kernel_image_start)
ENTRY(kernel_image_start)
 
SECTIONS {
. = KERNEL_LOAD_ADDRESS;
31,9 → 31,10
*(.rodata*);
*(.sdata);
*(.reginfo);
/* Unfortunately IRIX does not allow us
* to include this as a last section :-(
* BSS/SBSS addresses will be wrong */
*(.sbss);
*(.scommon);
*(.bss); /* uninitialized static variables */
*(COMMON); /* global variables */
symbol_table = .;
*(symtab.*);
}
40,14 → 41,6
_gp = . + 0x8000;
.lit8 : { *(.lit8) }
.lit4 : { *(.lit4) }
.sbss : {
*(.sbss);
*(.scommon);
}
.bss : {
*(.bss); /* uninitialized static variables */
*(COMMON); /* global variables */
}
 
kdata_end = .;
 
/branches/fs/kernel/arch/mips32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/mips32/src/mips32.c
178,5 → 178,11
return 0;
}
 
void arch_reboot(void)
{
___halt();
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/mips32/src/mm/as.c
34,12 → 34,12
 
#include <arch/mm/as.h>
#include <genarch/mm/as_pt.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/asid_fifo.h>
#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <arch.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
57,7 → 57,6
void as_install_arch(as_t *as)
{
entry_hi_t hi;
ipl_t ipl;
 
/*
* Install ASID.
64,12 → 63,8
*/
hi.value = cp0_entry_hi_read();
 
ipl = interrupts_disable();
spinlock_lock(&as->lock);
hi.asid = as->asid;
cp0_entry_hi_write(hi.value);
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/fs/kernel/arch/ia32/include/cpuid.h
49,13 → 49,11
unsigned : 31;
} __attribute__ ((packed));
 
typedef union cpuid_extended_feature_info
{
typedef union cpuid_extended_feature_info {
struct __cpuid_extended_feature_info bits;
uint32_t word;
}cpuid_extended_feature_info;
uint32_t word;
} cpuid_extended_feature_info;
 
 
struct __cpuid_feature_info {
unsigned : 23;
unsigned mmx : 1;
65,11 → 63,10
unsigned : 5;
} __attribute__ ((packed));
 
typedef union cpuid_feature_info
{
typedef union cpuid_feature_info {
struct __cpuid_feature_info bits;
uint32_t word ;
}cpuid_feature_info;
uint32_t word;
} cpuid_feature_info;
 
 
static inline uint32_t has_cpuid(void)
100,16 → 97,9
static inline void cpuid(uint32_t cmd, cpu_info_t *info)
{
asm volatile (
"movl %4, %%eax\n"
"cpuid\n"
"movl %%eax, %0\n"
"movl %%ebx, %1\n"
"movl %%ecx, %2\n"
"movl %%edx, %3\n"
: "=m" (info->cpuid_eax), "=m" (info->cpuid_ebx), "=m" (info->cpuid_ecx), "=m" (info->cpuid_edx)
: "m" (cmd)
: "eax", "ebx", "ecx", "edx"
: "=a" (info->cpuid_eax), "=b" (info->cpuid_ebx), "=c" (info->cpuid_ecx), "=d" (info->cpuid_edx)
: "a" (cmd)
);
}
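
A minimal usage sketch for the rewritten cpuid() wrapper (a hypothetical caller, not part of the change set), querying basic function 1 and testing the MMX feature bit through the cpuid_feature_info overlay declared above:

	cpu_info_t info;
	cpuid_feature_info features;

	if (has_cpuid()) {
		cpuid(1, &info);                 /* basic feature function */
		features.word = info.cpuid_edx;  /* EDX carries feature flags */
		if (features.bits.mmx) {
			/* MMX instructions are available */
		}
	}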
 
/branches/fs/kernel/arch/ia32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ia32/include/asm.h
59,12 → 59,12
*/
static inline void cpu_halt(void)
{
asm("hlt\n");
asm volatile ("hlt\n");
};
 
static inline void cpu_sleep(void)
{
asm("hlt\n");
asm volatile ("hlt\n");
};
 
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
/branches/fs/kernel/arch/ia32/src/pm.c
121,7 → 121,7
void idt_init(void)
{
idescriptor_t *d;
int i;
unsigned int i;
 
for (i = 0; i < IDT_ITEMS; i++) {
d = &idt[i];
230,5 → 230,28
gdtr_load(&cpugdtr);
}
 
/*
 * Reboot the machine by initiating a triple fault: with the IDT
 * zeroed out, the int $0x03 below cannot be dispatched and escalates
 * into a triple fault, which resets the processor.
 */
void arch_reboot(void)
{
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
ptr_16_32_t idtr;
idtr.limit = sizeof(idt);
idtr.base = (uintptr_t) idt;
idtr_load(&idtr);
interrupts_restore(ipl);
asm volatile (
"int $0x03\n"
"cli\n"
"hlt\n"
);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia32/src/smp/smp.c
82,13 → 82,13
 
if (config.cpu_count > 1) {
page_mapping_insert(AS_KERNEL, l_apic_address, (uintptr_t) l_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, io_apic_address, (uintptr_t) io_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
l_apic = (uint32_t *) l_apic_address;
io_apic = (uint32_t *) io_apic_address;
}
}
}
 
/*
/branches/fs/kernel/arch/ia32/src/smp/apic.c
139,7 → 139,14
 
static void l_apic_timer_irq_handler(irq_t *irq, void *arg, ...)
{
/*
* Holding a spinlock could prevent clock() from preempting
* the current thread. In this case, we don't need to hold the
* irq->lock so we just unlock it and then lock it again.
*/
spinlock_unlock(&irq->lock);
clock();
spinlock_lock(&irq->lock);
}
 
/** Initialize APIC on BSP. */
162,6 → 169,7
io_apic_disable_irqs(0xffff);
irq_initialize(&l_apic_timer_irq);
l_apic_timer_irq.preack = true;
l_apic_timer_irq.devno = device_assign_devno();
l_apic_timer_irq.inr = IRQ_CLK;
l_apic_timer_irq.claim = l_apic_timer_claim;
/branches/fs/kernel/arch/ia32/src/boot/vga323.pal
0,0 → 1,256
.byte 0x00, 0x00, 0x00, 0x00
.byte 0x09, 0x00, 0x00, 0x00
.byte 0x12, 0x00, 0x00, 0x00
.byte 0x1b, 0x00, 0x00, 0x00
.byte 0x24, 0x00, 0x00, 0x00
.byte 0x2d, 0x00, 0x00, 0x00
.byte 0x36, 0x00, 0x00, 0x00
.byte 0x3f, 0x00, 0x00, 0x00
.byte 0x00, 0x15, 0x00, 0x00
.byte 0x09, 0x15, 0x00, 0x00
.byte 0x12, 0x15, 0x00, 0x00
.byte 0x1b, 0x15, 0x00, 0x00
.byte 0x24, 0x15, 0x00, 0x00
.byte 0x2d, 0x15, 0x00, 0x00
.byte 0x36, 0x15, 0x00, 0x00
.byte 0x3f, 0x15, 0x00, 0x00
.byte 0x00, 0x2a, 0x00, 0x00
.byte 0x09, 0x2a, 0x00, 0x00
.byte 0x12, 0x2a, 0x00, 0x00
.byte 0x1b, 0x2a, 0x00, 0x00
.byte 0x24, 0x2a, 0x00, 0x00
.byte 0x2d, 0x2a, 0x00, 0x00
.byte 0x36, 0x2a, 0x00, 0x00
.byte 0x3f, 0x2a, 0x00, 0x00
.byte 0x00, 0x3f, 0x00, 0x00
.byte 0x09, 0x3f, 0x00, 0x00
.byte 0x12, 0x3f, 0x00, 0x00
.byte 0x1b, 0x3f, 0x00, 0x00
.byte 0x24, 0x3f, 0x00, 0x00
.byte 0x2d, 0x3f, 0x00, 0x00
.byte 0x36, 0x3f, 0x00, 0x00
.byte 0x3f, 0x3f, 0x00, 0x00
.byte 0x00, 0x00, 0x09, 0x00
.byte 0x09, 0x00, 0x09, 0x00
.byte 0x12, 0x00, 0x09, 0x00
.byte 0x1b, 0x00, 0x09, 0x00
.byte 0x24, 0x00, 0x09, 0x00
.byte 0x2d, 0x00, 0x09, 0x00
.byte 0x36, 0x00, 0x09, 0x00
.byte 0x3f, 0x00, 0x09, 0x00
.byte 0x00, 0x15, 0x09, 0x00
.byte 0x09, 0x15, 0x09, 0x00
.byte 0x12, 0x15, 0x09, 0x00
.byte 0x1b, 0x15, 0x09, 0x00
.byte 0x24, 0x15, 0x09, 0x00
.byte 0x2d, 0x15, 0x09, 0x00
.byte 0x36, 0x15, 0x09, 0x00
.byte 0x3f, 0x15, 0x09, 0x00
.byte 0x00, 0x2a, 0x09, 0x00
.byte 0x09, 0x2a, 0x09, 0x00
.byte 0x12, 0x2a, 0x09, 0x00
.byte 0x1b, 0x2a, 0x09, 0x00
.byte 0x24, 0x2a, 0x09, 0x00
.byte 0x2d, 0x2a, 0x09, 0x00
.byte 0x36, 0x2a, 0x09, 0x00
.byte 0x3f, 0x2a, 0x09, 0x00
.byte 0x00, 0x3f, 0x09, 0x00
.byte 0x09, 0x3f, 0x09, 0x00
.byte 0x12, 0x3f, 0x09, 0x00
.byte 0x1b, 0x3f, 0x09, 0x00
.byte 0x24, 0x3f, 0x09, 0x00
.byte 0x2d, 0x3f, 0x09, 0x00
.byte 0x36, 0x3f, 0x09, 0x00
.byte 0x3f, 0x3f, 0x09, 0x00
.byte 0x00, 0x00, 0x12, 0x00
.byte 0x09, 0x00, 0x12, 0x00
.byte 0x12, 0x00, 0x12, 0x00
.byte 0x1b, 0x00, 0x12, 0x00
.byte 0x24, 0x00, 0x12, 0x00
.byte 0x2d, 0x00, 0x12, 0x00
.byte 0x36, 0x00, 0x12, 0x00
.byte 0x3f, 0x00, 0x12, 0x00
.byte 0x00, 0x15, 0x12, 0x00
.byte 0x09, 0x15, 0x12, 0x00
.byte 0x12, 0x15, 0x12, 0x00
.byte 0x1b, 0x15, 0x12, 0x00
.byte 0x24, 0x15, 0x12, 0x00
.byte 0x2d, 0x15, 0x12, 0x00
.byte 0x36, 0x15, 0x12, 0x00
.byte 0x3f, 0x15, 0x12, 0x00
.byte 0x00, 0x2a, 0x12, 0x00
.byte 0x09, 0x2a, 0x12, 0x00
.byte 0x12, 0x2a, 0x12, 0x00
.byte 0x1b, 0x2a, 0x12, 0x00
.byte 0x24, 0x2a, 0x12, 0x00
.byte 0x2d, 0x2a, 0x12, 0x00
.byte 0x36, 0x2a, 0x12, 0x00
.byte 0x3f, 0x2a, 0x12, 0x00
.byte 0x00, 0x3f, 0x12, 0x00
.byte 0x09, 0x3f, 0x12, 0x00
.byte 0x12, 0x3f, 0x12, 0x00
.byte 0x1b, 0x3f, 0x12, 0x00
.byte 0x24, 0x3f, 0x12, 0x00
.byte 0x2d, 0x3f, 0x12, 0x00
.byte 0x36, 0x3f, 0x12, 0x00
.byte 0x3f, 0x3f, 0x12, 0x00
.byte 0x00, 0x00, 0x1b, 0x00
.byte 0x09, 0x00, 0x1b, 0x00
.byte 0x12, 0x00, 0x1b, 0x00
.byte 0x1b, 0x00, 0x1b, 0x00
.byte 0x24, 0x00, 0x1b, 0x00
.byte 0x2d, 0x00, 0x1b, 0x00
.byte 0x36, 0x00, 0x1b, 0x00
.byte 0x3f, 0x00, 0x1b, 0x00
.byte 0x00, 0x15, 0x1b, 0x00
.byte 0x09, 0x15, 0x1b, 0x00
.byte 0x12, 0x15, 0x1b, 0x00
.byte 0x1b, 0x15, 0x1b, 0x00
.byte 0x24, 0x15, 0x1b, 0x00
.byte 0x2d, 0x15, 0x1b, 0x00
.byte 0x36, 0x15, 0x1b, 0x00
.byte 0x3f, 0x15, 0x1b, 0x00
.byte 0x00, 0x2a, 0x1b, 0x00
.byte 0x09, 0x2a, 0x1b, 0x00
.byte 0x12, 0x2a, 0x1b, 0x00
.byte 0x1b, 0x2a, 0x1b, 0x00
.byte 0x24, 0x2a, 0x1b, 0x00
.byte 0x2d, 0x2a, 0x1b, 0x00
.byte 0x36, 0x2a, 0x1b, 0x00
.byte 0x3f, 0x2a, 0x1b, 0x00
.byte 0x00, 0x3f, 0x1b, 0x00
.byte 0x09, 0x3f, 0x1b, 0x00
.byte 0x12, 0x3f, 0x1b, 0x00
.byte 0x1b, 0x3f, 0x1b, 0x00
.byte 0x24, 0x3f, 0x1b, 0x00
.byte 0x2d, 0x3f, 0x1b, 0x00
.byte 0x36, 0x3f, 0x1b, 0x00
.byte 0x3f, 0x3f, 0x1b, 0x00
.byte 0x00, 0x00, 0x24, 0x00
.byte 0x09, 0x00, 0x24, 0x00
.byte 0x12, 0x00, 0x24, 0x00
.byte 0x1b, 0x00, 0x24, 0x00
.byte 0x24, 0x00, 0x24, 0x00
.byte 0x2d, 0x00, 0x24, 0x00
.byte 0x36, 0x00, 0x24, 0x00
.byte 0x3f, 0x00, 0x24, 0x00
.byte 0x00, 0x15, 0x24, 0x00
.byte 0x09, 0x15, 0x24, 0x00
.byte 0x12, 0x15, 0x24, 0x00
.byte 0x1b, 0x15, 0x24, 0x00
.byte 0x24, 0x15, 0x24, 0x00
.byte 0x2d, 0x15, 0x24, 0x00
.byte 0x36, 0x15, 0x24, 0x00
.byte 0x3f, 0x15, 0x24, 0x00
.byte 0x00, 0x2a, 0x24, 0x00
.byte 0x09, 0x2a, 0x24, 0x00
.byte 0x12, 0x2a, 0x24, 0x00
.byte 0x1b, 0x2a, 0x24, 0x00
.byte 0x24, 0x2a, 0x24, 0x00
.byte 0x2d, 0x2a, 0x24, 0x00
.byte 0x36, 0x2a, 0x24, 0x00
.byte 0x3f, 0x2a, 0x24, 0x00
.byte 0x00, 0x3f, 0x24, 0x00
.byte 0x09, 0x3f, 0x24, 0x00
.byte 0x12, 0x3f, 0x24, 0x00
.byte 0x1b, 0x3f, 0x24, 0x00
.byte 0x24, 0x3f, 0x24, 0x00
.byte 0x2d, 0x3f, 0x24, 0x00
.byte 0x36, 0x3f, 0x24, 0x00
.byte 0x3f, 0x3f, 0x24, 0x00
.byte 0x00, 0x00, 0x2d, 0x00
.byte 0x09, 0x00, 0x2d, 0x00
.byte 0x12, 0x00, 0x2d, 0x00
.byte 0x1b, 0x00, 0x2d, 0x00
.byte 0x24, 0x00, 0x2d, 0x00
.byte 0x2d, 0x00, 0x2d, 0x00
.byte 0x36, 0x00, 0x2d, 0x00
.byte 0x3f, 0x00, 0x2d, 0x00
.byte 0x00, 0x15, 0x2d, 0x00
.byte 0x09, 0x15, 0x2d, 0x00
.byte 0x12, 0x15, 0x2d, 0x00
.byte 0x1b, 0x15, 0x2d, 0x00
.byte 0x24, 0x15, 0x2d, 0x00
.byte 0x2d, 0x15, 0x2d, 0x00
.byte 0x36, 0x15, 0x2d, 0x00
.byte 0x3f, 0x15, 0x2d, 0x00
.byte 0x00, 0x2a, 0x2d, 0x00
.byte 0x09, 0x2a, 0x2d, 0x00
.byte 0x12, 0x2a, 0x2d, 0x00
.byte 0x1b, 0x2a, 0x2d, 0x00
.byte 0x24, 0x2a, 0x2d, 0x00
.byte 0x2d, 0x2a, 0x2d, 0x00
.byte 0x36, 0x2a, 0x2d, 0x00
.byte 0x3f, 0x2a, 0x2d, 0x00
.byte 0x00, 0x3f, 0x2d, 0x00
.byte 0x09, 0x3f, 0x2d, 0x00
.byte 0x12, 0x3f, 0x2d, 0x00
.byte 0x1b, 0x3f, 0x2d, 0x00
.byte 0x24, 0x3f, 0x2d, 0x00
.byte 0x2d, 0x3f, 0x2d, 0x00
.byte 0x36, 0x3f, 0x2d, 0x00
.byte 0x3f, 0x3f, 0x2d, 0x00
.byte 0x00, 0x00, 0x36, 0x00
.byte 0x09, 0x00, 0x36, 0x00
.byte 0x12, 0x00, 0x36, 0x00
.byte 0x1b, 0x00, 0x36, 0x00
.byte 0x24, 0x00, 0x36, 0x00
.byte 0x2d, 0x00, 0x36, 0x00
.byte 0x36, 0x00, 0x36, 0x00
.byte 0x3f, 0x00, 0x36, 0x00
.byte 0x00, 0x15, 0x36, 0x00
.byte 0x09, 0x15, 0x36, 0x00
.byte 0x12, 0x15, 0x36, 0x00
.byte 0x1b, 0x15, 0x36, 0x00
.byte 0x24, 0x15, 0x36, 0x00
.byte 0x2d, 0x15, 0x36, 0x00
.byte 0x36, 0x15, 0x36, 0x00
.byte 0x3f, 0x15, 0x36, 0x00
.byte 0x00, 0x2a, 0x36, 0x00
.byte 0x09, 0x2a, 0x36, 0x00
.byte 0x12, 0x2a, 0x36, 0x00
.byte 0x1b, 0x2a, 0x36, 0x00
.byte 0x24, 0x2a, 0x36, 0x00
.byte 0x2d, 0x2a, 0x36, 0x00
.byte 0x36, 0x2a, 0x36, 0x00
.byte 0x3f, 0x2a, 0x36, 0x00
.byte 0x00, 0x3f, 0x36, 0x00
.byte 0x09, 0x3f, 0x36, 0x00
.byte 0x12, 0x3f, 0x36, 0x00
.byte 0x1b, 0x3f, 0x36, 0x00
.byte 0x24, 0x3f, 0x36, 0x00
.byte 0x2d, 0x3f, 0x36, 0x00
.byte 0x36, 0x3f, 0x36, 0x00
.byte 0x3f, 0x3f, 0x36, 0x00
.byte 0x00, 0x00, 0x3f, 0x00
.byte 0x09, 0x00, 0x3f, 0x00
.byte 0x12, 0x00, 0x3f, 0x00
.byte 0x1b, 0x00, 0x3f, 0x00
.byte 0x24, 0x00, 0x3f, 0x00
.byte 0x2d, 0x00, 0x3f, 0x00
.byte 0x36, 0x00, 0x3f, 0x00
.byte 0x3f, 0x00, 0x3f, 0x00
.byte 0x00, 0x15, 0x3f, 0x00
.byte 0x09, 0x15, 0x3f, 0x00
.byte 0x12, 0x15, 0x3f, 0x00
.byte 0x1b, 0x15, 0x3f, 0x00
.byte 0x24, 0x15, 0x3f, 0x00
.byte 0x2d, 0x15, 0x3f, 0x00
.byte 0x36, 0x15, 0x3f, 0x00
.byte 0x3f, 0x15, 0x3f, 0x00
.byte 0x00, 0x2a, 0x3f, 0x00
.byte 0x09, 0x2a, 0x3f, 0x00
.byte 0x12, 0x2a, 0x3f, 0x00
.byte 0x1b, 0x2a, 0x3f, 0x00
.byte 0x24, 0x2a, 0x3f, 0x00
.byte 0x2d, 0x2a, 0x3f, 0x00
.byte 0x36, 0x2a, 0x3f, 0x00
.byte 0x3f, 0x2a, 0x3f, 0x00
.byte 0x00, 0x3f, 0x3f, 0x00
.byte 0x09, 0x3f, 0x3f, 0x00
.byte 0x12, 0x3f, 0x3f, 0x00
.byte 0x1b, 0x3f, 0x3f, 0x00
.byte 0x24, 0x3f, 0x3f, 0x00
.byte 0x2d, 0x3f, 0x3f, 0x00
.byte 0x36, 0x3f, 0x3f, 0x00
.byte 0x3f, 0x3f, 0x3f, 0x00
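
The table above is the vga323.pal payload: a 3:2:3 palette with blue in the low three bits of the color index, green in the next two, and red in the top three, each component scaled to the DAC's 6-bit range (0-63) and padded to four bytes per entry. A standalone C sketch that regenerates the table (illustrative only, not part of the tree):

#include <stdio.h>

/* Regenerate the 3:2:3 palette table: entry i is stored as
 * { blue, green, red, 0 }, matching the order in which vga_loop
 * below feeds the DAC (offset 2 first = red, offset 0 last = blue). */
int main(void)
{
	unsigned int i;

	for (i = 0; i < 256; i++) {
		unsigned int b = (i & 0x7) * 63 / 7;        /* bits 2:0, 8 levels */
		unsigned int g = ((i >> 3) & 0x3) * 63 / 3; /* bits 4:3, 4 levels */
		unsigned int r = ((i >> 5) & 0x7) * 63 / 7; /* bits 7:5, 8 levels */

		printf("\t.byte 0x%02x, 0x%02x, 0x%02x, 0x00\n", b, g, r);
	}
	return 0;
}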
/branches/fs/kernel/arch/ia32/src/boot/boot.S
63,9 → 63,24
jmpl $selector(KTEXT_DES), $multiboot_meeting_point
multiboot_meeting_point:
pushl %ebx # save parameters from GRUB
pushl %eax
movl %eax, grub_eax # save parameters from GRUB
movl %ebx, grub_ebx
xorl %eax, %eax
cpuid
cmp $0x0, %eax # is the highest basic function > 0?
jbe pse_unsupported
movl $0x1, %eax # Basic function code 1
cpuid
bt $3, %edx # Test if PSE is supported
jc pse_supported
 
pse_unsupported:
movl $pse_msg, %esi
jmp error_halt
pse_supported:
#ifdef CONFIG_FB
mov $vesa_init, %esi
mov $VESA_INIT_SEGMENT << 4, %edi
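
The CPUID sequence added above first asks function 0 for the highest supported basic function and bails out unless it is at least 1, then reads the feature flags of function 1, where PSE is EDX bit 3. Roughly the same probe in C, sketched with GCC inline assembly (the function name is illustrative):

/* Sketch of the PSE probe performed in multiboot_meeting_point. */
static int cpu_has_pse(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID function 0: EAX = highest supported basic function. */
	asm volatile ("cpuid"
	    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
	    : "a" (0));
	if (eax < 1)
		return 0;

	/* CPUID function 1: feature flags; PSE is bit 3 of EDX. */
	asm volatile ("cpuid"
	    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
	    : "a" (1));
	return (edx >> 3) & 1;
}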
89,8 → 104,8
call map_kernel # map kernel and turn paging on
popl %eax
popl %ebx
movl grub_eax, %eax
movl grub_ebx, %ebx
cmpl $MULTIBOOT_LOADER_MAGIC, %eax # compare GRUB signature
je valid_boot
209,7 → 224,7
rep movsb
#endif
 
call main_bsp # never returns
 
cli
222,19 → 237,20
# For simplicity, we map the entire 4G space.
#
movl %cr4, %ecx
orl $(1<<4), %ecx
movl %ecx, %cr4 # turn PSE on
orl $(1 << 4), %ecx # turn PSE on
andl $(~(1 << 5)), %ecx # turn PAE off
movl %ecx, %cr4
movl $(page_directory+0), %esi
movl $(page_directory+2048), %edi
movl $(page_directory + 0), %esi
movl $(page_directory + 2048), %edi
xorl %ecx, %ecx
xorl %ebx, %ebx
0:
movl $((1<<7)|(1<<0)), %eax
movl $((1 << 7) | (1 << 1) | (1 << 0)), %eax
orl %ebx, %eax
movl %eax, (%esi,%ecx,4) # mapping 0x00000000+%ecx*4M => 0x00000000+%ecx*4M
movl %eax, (%edi,%ecx,4) # mapping 0x80000000+%ecx*4M => 0x00000000+%ecx*4M
addl $(4*1024*1024), %ebx
movl %eax, (%esi, %ecx, 4) # mapping 0x00000000 + %ecx * 4M => 0x00000000 + %ecx * 4M
movl %eax, (%edi, %ecx, 4) # mapping 0x80000000 + %ecx * 4M => 0x00000000 + %ecx * 4M
addl $(4 * 1024 * 1024), %ebx
 
incl %ecx
cmpl $512, %ecx
242,12 → 258,71
 
movl %esi, %cr3
# turn paging on
movl %cr0, %ebx
orl $(1<<31), %ebx
orl $(1 << 31), %ebx # turn paging on
movl %ebx, %cr0
ret
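
The mapping loop above now also sets bit 1 (writable) in each entry alongside present (bit 0) and page size (bit 7); with PSE enabled, one page-directory entry covers 4 MiB, so 512 entries map the low 2 GiB twice: identity at 0x00000000 and the kernel half at 0x80000000 (offset 2048 bytes = 512 entries into the directory). A C sketch of the same fill (the PDE_* names are illustrative):

#include <stdint.h>

#define PDE_PRESENT  (1 << 0)   /* entry is valid */
#define PDE_WRITABLE (1 << 1)   /* writes allowed */
#define PDE_4M       (1 << 7)   /* 4 MiB page (PSE) */

/* Fill a 1024-entry page directory the way the loop above does. */
static void fill_page_directory(uint32_t *pd)
{
	unsigned int i;

	for (i = 0; i < 512; i++) {
		uint32_t pde = (i * 0x400000) | PDE_4M | PDE_WRITABLE | PDE_PRESENT;

		pd[i] = pde;          /* identity mapping */
		pd[i + 512] = pde;    /* kernel mapping at 0x80000000 */
	}
}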
 
# Print the NUL-terminated string at %esi to the EGA display (in light red) and halt
error_halt:
movl $0xb8000, %edi # base of EGA text mode memory
xorl %eax, %eax
movw $0x3d4, %dx # read bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
shl $8, %ax
movw $0x3d4, %dx # read bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
cmp $1920, %ax
jbe cursor_ok
movw $1920, %ax # sanity check for the cursor on the last line
cursor_ok:
movw %ax, %bx
shl $1, %eax
addl %eax, %edi
movw $0x0c00, %ax # black background, light red foreground
cld
ploop:
lodsb # fetch the next character
cmp $0, %al # NUL terminator?
je ploop_end
stosw # store the character together with its attribute
inc %bx
jmp ploop
ploop_end:
movw $0x3d4, %dx # write bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bh, %al
outb %al, %dx
movw $0x3d4, %dx # write bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bl, %al
outb %al, %dx
cli
hlt
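
The cursor handling in error_halt goes through the standard VGA CRT controller: write the register index (0x0e = cursor location high byte, 0x0f = low byte) to port 0x3d4, then transfer the data on 0x3d5. A C sketch of the read-back, clamped to the start of the last 80x25 line (1920 = 80 * 24) just as above; the outb/inb wrappers are assumed here:

#include <stdint.h>

static inline void outb(uint16_t port, uint8_t val)
{
	asm volatile ("outb %0, %1" : : "a" (val), "d" (port));
}

static inline uint8_t inb(uint16_t port)
{
	uint8_t val;

	asm volatile ("inb %1, %0" : "=a" (val) : "d" (port));
	return val;
}

/* Read the hardware cursor position the way error_halt does. */
static uint16_t cursor_offset(void)
{
	uint16_t pos;

	outb(0x3d4, 0x0e);              /* cursor location, high byte */
	pos = (uint16_t) inb(0x3d5) << 8;
	outb(0x3d4, 0x0f);              /* cursor location, low byte */
	pos |= inb(0x3d5);

	return (pos > 1920) ? 1920 : pos;
}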
 
#ifdef CONFIG_FB
vesa_init:
jmp $selector(VESA_INIT_DES), $vesa_init_real - vesa_init
278,11 → 353,12
#define VESA_INFO_SIZE 1024
 
#define VESA_MODE_ATTRIBUTES_OFFSET 0
#define VESA_MODE_LIST_PTR_OFFSET 14
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_WIDTH_OFFSET 18
#define VESA_MODE_HEIGHT_OFFSET 20
#define VESA_MODE_BPP_OFFSET 25
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_PHADDR_OFFSET 40
 
#define VESA_END_OF_MODES 0xffff
292,12 → 368,10
#define VESA_GET_INFO 0x4f00
#define VESA_GET_MODE_INFO 0x4f01
#define VESA_SET_MODE 0x4f02
#define VESA_SET_PALETTE 0x4f09
 
#define CONFIG_VESA_BPP_a 255
 
#if CONFIG_VESA_BPP == 24
#undef CONFIG_VESA_BPP_a
#define CONFIG_VESA_BPP_a 32
#define CONFIG_VESA_BPP_VARIANT 32
#endif
 
mov $VESA_GET_INFO, %ax
338,18 → 412,21
cmp VESA_MODE_WIDTH_OFFSET(%di), %ax
jnz 1b
mov $CONFIG_VESA_HEIGHT,%ax
mov $CONFIG_VESA_HEIGHT, %ax
cmp VESA_MODE_HEIGHT_OFFSET(%di), %ax
jnz 1b
mov $CONFIG_VESA_BPP, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
 
#ifdef CONFIG_VESA_BPP_VARIANT
jz 2f
mov $CONFIG_VESA_BPP_a, %al
mov $CONFIG_VESA_BPP_VARIANT, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
#endif
jnz 1b
 
2:
mov %cx, %bx
361,7 → 438,60
pop %di
cmp $VESA_OK, %al
jnz 0f
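
The depth test in the loop above accepts either the requested CONFIG_VESA_BPP or, when defined, CONFIG_VESA_BPP_VARIANT (32-bit pixels standing in for a nominal 24-bit mode, per the definitions earlier). As a sketch, the predicate the two compare-and-branch pairs implement (configuration macros as defined by the build):

/* Sketch of the mode-depth predicate used while scanning the mode list. */
static int bpp_acceptable(unsigned int mode_bpp)
{
	if (mode_bpp == CONFIG_VESA_BPP)
		return 1;
#ifdef CONFIG_VESA_BPP_VARIANT
	if (mode_bpp == CONFIG_VESA_BPP_VARIANT)
		return 1;
#endif
	return 0;
}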
 
#if CONFIG_VESA_BPP == 8
# Set 3:2:3 VGA palette
mov VESA_MODE_ATTRIBUTES_OFFSET(%di), %ax
push %di
mov $vga323 - vesa_init, %di
mov $0x100, %ecx # 256 palette entries
bt $5, %ax # ModeAttributes bit 5 set means the mode is not VGA compatible
jnc vga_compat
# Try VESA routine to set palette
mov $VESA_SET_PALETTE, %ax
xor %bl, %bl
xor %dx, %dx
int $0x10
jmp vga_not_compat
vga_compat:
# Try VGA registers to set palette
movw $0x3c6, %dx # Set palette mask
movb $0xff, %al
outb %al, %dx
movw $0x3c8, %dx # First index to set
xor %al, %al
outb %al, %dx
movw $0x3c9, %dx # Data port
vga_loop:
movb %es:2(%di), %al # red component (the DAC expects R, G, B order)
outb %al, %dx
movb %es:1(%di), %al # green component
outb %al, %dx
movb %es:(%di), %al # blue component
outb %al, %dx
addw $4, %di
loop vga_loop
vga_not_compat:
pop %di
#endif
mov VESA_MODE_PHADDR_OFFSET(%di), %esi
mov VESA_MODE_WIDTH_OFFSET(%di), %ax
shl $16, %eax
402,12 → 532,12
mov $0xffffffff, %edi # fall back to EGA text mode because of problems with VESA
xor %ax, %ax
jz 8b # Force relative jump
 
vga323:
#include "vga323.pal"
 
.code32
vesa_init_protect:
popl %esp
 
movw $selector(KDATA_DES), %cx
movw %cx, %es
movw %cx, %fs
415,6 → 545,8
movw %cx, %ds # kernel data + stack
movw %cx, %ss
movl $START_STACK, %esp # initialize stack pointer
 
jmpl $selector(KTEXT_DES), $vesa_meeting_point
 
.align 4
426,3 → 558,12
.align 4096
page_directory:
.space 4096, 0
 
grub_eax:
.long 0
 
grub_ebx:
.long 0
 
pse_msg:
.asciz "Page Size Extension not supported. System halted."
/branches/fs/kernel/arch/ia32/src/mm/page.c
61,7 → 61,7
* PA2KA(identity) mapping for all frames until last_frame.
*/
for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
flags = PAGE_CACHEABLE;
flags = PAGE_CACHEABLE | PAGE_WRITE;
if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
flags |= PAGE_GLOBAL;
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
69,10 → 69,8
 
exc_register(14, "page_fault", (iroutine) page_fault);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
else {
} else
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
 
paging_on();
}
86,7 → 84,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
95,12 → 93,12
 
void page_fault(int n, istate_t *istate)
{
uintptr_t page;
uintptr_t page;
pf_access_t access;
page = read_cr2();
page = read_cr2();
if (istate->error_word & PFERR_CODE_RSVD)
if (istate->error_word & PFERR_CODE_RSVD)
panic("Reserved bit set in page directory.\n");
 
if (istate->error_word & PFERR_CODE_RW)
107,14 → 105,14
access = PF_ACCESS_WRITE;
else
access = PF_ACCESS_READ;
 
if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate, "Page fault: %#x", page);
 
decode_istate(istate);
printf("page fault address: %#x\n", page);
panic("page fault\n");
}
decode_istate(istate);
printf("page fault address: %#x\n", page);
panic("page fault\n");
}
}
 
/** @}
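
For reference, the error-word bits tested in page_fault() above are the architectural ia32 page-fault codes; the values below match the PFERR_* names used in the code (a sketch, not the kernel's actual header):

/* ia32 page-fault error word, standard bit positions. */
#define PFERR_CODE_P    (1 << 0)  /* 0: page not present, 1: protection violation */
#define PFERR_CODE_RW   (1 << 1)  /* 0: faulting access was a read, 1: a write */
#define PFERR_CODE_RSVD (1 << 3)  /* reserved bit set in a paging structure */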
/branches/fs/kernel/arch/ia32/src/interrupt.c
176,14 → 176,21
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
194,7 → 201,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
/branches/fs/kernel/arch/ia32/src/drivers/i8254.c
82,6 → 82,7
void i8254_init(void)
{
irq_initialize(&i8254_irq);
i8254_irq.preack = true;
i8254_irq.devno = device_assign_devno();
i8254_irq.inr = IRQ_CLK;
i8254_irq.claim = i8254_claim;
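
The new preack flag ties the last two hunks together: a device such as the i8254 clock asks for its end-of-interrupt before the handler body runs, since the clock handler may reschedule and not return promptly, and exc_irq() then skips the trailing EOI. A condensed C sketch of the pattern (the struct layout beyond preack is assumed for illustration):

#include <stdbool.h>

extern void trap_virtual_eoi(void);

typedef struct irq irq_t;
struct irq {
	bool preack;                    /* acknowledge before running the handler */
	void (*handler)(irq_t *, void *);
	void *arg;
};

/* Dispatch one interrupt the way exc_irq() above does. */
static void dispatch(irq_t *irq)
{
	bool ack = false;

	if (irq->preack) {
		/* EOI first: the handler may block or reschedule. */
		trap_virtual_eoi();
		ack = true;
	}
	irq->handler(irq, irq->arg);

	if (!ack)
		trap_virtual_eoi();     /* usual path: EOI after handling */
}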