/SPARTAN/trunk/include/mm/page.h |
---|
31,6 → 31,7 |
#include <arch/types.h> |
#include <arch/mm/page.h> |
#include <typedefs.h> |
#define PAGE_NOT_CACHEABLE (0<<0) |
#define PAGE_CACHEABLE (1<<0) |
47,5 → 48,6 |
extern void page_init(void); |
extern void map_page_to_frame(__address page, __address frame, int flags, __address root); |
extern void map_structure(__address s, size_t size); |
#endif |
/SPARTAN/trunk/src/mm/page.c |
---|
28,6 → 28,8 |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <arch/types.h> |
#include <typedefs.h> |
void page_init(void) |
{ |
34,3 → 36,25 |
page_arch_init(); |
/* NOTE(review): page 0 is mapped PAGE_NOT_PRESENT — presumably so that |
   NULL-pointer dereferences fault instead of silently reading low memory; |
   confirm intent against the architecture code. */ |
map_page_to_frame(0x0, 0x0, PAGE_NOT_PRESENT, 0); |
} |
/** Map memory structure |
 * |
 * Identity-map memory structure |
 * considering possible crossings |
 * of page boundaries. |
 * |
 * @param s Address of the structure. |
 * @param size Size of the structure. |
 */ |
void map_structure(__address s, size_t size) |
{ |
	int i, cnt, length; |
 |
	/* |
	 * Derive the page mask from PAGE_SIZE instead of the hard-coded |
	 * 4K mask 0xfffff000 (resolves the portability TODO). |
	 * Assumes PAGE_SIZE is a power of two, as on all supported |
	 * architectures. |
	 */ |
	length = size + (s - (s & ~((__address) PAGE_SIZE - 1))); |
	/* Number of pages covered, rounding the tail page up. */ |
	cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); |
 |
	for (i = 0; i < cnt; i++) |
		map_page_to_frame(s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE, 0); |
} |
/SPARTAN/trunk/arch/ia32/include/atomic.h |
---|
47,7 → 47,7 |
#endif /* __SMP__ */ |
} |
static inline int test_and_set(int *val) { |
static inline int test_and_set(volatile int *val) { |
int v; |
__asm__ volatile ( |
61,6 → 61,6 |
} |
extern void spinlock_arch(int *val); |
extern void spinlock_arch(volatile int *val); |
#endif |
/SPARTAN/trunk/arch/ia32/src/smp/mps.c |
---|
404,7 → 404,6 |
{ |
struct __processor_entry *pr; |
__address src, dst; |
__address frame; |
int i; |
waitq_initialize(&ap_completion_wq); |
416,23 → 415,16 |
/* |
* We need to access data in frame 0. |
* We boldly make use of kernel address space mapping. |
*/ |
frame = frame_alloc(0); |
map_page_to_frame(frame,0,PAGE_CACHEABLE,0); |
/* |
* Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot() |
*/ |
*((__u16 *) (frame + 0x467+0)) = ((__address) ap_boot) >> 4; /* segment */ |
*((__u16 *) (frame + 0x467+2)) = 0; /* offset */ |
*((__u16 *) (PA2KA(0x467+0))) = ((__address) ap_boot) >> 4; /* segment */ |
*((__u16 *) (PA2KA(0x467+2))) = 0; /* offset */ |
/* |
* Give back and unmap the borrowed frame. |
*/ |
map_page_to_frame(frame,0,PAGE_NOT_PRESENT,0); |
frame_free(frame); |
/* |
* Save 0xa to address 0xf of the CMOS RAM. |
* BIOS will not do the POST after the INIT signal. |
*/ |
471,6 → 463,7 |
panic("couldn't allocate memory for GDT\n"); |
memcopy(gdt, gdt_new, GDT_ITEMS*sizeof(struct descriptor)); |
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0); |
gdtr.base = KA2PA((__address) gdt_new); |
if (l_apic_send_init_ipi(pr[i].l_apic_id)) { |
/SPARTAN/trunk/arch/ia32/src/mm/page.c |
---|
60,10 → 60,10 |
bootstrap_dba = dba; |
/* |
* Identity mapping for all but 0th page. |
* PA2KA(identity) mapping for all but 0th page. |
* Identity mapping for all frames. |
* PA2KA(identity) mapping for all frames. |
*/ |
for (i = 1; i < frames; i++) { |
for (i = 0; i < frames; i++) { |
map_page_to_frame(i * PAGE_SIZE, i * PAGE_SIZE, PAGE_CACHEABLE, KA2PA(dba)); |
map_page_to_frame(PA2KA(i * PAGE_SIZE), i * PAGE_SIZE, PAGE_CACHEABLE, KA2PA(dba)); |
} |
/SPARTAN/trunk/arch/ia32/src/acpi/acpi.c |
---|
78,15 → 78,8 |
void map_sdt(struct acpi_sdt_header *sdt) |
{ |
int i, cnt, length; |
map_page_to_frame((__address) sdt, (__address) sdt, PAGE_NOT_CACHEABLE, 0); |
/* NOTE(review): the line below (removed by this changeset) had an |
   operator-precedence bug — bitwise '&' binds looser than '+'/'-', so it |
   evaluated as (sdt->length + sdt - sdt) & 0xfffff000, i.e. merely |
   sdt->length & 0xfffff000, not the intended page-rounded length. |
   Delegating to map_structure() below fixes this and removes the |
   duplicated page-walk logic. */ |
length = sdt->length + ((__address) sdt) - ((__address) sdt)&0xfffff000; |
cnt = length/PAGE_SIZE + (length%PAGE_SIZE>0); |
for (i = 1; i < cnt; i++) |
map_page_to_frame(((__address) sdt) + i*PAGE_SIZE, ((__address) sdt) + i*PAGE_SIZE, PAGE_NOT_CACHEABLE, 0); |
map_structure((__address) sdt, sdt->length); |
} |
void acpi_init(void) |