/trunk/kernel/generic/include/macros.h
---
38,12 → 38,13
#include <arch/types.h>
#include <typedefs.h>
#define is_digit(d) (((d) >= '0') && ((d) <= '9'))
#define is_lower(c) (((c) >= 'a') && ((c) <= 'z'))
#define is_upper(c) (((c) >= 'A') && ((c) <= 'Z'))
#define is_alpha(c) (is_lower(c) || is_upper(c))
#define is_alphanum(c) (is_alpha(c) || is_digit(c))
#define is_white(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || ((c) == '\r'))
#define is_digit(d) (((d) >= '0') && ((d) <= '9'))
#define is_lower(c) (((c) >= 'a') && ((c) <= 'z'))
#define is_upper(c) (((c) >= 'A') && ((c) <= 'Z'))
#define is_alpha(c) (is_lower(c) || is_upper(c))
#define is_alphanum(c) (is_alpha(c) || is_digit(c))
#define is_white(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || \
((c) == '\r'))
#define min(a,b) ((a) < (b) ? (a) : (b))
#define max(a,b) ((a) > (b) ? (a) : (b))
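These function-like macros substitute their arguments textually, so an argument with side effects gets evaluated more than once. A minimal, self-contained sketch of the hazard (standard C, independent of the kernel tree):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int i = 0;
	int a[] = { 1, 42 };

	/* Expands to ((a[i++]) < (2) ? (a[i++]) : (2)):
	 * a[i++] is evaluated twice when it is the smaller operand. */
	int m = min(a[i++], 2);

	printf("m=%d, i=%d\n", m, i);	/* prints m=42, i=2, not m=1, i=1 */
	return 0;
}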
/trunk/kernel/generic/src/main/main.c
---
126,7 → 126,7
static void main_ap_separated_stack(void);
#endif
#define CONFIG_STACK_SIZE ((1<<STACK_FRAMES)*STACK_SIZE)
#define CONFIG_STACK_SIZE ((1 << STACK_FRAMES) * STACK_SIZE)
/** Main kernel routine for bootstrap CPU.
*
/trunk/kernel/generic/src/proc/task.c
---
268,7 → 268,7
/** Get accounting data of given task.
*
* Note that task_lock on @t must be already held and
* Note that task lock of 't' must be already held and
* interrupts must be already disabled.
*
* @param t Pointer to thread.
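The comment above documents a two-step protocol. A hypothetical caller, assuming the usual HelenOS kernel primitives (interrupts_disable()/interrupts_restore(), spinlock_lock()/spinlock_unlock()) and a uint64_t task_get_accounting(task_t *) signature, might look like this sketch:

static uint64_t task_cycles(task_t *t)
{
	ipl_t ipl = interrupts_disable();	/* interrupts off first */
	spinlock_lock(&t->lock);		/* then hold the task lock */

	uint64_t cycles = task_get_accounting(t);

	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
	return cycles;
}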
/trunk/kernel/generic/src/mm/frame.c
---
1138,7 → 1138,7
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
/trunk/kernel/arch/sparc64/include/mm/page.h
---
40,7 → 40,7
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */
#ifdef KERNEL
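With 8K pages (PAGE_WIDTH == 13) and a 16K alias boundary, a page's color is simply virtual-address bit 13. A hypothetical helper (the name and exact form are illustrative, not taken from the tree):

#define PAGE_COLOR(va) \
	(((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))

/* Mapping one frame at two virtual addresses is safe for the
 * virtually-indexed cache only when the colors agree:
 * PAGE_COLOR(0x2000) == 1, PAGE_COLOR(0x4000) == 0. */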
/trunk/kernel/arch/sparc64/include/mm/tsb.h
---
42,9 → 42,10
* again, is nice because TSBs need to be locked
* in TLBs - only one TLB entry will do.
*/
#define TSB_SIZE 2 /* when changing this, change as.c as well */
#define ITSB_ENTRY_COUNT (512*(1<<TSB_SIZE))
#define DTSB_ENTRY_COUNT (512*(1<<TSB_SIZE))
#define TSB_SIZE 2 /* when changing this, change
* as.c as well */
#define ITSB_ENTRY_COUNT (512 * (1 << TSB_SIZE))
#define DTSB_ENTRY_COUNT (512 * (1 << TSB_SIZE))
#define TSB_TAG_TARGET_CONTEXT_SHIFT 48
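For the record, TSB_SIZE == 2 gives 512 * (1 << 2) = 2048 entries per TSB. On a C11 toolchain, a derived constant like this can be pinned down at compile time; a sketch, not code from the tree:

#include <assert.h>

#define TSB_SIZE		2
#define ITSB_ENTRY_COUNT	(512 * (1 << TSB_SIZE))

static_assert(ITSB_ENTRY_COUNT == 2048, "TSB entry count changed");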
80,12 → 81,14
uint64_t value;
struct {
uint64_t base : 51; /**< TSB base address, bits 63:13. */
unsigned split : 1; /**< Split vs. common TSB for 8K and 64K pages.
* HelenOS uses only 8K pages for user mappings,
* so we always set this to 0.
*/
unsigned split : 1; /**< Split vs. common TSB for 8K and 64K
* pages. HelenOS uses only 8K pages
* for user mappings, so we always set
* this to 0.
*/
unsigned : 9;
unsigned size : 3; /**< TSB size. Number of entries is 512*2^size. */
unsigned size : 3; /**< TSB size. Number of entries is
* 512 * 2^size. */
} __attribute__ ((packed));
};
typedef union tsb_base_reg tsb_base_reg_t;
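Putting the bit layout to work, composing a register value for an 8K-aligned TSB might look as follows; tsb_va and the itsb_base_write() accessor are assumptions made for the sake of the sketch:

static void tsb_base_install(uintptr_t tsb_va)
{
	tsb_base_reg_t tsb_base;

	tsb_base.value = 0;
	tsb_base.base = tsb_va >> 13;	/* bits 63:13 of the base address */
	tsb_base.split = 0;	/* common TSB; only 8K user pages are used */
	tsb_base.size = TSB_SIZE;	/* 512 * 2^TSB_SIZE = 2048 entries */

	itsb_base_write(tsb_base.value);	/* assumed ASI write accessor */
}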
/trunk/kernel/arch/sparc64/src/smp/ipi.c
---
61,7 → 61,7
bool done;
/*
* This functin might enable interrupts for a while.
* This function might enable interrupts for a while.
* In order to prevent migration to another processor,
* we explicitly disable preemption.
*/
/trunk/kernel/arch/sparc64/src/sparc64.c
---
50,6 → 50,7
bootinfo_t bootinfo;
/** Perform sparc64 specific initialization before main_bsp() is called. */
void arch_pre_main(void)
{
/* Copy init task info. */
69,6 → 70,7
ofw_tree_init(bootinfo.ofw_root);
}
/** Perform sparc64 specific initialization before mm is initialized. */
void arch_pre_mm_init(void)
{
if (config.cpu_active == 1)
75,10 → 77,16
trap_init();
}
/** Perform sparc64 specific initialization after mm is initialized. */
void arch_post_mm_init(void)
{
if (config.cpu_active == 1) {
irq_init(1<<11, 128);
/*
* We have 2^11 different interrupt vectors.
* But we only create 128 buckets.
*/
irq_init(1 << 11, 128);
standalone_sparc64_console_init();
}
}
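The comment explains the asymmetry: 2^11 = 2048 possible interrupt vectors share only 128 hash-table buckets. A hypothetical illustration of how such an over-committed table resolves a vector to a chain (the names and the trivial hash are illustrative, not HelenOS's actual hash function):

#define IRQ_VECTORS	(1 << 11)	/* 2048 distinct vectors */
#define IRQ_BUCKETS	128		/* chains actually allocated */

static inline unsigned int irq_bucket(unsigned int inr)
{
	/* 128 is a power of two, so modulo reduces to a mask. */
	return inr & (IRQ_BUCKETS - 1);
}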
/trunk/kernel/arch/sparc64/src/mm/tlb.c
---
57,9 → 57,12
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
char *context_encoding[] = {
"Primary",
90,7 → 93,8
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable)
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
124,7 → 128,8
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param ro If true, the entry will be created read-only, regardless of its w field.
* @param ro If true, the entry will be created read-only, regardless of its w
* field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
{
212,11 → 217,13
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
do_fast_instruction_access_mmu_miss_fault(istate,
__FUNCTION__);
}
}
}
223,9 → 230,8
/** DTLB miss handler.
*
* Note that some faults (e.g. kernel faults) were already resolved
* by the low-level, assembly language part of the fast_data_access_mmu_miss
* handler.
* Note that some faults (e.g. kernel faults) were already resolved by the
* low-level, assembly language part of the fast_data_access_mmu_miss handler.
*/
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
239,9 → 245,11
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected kernel page fault.");
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
}
page_table_lock(AS, true);
263,7 → 271,8
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
}
}
}
282,8 → 291,9
t = page_mapping_find(AS, va);
if (t && PTE_WRITABLE(t)) {
/*
* The mapping was found in the software page hash table and is writable.
* Demap the old mapping and insert an updated mapping into DTLB.
* The mapping was found in the software page hash table and is
* writable. Demap the old mapping and insert an updated mapping
* into DTLB.
*/
t->a = true;
t->d = true;
295,11 → 305,13
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag, __FUNCTION__);
do_fast_data_access_protection_fault(istate, tag,
__FUNCTION__);
}
}
}
316,8 → 328,11
d.value = itlb_data_access_read(i);
t.value = itlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
printf("D-TLB contents:\n");
325,13 → 340,17
d.value = dtlb_data_access_read(i);
t.value = dtlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
}
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
*str)
{
fault_if_from_uspace(istate, "%s\n", str);
dump_istate(istate);
338,25 → 357,29
panic("%s\n", str);
}
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
{
uintptr_t va;
va = tag.vpn << PAGE_WIDTH;
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
}
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
{
uintptr_t va;
va = tag.vpn << PAGE_WIDTH;
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
370,8 → 393,9
sfsr.value = dtlb_sfsr_read();
sfar = dtlb_sfar_read();
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, fv=%d\n",
sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
406,7 → 430,8
}
/** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
* (Context).
*
* @param asid Address Space ID.
*/
429,7 → 454,8
nucleus_leave();
}
/** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
/** Invalidate all ITLB and DTLB entries for specified page range in specified
* address space.
*
* @param asid Address Space ID.
* @param page First page which to sweep out from ITLB and DTLB.
448,8 → 474,10
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
/trunk/kernel/arch/sparc64/src/mm/as.c
---
61,7 → 61,8
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
if (!tsb)
68,8 → 69,10
return -1;
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
* sizeof(tsb_entry_t), 0);
#endif
return 0;
}
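To make the order computation concrete, assuming sizeof(tsb_entry_t) == 16 (an 8-byte tag plus an 8-byte data word, which is an assumption, not stated in this hunk):

/*
 * (2048 + 2048) entries * 16 bytes = 65536 bytes = 64 KiB
 * 65536 >> FRAME_WIDTH (13)       = 8 frames
 * fnzb32(8)                       = 3
 *
 * so frame_alloc() is asked for a 2^3 == 8 frame block.
 */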
77,7 → 80,8
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
#else
99,7 → 103,8
return 0;
}
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
/** Perform sparc64-specific tasks when an address space becomes active on the
* processor.
*
* Install ASID and map TSBs.
*
134,7 → 139,7
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
160,7 → 165,8
#endif
}
/** Perform sparc64-specific tasks when an address space is removed from the processor.
/** Perform sparc64-specific tasks when an address space is removed from the
* processor.
*
* Demap TSBs.
*
183,7 → 189,7
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
/trunk/kernel/arch/sparc64/src/mm/tsb.c
---
41,18 → 41,17
#include <macros.h>
#include <debug.h>
#define TSB_INDEX_MASK ((1<<(21+1+TSB_SIZE-PAGE_WIDTH))-1)
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
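Plugging in the constants from this changeset (TSB_SIZE == 2, PAGE_WIDTH == 13):

/*
 * (1 << (21 + 1 + 2 - 13)) - 1 = (1 << 11) - 1 = 0x7ff,
 *
 * i.e. 2048 index values -- exactly ITSB_ENTRY_COUNT (and
 * DTSB_ENTRY_COUNT). The loop in tsb_invalidate() below can therefore
 * wrap its index with a cheap power-of-two modulo:
 * (i0 + i) & (ITSB_ENTRY_COUNT - 1).
 */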
/** Invalidate portion of TSB.
*
* We assume that the address space is already locked.
* Note that respective portions of both TSBs
* are invalidated at a time.
* We assume that the address space is already locked. Note that respective
* portions of both TSBs are invalidated at a time.
*
* @param as Address space.
* @param page First page to invalidate in TSB.
* @param pages Number of pages to invalidate.
* Value of (count_t) -1 means the whole TSB.
* @param pages Number of pages to invalidate. Value of (count_t) -1 means the
* whole TSB.
*/
void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
{
65,8 → 64,10
cnt = min(pages, ITSB_ENTRY_COUNT);
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT-1)].tag.invalid = true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT-1)].tag.invalid = true;
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
true;
}
}
/trunk/kernel/arch/sparc64/src/mm/frame.c
---
64,15 → 64,18
confdata = ADDR2PFN(start);
if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
zone_create(ADDR2PFN(start), SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)), confdata, 0);
last_frame = max(last_frame, start + ALIGN_UP(size, FRAME_SIZE));
zone_create(ADDR2PFN(start),
SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
confdata, 0);
last_frame = max(last_frame, start + ALIGN_UP(size,
FRAME_SIZE));
}
/*
* On sparc64, physical memory can start on a non-zero address.
* The generic frame_init() only marks PFN 0 as not free, so we
* must mark the physically first frame not free explicitly here,
* no matter what is its address.
* must mark the physically first frame not free explicitly
* here, no matter what is its address.
*/
frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
}
/trunk/kernel/arch/sparc64/src/mm/page.c
---
45,8 → 45,8
#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
*
* Application processors need to have the same locked entries
* in their DTLBs as the bootstrap processor.
* Application processors need to have the same locked entries in their DTLBs as
* the bootstrap processor.
*/
static struct {
uintptr_t virt_page;
84,19 → 84,16
/** Map memory-mapped device into virtual memory.
*
* So far, only DTLB is used to map devices into memory.
* Chances are that there will be only a limited amount of
* devices that the kernel itself needs to lock in DTLB.
* So far, only DTLB is used to map devices into memory. Chances are that there
* will be only a limited amount of devices that the kernel itself needs to
* lock in DTLB.
*
* @param physaddr Physical address of the page where the
* device is located. Must be at least
* page-aligned.
* @param size Size of the device's registers. Must not
* exceed 4M and must include extra space
* caused by the alignment.
* @param physaddr Physical address of the page where the device is located.
* Must be at least page-aligned.
* @param size Size of the device's registers. Must not exceed 4M and must
* include extra space caused by the alignment.
*
* @return Virtual address of the page where the device is
* mapped.
* @return Virtual address of the page where the device is mapped.
*/
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
114,17 → 111,17
{ PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8*PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8*PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64*PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64*PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512*PAGE_SIZE, 2 } /* 8M */
{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 } /* 8M */
};
ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
ASSERT(size <= 8*1024*1024);
ASSERT(size <= 8 * 1024 * 1024);
if (size <= FRAME_SIZE)
order = 0;
144,9 → 141,9
/*
* First, insert the mapping into DTLB.
*/
dtlb_insert_mapping(virtaddr + i*sizemap[order].increment,
physaddr + i*sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
#ifdef CONFIG_SMP
/*
153,9 → 150,9
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i*sizemap[order].increment;
virtaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i*sizemap[order].increment;
physaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
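A worked example for the sizemap table above: mapping a 256K device. hw_map() selects the 256K row, i.e. pagesize_code PAGESIZE_64K, count 4, increment 8 * PAGE_SIZE (64K), so the loop inserts four locked 64K DTLB entries at 64K strides:

/*
 *   i = 0: virtaddr + 0K   -> physaddr + 0K
 *   i = 1: virtaddr + 64K  -> physaddr + 64K
 *   i = 2: virtaddr + 128K -> physaddr + 128K
 *   i = 3: virtaddr + 192K -> physaddr + 192K
 */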
/trunk/boot/genarch/ofw.c
---
233,13 → 233,18
phys_hi, phys_lo);
}
/** Save OpenFirmware physical memory map.
*
* @param map Memory map structure where the map will be saved.
*
* @return Zero on failure, non-zero on success.
*/
int ofw_memmap(memmap_t *map)
{
unsigned int ac = ofw_get_address_cells(ofw_memory);
unsigned int sc = ofw_get_size_cells(ofw_memory);
uint32_t buf[((ac+sc)*MEMMAP_MAX_RECORDS)];
uint32_t buf[((ac + sc) * MEMMAP_MAX_RECORDS)];
int ret = ofw_get_property(ofw_memory, "reg", buf, sizeof(buf));
if (ret <= 0) /* ret is the number of written bytes */
return false;
247,7 → 252,8
int pos;
map->total = 0;
map->count = 0;
for (pos = 0; (pos < ret / sizeof(uint32_t)) && (map->count < MEMMAP_MAX_RECORDS); pos += ac + sc) {
for (pos = 0; (pos < ret / sizeof(uint32_t)) && (map->count <
MEMMAP_MAX_RECORDS); pos += ac + sc) {
void * start = (void *) ((uintptr_t) buf[pos + ac - 1]);
unsigned int size = buf[pos + ac + sc - 1];
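Each "reg" record consists of ac address cells followed by sc size cells, all 32-bit; the loop above keeps only the low cell of each field. On a machine where ac == 2, reassembling the full 64-bit address inside the same loop would, in the general OpenFirmware encoding, look like this illustrative sketch (not the boot loader's actual code):

uint64_t addr = ((uint64_t) buf[pos + ac - 2] << 32) |
    buf[pos + ac - 1];	/* high cell, then low cell; valid only for ac == 2 */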