Subversion Repositories HelenOS

Compare Revisions

Rev 2048 → Rev 2047

/trunk/kernel/arch/sparc64/include/mm/tsb.h
42,8 → 42,7
* again, is nice because TSBs need to be locked
* in TLBs - only one TLB entry will do.
*/
- #define TSB_SIZE 2 /* when changing this, change
- * as.c as well */
+ #define TSB_SIZE 2 /* when changing this, change as.c as well */
#define ITSB_ENTRY_COUNT (512 * (1 << TSB_SIZE))
#define DTSB_ENTRY_COUNT (512 * (1 << TSB_SIZE))
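For reference, the arithmetic behind these constants (the 16-byte entry size is our assumption from the UltraSPARC TSB entry format of an 8-byte tag plus an 8-byte data word; it does not appear in this hunk):

/* With TSB_SIZE == 2:
 *   ITSB_ENTRY_COUNT == 512 * (1 << 2) == 2048 entries
 *   DTSB_ENTRY_COUNT == 512 * (1 << 2) == 2048 entries
 * Assuming sizeof(tsb_entry_t) == 16, each TSB then occupies
 * 2048 * 16 B == 32 KiB, which is what as.c must allocate;
 * hence the "change as.c as well" warning. */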
 
81,14 → 80,12
uint64_t value;
struct {
uint64_t base : 51; /**< TSB base address, bits 63:13. */
- unsigned split : 1; /**< Split vs. common TSB for 8K and 64K
- * pages. HelenOS uses only 8K pages
- * for user mappings, so we always set
- * this to 0.
+ unsigned split : 1; /**< Split vs. common TSB for 8K and 64K pages.
+ * HelenOS uses only 8K pages for user mappings,
+ * so we always set this to 0.
*/
unsigned : 9;
- unsigned size : 3; /**< TSB size. Number of entries is
- * 512 * 2^size. */
+ unsigned size : 3; /**< TSB size. Number of entries is 512*2^size. */
} __attribute__ ((packed));
};
typedef union tsb_base_reg tsb_base_reg_t;
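A minimal sketch of filling in this register layout, assuming the TSB is page-aligned and using tsb_vaddr as a stand-in for its kernel virtual address (the field names come from the union above; everything else is illustrative):

tsb_base_reg_t tsb_base;

tsb_base.value = 0;
tsb_base.base  = tsb_vaddr >> 13;  /* keep bits 63:13 of the TSB address */
tsb_base.split = 0;                /* common TSB: HelenOS uses 8K user pages only */
tsb_base.size  = TSB_SIZE;         /* entry count == 512 * 2^size */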
/trunk/kernel/arch/sparc64/src/sparc64.c
50,7 → 50,6
 
bootinfo_t bootinfo;
 
- /** Perform sparc64 specific initialization before main_bsp() is called. */
void arch_pre_main(void)
{
/* Copy init task info. */
70,7 → 69,6
ofw_tree_init(bootinfo.ofw_root);
}
 
- /** Perform sparc64 specific initialization before mm is initialized. */
void arch_pre_mm_init(void)
{
if (config.cpu_active == 1)
77,16 → 75,10
trap_init();
}
 
- /** Perform sparc64 specific initialization after mm is initialized. */
void arch_post_mm_init(void)
{
if (config.cpu_active == 1) {
- /*
- * We have 2^11 different interrupt vectors.
- * But we only create 128 buckets.
- */
irq_init(1 << 11, 128);
standalone_sparc64_console_init();
}
}
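For scale, the deleted comment's numbers work out as follows: 2^11 = 2048 possible interrupt vectors hashed into 128 buckets means an average chain length of 2048 / 128 = 16 vectors per bucket under a uniform hash (the averaging observation is ours, not the source's).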
/trunk/kernel/arch/sparc64/src/mm/as.c
61,8 → 61,7
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
- int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
- sizeof(tsb_entry_t)) >> FRAME_WIDTH);
+ int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
if (!tsb)
69,10 → 68,8
return -1;
 
as->arch.itsb = (tsb_entry_t *) tsb;
- as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
- sizeof(tsb_entry_t));
- memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
- * sizeof(tsb_entry_t), 0);
+ as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
+ memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
#endif
return 0;
}
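To make the order computation concrete, a worked example under two assumptions not visible in this diff, sizeof(tsb_entry_t) == 16 and FRAME_WIDTH == 13 (8 KiB frames):

/* (2048 + 2048) entries * 16 B == 64 KiB of TSB storage;
 * 64 KiB >> 13 == 8 frames;
 * fnzb32(8) == 3, so frame_alloc() is asked for a 2^3 == 8-frame block. */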
80,8 → 77,7
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
- count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
- sizeof(tsb_entry_t)) >> FRAME_WIDTH;
+ count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
#else
103,8 → 99,7
return 0;
}
 
- /** Perform sparc64-specific tasks when an address space becomes active on the
- * processor.
+ /** Perform sparc64-specific tasks when an address space becomes active on the processor.
*
* Install ASID and map TSBs.
*
165,8 → 160,7
#endif
}
 
- /** Perform sparc64-specific tasks when an address space is removed from the
- * processor.
+ /** Perform sparc64-specific tasks when an address space is removed from the processor.
*
* Demap TSBs.
*
/trunk/kernel/arch/sparc64/src/mm/page.c
45,8 → 45,8
#ifdef CONFIG_SMP
/** Entries locked in DTLB of BSP.
*
- * Application processors need to have the same locked entries in their DTLBs as
- * the bootstrap processor.
+ * Application processors need to have the same locked entries
+ * in their DTLBs as the bootstrap processor.
*/
static struct {
uintptr_t virt_page;
84,16 → 84,19
 
/** Map memory-mapped device into virtual memory.
*
- * So far, only DTLB is used to map devices into memory. Chances are that there
- * will be only a limited amount of devices that the kernel itself needs to
- * lock in DTLB.
+ * So far, only DTLB is used to map devices into memory.
+ * Chances are that there will be only a limited amount of
+ * devices that the kernel itself needs to lock in DTLB.
*
- * @param physaddr Physical address of the page where the device is located.
- * Must be at least page-aligned.
- * @param size Size of the device's registers. Must not exceed 4M and must
- * include extra space caused by the alignment.
+ * @param physaddr Physical address of the page where the
+ * device is located. Must be at least
+ * page-aligned.
+ * @param size Size of the device's registers. Must not
+ * exceed 4M and must include extra space
+ * caused by the alignment.
*
- * @return Virtual address of the page where the device is mapped.
+ * @return Virtual address of the page where the device is
+ * mapped.
*/
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
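A hypothetical caller sketch for the contract documented above; dev_physaddr is made up for illustration, and the page alignment and 4M limit are the documented caller responsibilities:

/* dev_physaddr must be page-aligned; the size must not exceed 4M. */
uintptr_t regs = hw_map(dev_physaddr, PAGE_SIZE);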
/trunk/kernel/arch/sparc64/src/mm/tlb.c
57,12 → 57,9
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
- static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
- char *str);
- static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
- tlb_tag_access_reg_t tag, const char *str);
- static void do_fast_data_access_protection_fault(istate_t *istate,
- tlb_tag_access_reg_t tag, const char *str);
+ static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
+ static void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
+ static void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
 
char *context_encoding[] = {
"Primary",
93,8 → 90,7
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
- void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
- locked, bool cacheable)
+ void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
128,8 → 124,7
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
- * @param ro If true, the entry will be created read-only, regardless of its w
- * field.
+ * @param ro If true, the entry will be created read-only, regardless of its w field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
{
217,13 → 212,11
page_table_unlock(AS, true);
} else {
/*
- * Forward the page fault to the address space page fault
- * handler.
+ * Forward the page fault to the address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
- do_fast_instruction_access_mmu_miss_fault(istate,
- __FUNCTION__);
+ do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
}
}
}
230,8 → 223,9
 
/** DTLB miss handler.
*
- * Note that some faults (e.g. kernel faults) were already resolved by the
- * low-level, assembly language part of the fast_data_access_mmu_miss handler.
+ * Note that some faults (e.g. kernel faults) were already resolved
+ * by the low-level, assembly language part of the fast_data_access_mmu_miss
+ * handler.
*/
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
245,11 → 239,9
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
- do_fast_data_access_mmu_miss_fault(istate, tag,
- __FUNCTION__);
+ do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
}
- do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
- "kernel page fault.");
+ do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected kernel page fault.");
}
 
page_table_lock(AS, true);
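For orientation, the NULL-access test above relies on the identity the fault handlers further below also use, namely that the tagged VPN is the faulting virtual address shifted by the page width:

uintptr_t va = tag.vpn << PAGE_WIDTH;  /* tag.vpn == 0 <=> va == 0, i.e. a NULL access */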
271,8 → 263,7
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
- do_fast_data_access_mmu_miss_fault(istate, tag,
- __FUNCTION__);
+ do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
}
}
}
291,9 → 282,8
t = page_mapping_find(AS, va);
if (t && PTE_WRITABLE(t)) {
/*
- * The mapping was found in the software page hash table and is
- * writable. Demap the old mapping and insert an updated mapping
- * into DTLB.
+ * The mapping was found in the software page hash table and is writable.
+ * Demap the old mapping and insert an updated mapping into DTLB.
*/
t->a = true;
t->d = true;
305,13 → 295,11
page_table_unlock(AS, true);
} else {
/*
- * Forward the page fault to the address space page fault
- * handler.
+ * Forward the page fault to the address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
- do_fast_data_access_protection_fault(istate, tag,
- __FUNCTION__);
+ do_fast_data_access_protection_fault(istate, tag, __FUNCTION__);
}
}
}
328,11 → 316,8
d.value = itlb_data_access_read(i);
t.value = itlb_tag_read_read(i);
- printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
- "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
- "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
- t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
- d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
+ printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
+ i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
printf("D-TLB contents:\n");
340,17 → 325,13
d.value = dtlb_data_access_read(i);
t.value = dtlb_tag_read_read(i);
- printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
- "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
- "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
- t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
- d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
+ printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
+ i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
}
 
- void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
- *str)
+ void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
{
fault_if_from_uspace(istate, "%s\n", str);
dump_istate(istate);
357,29 → 338,25
panic("%s\n", str);
}
 
- void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
- tag, const char *str)
+ void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
 
- fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
- tag.context);
+ fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
}
 
- void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
- tag, const char *str)
+ void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
 
- fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
- tag.context);
+ fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
393,9 → 370,8
sfsr.value = dtlb_sfsr_read();
sfar = dtlb_sfar_read();
- printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
- "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
- sfsr.ow, sfsr.fv);
+ printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, fv=%d\n",
+ sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
430,8 → 406,7
}
 
- /** Invalidate all ITLB and DTLB entries that belong to specified ASID
- * (Context).
+ /** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
*
* @param asid Address Space ID.
*/
454,8 → 429,7
nucleus_leave();
}
 
- /** Invalidate all ITLB and DTLB entries for specified page range in specified
- * address space.
+ /** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
*
* @param asid Address Space ID.
* @param page First page which to sweep out from ITLB and DTLB.
474,10 → 448,8
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
- itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
- PAGE_SIZE);
- dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
- PAGE_SIZE);
+ itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
+ dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
/trunk/kernel/arch/sparc64/src/mm/tsb.c
45,13 → 45,14
 
/** Invalidate portion of TSB.
*
- * We assume that the address space is already locked. Note that respective
- * portions of both TSBs are invalidated at a time.
+ * We assume that the address space is already locked.
+ * Note that respective portions of both TSBs
+ * are invalidated at a time.
*
* @param as Address space.
* @param page First page to invalidate in TSB.
- * @param pages Number of pages to invalidate. Value of (count_t) -1 means the
- * whole TSB.
+ * @param pages Number of pages to invalidate.
+ * Value of (count_t) -1 means the whole TSB.
*/
void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
{
64,10 → 65,8
cnt = min(pages, ITSB_ENTRY_COUNT);
for (i = 0; i < cnt; i++) {
- as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
- true;
- as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
- true;
+ as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT-1)].tag.invalid = true;
+ as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT-1)].tag.invalid = true;
}
}
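The index masking in the loop above relies on the entry counts being powers of two; a sketch with the 2048-entry TSBs from tsb.h:

/* (i0 + i) & (ITSB_ENTRY_COUNT - 1) wraps modulo the TSB size:
 * with 2048 entries, an index of 2050 yields 2050 & 2047 == 2. */
size_t slot = (i0 + i) & (ITSB_ENTRY_COUNT - 1);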
 
/trunk/kernel/arch/sparc64/src/mm/frame.c
64,18 → 64,15
confdata = ADDR2PFN(start);
if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
- zone_create(ADDR2PFN(start),
- SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
- confdata, 0);
- last_frame = max(last_frame, start + ALIGN_UP(size,
- FRAME_SIZE));
+ zone_create(ADDR2PFN(start), SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)), confdata, 0);
+ last_frame = max(last_frame, start + ALIGN_UP(size, FRAME_SIZE));
}
 
/*
* On sparc64, physical memory can start on a non-zero address.
* The generic frame_init() only marks PFN 0 as not free, so we
- * must mark the physically first frame not free explicitly
- * here, no matter what is its address.
+ * must mark the physically first frame not free explicitly here,
+ * no matter what is its address.
*/
frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
}
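A worked example for the zone_create() arguments above, assuming sparc64's 8 KiB frames (FRAME_SIZE == 0x2000; the value is an architectural assumption, not shown in this hunk):

/* For a region of size 0x5000 (20 KiB):
 *   ALIGN_DOWN(0x5000, 0x2000) == 0x4000
 *   SIZE2FRAMES(0x4000)        == 2 whole frames
 * The 4 KiB tail is simply never handed to the allocator. */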
/trunk/kernel/arch/sparc64/src/smp/ipi.c
61,7 → 61,7
bool done;
 
/*
- * This function might enable interrupts for a while.
+ * This functin might enable interrupts for a while.
* In order to prevent migration to another processor,
* we explicitly disable preemption.
*/
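Schematically, the protection the comment describes is the usual pairing of the kernel's preemption primitives around the IPI work (our sketch, not code from this hunk):

preemption_disable();  /* forbid migration to another CPU */
/* ... send the IPI; interrupts may be enabled briefly here ... */
preemption_enable();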
/trunk/kernel/generic/include/macros.h
43,8 → 43,7
#define is_upper(c) (((c) >= 'A') && ((c) <= 'Z'))
#define is_alpha(c) (is_lower(c) || is_upper(c))
#define is_alphanum(c) (is_alpha(c) || is_digit(c))
- #define is_white(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || \
- ((c) == '\r'))
+ #define is_white(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || ((c) == '\r'))
 
#define min(a,b) ((a) < (b) ? (a) : (b))
#define max(a,b) ((a) > (b) ? (a) : (b))
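One general caveat worth keeping in mind for function-like macros such as min()/max() above (a property of textual substitution, not something introduced by this change); a small self-contained example:

#include <stdio.h>
#define min(a,b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int i = 0, j = 10;
    int m = min(i++, j);  /* expands to ((i++) < (j) ? (i++) : (j)) */
    printf("m=%d, i=%d\n", m, i);  /* prints m=1, i=2: i++ ran twice */
    return 0;
}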
/trunk/kernel/generic/src/proc/task.c
268,7 → 268,7
 
/** Get accounting data of given task.
*
- * Note that task lock of 't' must be already held and
+ * Note that task_lock on @t must be already held and
* interrupts must be already disabled.
*
* @param t Pointer to task.
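A hedged caller sketch of the locking discipline described above; the spinlock/interrupt idiom follows the kernel's usual pattern, and the accessor name task_get_accounting() is assumed from context rather than shown in this hunk:

ipl_t ipl = interrupts_disable();
spinlock_lock(&t->lock);
uint64_t cycles = task_get_accounting(t);  /* accessor name assumed */
spinlock_unlock(&t->lock);
interrupts_restore(ipl);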
/trunk/kernel/generic/src/mm/frame.c
1138,7 → 1138,7
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
- printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
+ printf("Available space: %zd (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);