Subversion Repositories HelenOS-historic

Compare Revisions

Ignore whitespace Rev 896 → Rev 897

/kernel/trunk/generic/include/proc/scheduler.h
53,12 → 53,14
extern void kcpulb(void *arg);
 
extern void before_thread_runs(void);
extern void after_thread_ran(void);
 
extern void sched_print_list(void);
 
/*
* To be defined by architectures:
*/
extern void before_thread_runs_arch(void);
extern void after_thread_ran_arch(void);
 
#endif
/kernel/trunk/generic/src/proc/scheduler.c
49,7 → 49,7
 
atomic_t nrdy;
 
/** Take actions before new thread runs
/** Take actions before new thread runs.
*
* Perform actions that need to be
* taken before the newly selected
77,6 → 77,20
#endif
}
 
/** Take actions after old thread ran.
*
* Perform actions that need to be
* taken after the running thread
* was preempted by the scheduler.
*
* Delegates to the architecture-specific hook
* after_thread_ran_arch() (e.g. sparc64 uses it to
* unlock the thread's kernel stack from the DTLB).
*
* THREAD->lock is locked on entry.
*
*/
void after_thread_ran(void)
{
after_thread_ran_arch();
}
 
#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
257,6 → 271,9
ASSERT(CPU != NULL);
 
if (THREAD) {
/* must be run after switch to scheduler stack */
after_thread_ran();
 
switch (THREAD->state) {
case Running:
THREAD->state = Ready;
300,6 → 317,7
panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
break;
}
 
THREAD = NULL;
}
 
351,6 → 369,16
#endif
 
/*
* Some architectures provide late kernel PA2KA(identity)
* mapping in a page fault handler. However, the page fault
* handler uses the kernel stack of the running thread and
* therefore cannot be used to map it. The kernel stack, if
* necessary, is to be mapped in before_thread_runs(). This
* function must be executed before the switch to the new stack.
*/
before_thread_runs();
 
/*
* Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
*/
the_copy(THE, (the_t *) THREAD->kstack);
387,7 → 415,6
/*
* This is the place where threads leave scheduler();
*/
before_thread_runs();
spinlock_unlock(&THREAD->lock);
interrupts_restore(THREAD->saved_context.ipl);
return;
/kernel/trunk/arch/sparc64/include/trap/exception.h
31,11 → 31,13
 
#define TT_INSTRUCTION_ACCESS_EXCEPTION 0x08
#define TT_ILLEGAL_INSTRUCTION 0x10
#define TT_DATA_ACCESS_ERROR 0x32
#define TT_MEM_ADDRESS_NOT_ALIGNED 0x34
 
#ifndef __ASM__
extern void do_instruction_access_exc(void);
extern void do_mem_address_not_aligned(void);
extern void do_data_access_error(void);
extern void do_illegal_instruction(void);
#endif /* !__ASM__ */
 
/kernel/trunk/arch/sparc64/include/mm/tlb.h
405,4 → 405,6
extern void fast_data_access_mmu_miss(void);
extern void fast_data_access_protection(void);
 
extern void dtlb_insert_mapping(__address page, __address frame, int pagesize, bool locked, bool cacheable);
 
#endif
/kernel/trunk/arch/sparc64/include/drivers/i8042.h
32,7 → 32,7
#include <arch/types.h>
 
#define KBD_PHYS_ADDRESS 0x1fff8904000ULL
#define KBD_VIRT_ADDRESS 0x00000d00000ULL
#define KBD_VIRT_ADDRESS 0x000d0000000ULL
 
#define STATUS_REG 4
#define COMMAND_REG 4
40,7 → 40,7
 
static inline void i8042_data_write(__u8 data)
{
((__u8 *)(KBD_VIRT_ADDRESS))[DATA_REG] = data;
((volatile __u8 *)(KBD_VIRT_ADDRESS))[DATA_REG] = data;
}
 
static inline __u8 i8042_data_read(void)
55,7 → 55,7
 
static inline void i8042_command_write(__u8 command)
{
((__u8 *)(KBD_VIRT_ADDRESS))[COMMAND_REG] = command;
((volatile __u8 *)(KBD_VIRT_ADDRESS))[COMMAND_REG] = command;
}
 
#endif
/kernel/trunk/arch/sparc64/Makefile.inc
81,6 → 81,7
arch/$(ARCH)/src/mm/memory_init.c \
arch/$(ARCH)/src/sparc64.c \
arch/$(ARCH)/src/start.S \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/trap/trap_table.S \
arch/$(ARCH)/src/trap/trap.c \
arch/$(ARCH)/src/trap/exception.c \
/kernel/trunk/arch/sparc64/src/console.c
40,6 → 40,7
#include <arch/register.h>
#include <proc/thread.h>
#include <synch/mutex.h>
#include <arch/mm/tlb.h>
 
#define KEYBOARD_POLL_PAUSE 50000 /* 50ms */
 
75,6 → 76,10
{
ofw_console_active = 0;
stdin = NULL;
 
dtlb_insert_mapping(FB_VIRT_ADDRESS, FB_PHYS_ADDRESS, PAGESIZE_4M, true, false);
dtlb_insert_mapping(KBD_VIRT_ADDRESS, KBD_PHYS_ADDRESS, PAGESIZE_8K, true, false);
 
fb_init(FB_VIRT_ADDRESS, FB_X_RES, FB_Y_RES, FB_COLOR_DEPTH/8);
i8042_init();
}
/kernel/trunk/arch/sparc64/src/sparc64.c
74,7 → 74,3
void calibrate_delay_loop(void)
{
}
 
void before_thread_runs_arch(void)
{
}
/kernel/trunk/arch/sparc64/src/proc/scheduler.c
0,0 → 1,68
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/mm/tlb.h>
#include <config.h>
#include <align.h>
 
/** Ensure that thread's kernel stack is locked in TLB.
 *
 * The kernel occupies one locked 4M DTLB entry starting at
 * ALIGN_DOWN(config.base, 4M). A kernel stack outside that
 * 4M window is not covered and must get its own locked 8K
 * identity mapping before the thread may run on it.
 */
void before_thread_runs_arch(void)
{
	__address base;

	base = ALIGN_DOWN(config.base, 4*1024*1024);

	/*
	 * Fix: the boundary test must be '>=', not '>'. A kstack at
	 * exactly base + 4M lies in the next 4M page and is NOT
	 * covered by the locked kernel mapping; the original '>'
	 * would skip mapping it and the thread would run on an
	 * unmapped stack.
	 */
	if ((__address) THREAD->kstack < base || (__address) THREAD->kstack >= base + 4*1024*1024) {
		/*
		 * Kernel stack of this thread is not locked in DTLB.
		 * First demap any stale (unlocked) mapping of the page,
		 * then insert a locked, cacheable identity mapping.
		 */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack);
		dtlb_insert_mapping((__address) THREAD->kstack, (__address) THREAD->kstack, PAGESIZE_8K, true, true);
	}
}
 
/** Unlock thread's stack from TLB, if necessary.
 *
 * Inverse of before_thread_runs_arch(): if the outgoing
 * thread's kernel stack needed its own locked DTLB entry,
 * destroy that mapping so locked entries do not accumulate.
 */
void after_thread_ran_arch(void)
{
	__address base;

	base = ALIGN_DOWN(config.base, 4*1024*1024);

	/*
	 * Fix: use '>=' so the boundary case kstack == base + 4M
	 * (which lies outside the 4M locked kernel mapping and thus
	 * received its own DTLB entry) is demapped as well. Demapping
	 * a page with no matching entry is harmless.
	 */
	if ((__address) THREAD->kstack < base || (__address) THREAD->kstack >= base + 4*1024*1024) {
		/*
		 * Kernel stack of this thread is locked in DTLB.
		 * Destroy the mapping.
		 */
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack);
	}
}
/kernel/trunk/arch/sparc64/src/trap/exception.c
42,6 → 42,12
panic("Memory Address Not Aligned\n");
}
 
/** Handle data_access_error (TT = 0x32).
 *
 * Currently fatal: reports the trapping program counter
 * and panics. Registered for both TL = 0 and TL > 0 in
 * the trap table.
 */
void do_data_access_error(void)
{
panic("Data Access Error: %P\n", tpc_read());
}
 
/** Handle illegal_instruction. */
void do_illegal_instruction(void)
{
/kernel/trunk/arch/sparc64/src/trap/trap_table.S
72,6 → 72,12
clean_window_handler:
CLEAN_WINDOW_HANDLER
 
/* TT = 0x32, TL = 0, data_access_error */
.org trap_table + TT_DATA_ACCESS_ERROR*ENTRY_SIZE
.global data_access_error
data_access_error:
SIMPLE_HANDLER do_data_access_error
 
/* TT = 0x34, TL = 0, mem_address_not_aligned */
.org trap_table + TT_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
.global mem_address_not_aligned
226,6 → 232,12
clean_window_handler_high:
CLEAN_WINDOW_HANDLER
 
/* TT = 0x32, TL > 0, data_access_error */
.org trap_table + (TT_DATA_ACCESS_ERROR+512)*ENTRY_SIZE
.global data_access_error_high
data_access_error_high:
SIMPLE_HANDLER do_data_access_error
 
/* TT = 0x34, TL > 0, mem_address_not_aligned */
.org trap_table + (TT_MEM_ADDRESS_NOT_ALIGNED+512)*ENTRY_SIZE
.global mem_address_not_aligned_high
/kernel/trunk/arch/sparc64/src/mm/tlb.c
109,37 → 109,26
 
dmmu_enable();
immu_enable();
/*
* Quick hack: map frame buffer
*/
fr.address = FB_PHYS_ADDRESS;
pg.address = FB_VIRT_ADDRESS;
}
 
tag.value = ASID_KERNEL;
tag.vpn = pg.vpn;
/** Insert privileged mapping into DMMU TLB.
*
* @param page Virtual page address.
* @param frame Physical frame address.
* @param pagesize Page size.
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(__address page, __address frame, int pagesize, bool locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
page_address_t pg;
frame_address_t fr;
 
dtlb_tag_access_write(tag.value);
pg.address = page;
fr.address = frame;
 
data.value = 0;
data.v = true;
data.size = PAGESIZE_4M;
data.pfn = fr.pfn;
data.l = true;
data.cp = 0;
data.cv = 0;
data.p = true;
data.w = true;
data.g = true;
 
dtlb_data_in_write(data.value);
/*
* Quick hack: map keyboard
*/
fr.address = KBD_PHYS_ADDRESS;
pg.address = KBD_VIRT_ADDRESS;
 
tag.value = ASID_KERNEL;
tag.vpn = pg.vpn;
 
147,11 → 136,11
 
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
data.size = pagesize;
data.pfn = fr.pfn;
data.l = true;
data.cp = 0;
data.cv = 0;
data.l = locked;
data.cp = cacheable;
data.cv = cacheable;
data.p = true;
data.w = true;
data.g = true;
169,7 → 158,6
void fast_data_access_mmu_miss(void)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
__address tpc;
char *tpc_str;
 
186,18 → 174,7
/*
* Identity map piece of faulting kernel address space.
*/
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
data.pfn = tag.vpn;
data.l = false;
data.cp = 1;
data.cv = 1;
data.p = true;
data.w = true;
data.g = true;
 
dtlb_data_in_write(data.value);
dtlb_insert_mapping(tag.vpn * PAGE_SIZE, tag.vpn * FRAME_SIZE, PAGESIZE_8K, false, true);
}
 
/** DTLB protection fault handler. */
/kernel/trunk/arch/ia64/src/dummy.s
32,6 → 32,7
.global asm_delay_loop
.global userspace
.global before_thread_runs_arch
.global after_thread_ran_arch
.global cpu_sleep
.global dummy
.global fpu_enable
39,6 → 40,7
.global fpu_init
 
before_thread_runs_arch:
after_thread_ran_arch:
userspace:
calibrate_delay_loop:
asm_delay_loop:
/kernel/trunk/arch/ppc32/src/dummy.s
31,6 → 31,7
.global asm_delay_loop
.global userspace
.global before_thread_runs_arch
.global after_thread_ran_arch
.global dummy
.global fpu_init
.global fpu_enable
37,6 → 38,7
.global fpu_disable
 
before_thread_runs_arch:
after_thread_ran_arch:
userspace:
asm_delay_loop:
fpu_init:
/kernel/trunk/arch/amd64/src/proc/scheduler.c
43,3 → 43,7
(__u64)&THREAD->kstack);
swapgs();
}
 
/** Take architecture-specific actions after a thread ran.
 *
 * Intentionally empty: amd64 performs no per-thread
 * cleanup when a thread is preempted.
 */
void after_thread_ran_arch(void)
{
}
/kernel/trunk/arch/mips32/src/mips32.c
134,3 → 134,7
{
supervisor_sp = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
}
 
/** Take architecture-specific actions after a thread ran.
 *
 * Intentionally empty: mips32 performs no per-thread
 * cleanup when a thread is preempted.
 */
void after_thread_ran_arch(void)
{
}
/kernel/trunk/arch/ia32/src/proc/scheduler.c
37,3 → 37,7
CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
CPU->arch.tss->ss0 = selector(KDATA_DES);
}
 
/** Take architecture-specific actions after a thread ran.
 *
 * Intentionally empty: ia32 performs no per-thread
 * cleanup when a thread is preempted.
 */
void after_thread_ran_arch(void)
{
}