Subversion Repositories: HelenOS-historic

Compare Revisions

Rev 112 → Rev 113

/SPARTAN/trunk/src/proc/scheduler.c
44,12 → 44,6
#include <arch/faddr.h>
#include <arch/atomic.h>
 
-/*
-* NOTE ON ATOMIC READS:
-* Some architectures cannot read __u32 atomically.
-* For that reason, all accesses to nrdy and the likes must be protected by spinlock.
-*/
 
volatile int nrdy;
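The note removed above argues that on architectures which cannot read a __u32 atomically, every access to nrdy must happen under a spinlock. A minimal sketch of such a guarded update, using only the spinlock and priority primitives that appear elsewhere in this changeset; the lock name nrdylock is illustrative and not taken from the tree:

/* Sketch only: guarded read-modify-write of nrdy on an architecture
 * without atomic 32-bit reads. The lock name nrdylock is illustrative. */
static spinlock_t nrdylock;

static void nrdy_add(int delta)
{
	pri_t pri;

	pri = cpu_priority_high();	/* mask interrupts first, as frame.c does */
	spinlock_lock(&nrdylock);
	nrdy += delta;			/* the read-modify-write is safe under the lock */
	spinlock_unlock(&nrdylock);
	cpu_priority_restore(pri);
}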
 
 
/SPARTAN/trunk/src/mm/frame.c
48,14 → 48,6
__u8 *frame_bitmap;
count_t frame_bitmap_octets;
 
-/*
-* This is for kernel address space frames (allocated with FRAME_KA).
-* Their addresses may not interfere with user address space.
-*/
-__u8 *frame_kernel_bitmap;
-count_t kernel_frames;
-count_t kernel_frames_free;
 
static spinlock_t framelock;
 
void frame_init(void)
77,17 → 69,10
*/
memsetb((__address) frame_bitmap, frame_bitmap_octets, 0);
frames_free = frames;
 
-/*
-* Will be properly set up by architecture dependent frame init.
-*/
-frame_kernel_bitmap = NULL;
-kernel_frames_free = 0;
-kernel_frames = 0;
}
 
/*
* No frame allocations/reservations prior this point.
*/
 
frame_arch_init();
108,35 → 93,20
{
int i;
pri_t pri;
-__u8 **frame_bitmap_ptr = &frame_bitmap;
-count_t *frames_ptr = &frames, *frames_free_ptr = &frames_free;
-if (flags & FRAME_KA) {
-frame_bitmap_ptr = &frame_kernel_bitmap;
-frames_ptr = &kernel_frames;
-frames_free_ptr = &kernel_frames_free;
-}
loop:
pri = cpu_priority_high();
spinlock_lock(&framelock);
-if (*frames_free_ptr) {
-for (i=0; i < *frames_ptr; i++) {
+if (frames_free) {
+for (i=0; i < frames; i++) {
int m, n;
m = i / 8;
n = i % 8;
 
-if (((*frame_bitmap_ptr)[m] & (1<<n)) == 0) {
-(*frame_bitmap_ptr)[m] |= (1<<n);
-*frames_free_ptr--;
-if (flags & FRAME_KA) {
-/*
-* frames_free_ptr points to kernel_frames_free
-* It is still necessary to decrement frames_free.
-*/
-frames_free--;
-}
+if ((frame_bitmap[m] & (1<<n)) == 0) {
+frame_bitmap[m] |= (1<<n);
+frames_free--;
spinlock_unlock(&framelock);
cpu_priority_restore(pri);
if (flags & FRAME_KA) return PA2KA(i*FRAME_SIZE);
164,35 → 134,21
{
pri_t pri;
__u32 frame;
-count_t *frames_free_ptr = &frames_free, *frames_ptr = &frames;
-__u8 **frame_bitmap_ptr = &frame_bitmap;
 
-if (IS_KA(addr)) {
-frames_free_ptr = &kernel_frames_free;
-frame_bitmap_ptr = &frame_kernel_bitmap;
-}
 
pri = cpu_priority_high();
spinlock_lock(&framelock);
frame = IS_KA(addr) ? KA2PA(addr) : addr;
frame /= FRAME_SIZE;
-if (frame < *frames_ptr) {
+if (frame < frames) {
int m, n;
m = frame / 8;
n = frame % 8;
-if ((*frame_bitmap_ptr)[m] & (1<<n)) {
-(*frame_bitmap_ptr)[m] &= ~(1<<n);
-*frames_free_ptr++;
-if (IS_KA(addr)) {
-/*
-* frames_free_ptr points to kernel_frames_free
-* It is still necessary to increment frames_free.
-*/
-frames_free++;
-}
+if (frame_bitmap[m] & (1<<n)) {
+frame_bitmap[m] &= ~(1<<n);
+frames_free++;
}
else panic("frame already free\n");
}
210,29 → 166,20
{
pri_t pri;
__u32 frame;
-count_t *frames_ptr = &frames, *frames_free_ptr = &frames_free;
-__u8 **frame_bitmap_ptr = &frame_bitmap;
pri = cpu_priority_high();
spinlock_lock(&framelock);
frame = IS_KA(addr) ? KA2PA(addr) : addr;
frame /= FRAME_SIZE;
-if (frame < *frames_ptr) {
+if (frame < frames) {
int m, n;
 
m = frame / 8;
n = frame % 8;
-if (((*frame_bitmap_ptr)[m] & (1<<n)) == 0) {
-(*frame_bitmap_ptr)[m] |= (1<<n);
-*frames_free_ptr--;
-if (IS_KA(addr)) {
-/*
-* frames_free_ptr points to kernel_frames_free
-* It is still necessary to decrement frames_free.
-*/
-frames_free--;
-}
+if ((frame_bitmap[m] & (1<<n)) == 0) {
+frame_bitmap[m] |= (1<<n);
+frames_free--;
}
}
spinlock_unlock(&framelock);
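For reference, the allocator above keeps one bit per physical frame in an __u8 array: frame i lives at bit i % 8 of byte i / 8, set while the frame is allocated. A minimal sketch of that arithmetic follows; the helper names are illustrative and not part of the kernel. Note also that the removed *frames_free_ptr-- and *frames_free_ptr++ statements actually moved the pointer rather than the counter, since postfix ++/-- binds tighter than unary *; the direct frames_free--/frames_free++ form kept by this revision avoids that pitfall.

/* Sketch only: the bitmap arithmetic used by frame_alloc()/frame_free().
 * Helper names are illustrative, not part of the kernel. */
static int frame_bit_test(__u8 *bitmap, count_t i)
{
	return (bitmap[i / 8] & (1 << (i % 8))) != 0;	/* 1 => frame i is allocated */
}

static void frame_bit_set(__u8 *bitmap, count_t i)
{
	bitmap[i / 8] |= 1 << (i % 8);			/* mark frame i allocated */
}

static void frame_bit_clear(__u8 *bitmap, count_t i)
{
	bitmap[i / 8] &= ~(1 << (i % 8));		/* mark frame i free */
}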
/SPARTAN/trunk/arch/mips/src/mm/frame.c
31,7 → 31,4
 
void frame_arch_init(void)
{
-kernel_frames = frames;
-kernel_frames_free = frames_free;
-frame_kernel_bitmap = frame_bitmap;
}
/SPARTAN/trunk/arch/ia32/src/pm.c
131,7 → 131,7
 
void pm_init(void)
{
-struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
+struct descriptor *gdt_p = (struct descriptor *) PA2KA(gdtr.base);
 
/*
* Each CPU has its private GDT and TSS.
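The change above matters because gdtr.base holds a physical address (elsewhere in this changeset it is explicitly loaded with KA2PA(gdt_new)), so C code that walks the descriptors must first convert it to a kernel virtual address. A sketch of what PA2KA/KA2PA amount to on ia32, inferred from the 0x80000000 arithmetic commented as KA2PA in ap.S below; treat the constant and the macro bodies as assumptions, not as the tree's actual definitions:

/* Sketch only: the conversions this changeset relies on. The 0x80000000
 * base is taken from the "# KA2PA" comments in ap.S; the name and the
 * exact macro definitions in the tree may differ. */
#define KERNEL_BASE_IA32	0x80000000UL
#define PA2KA(pa)	((__address) (pa) + KERNEL_BASE_IA32)	/* physical -> kernel virtual */
#define KA2PA(ka)	((__address) (ka) - KERNEL_BASE_IA32)	/* kernel virtual -> physical */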
/SPARTAN/trunk/arch/ia32/src/smp/mps.c
392,6 → 392,7
}
}
 
 
/*
* Kernel thread for bringing up application processors. It becomes clear
* that we need an arrangement like this (AP's being initialized by a kernel
414,11 → 415,9
pr = processor_entries;
 
/*
-* Grab a frame and map its address to page 0. This is a hack which
-* accesses data in frame 0. Note that page 0 is not present because
-* of nil reference bug catching.
+* We need to access data in frame 0.
*/
-frame = frame_alloc(FRAME_KA);
+frame = frame_alloc(0);
map_page_to_frame(frame,0,PAGE_CACHEABLE,0);
 
/*
473,7 → 472,7
 
memcopy(gdt, gdt_new, GDT_ITEMS*sizeof(struct descriptor));
gdtr.base = KA2PA((__address) gdt_new);
 
if (l_apic_send_init_ipi(pr[i].l_apic_id)) {
/*
* There may be just one AP being initialized at
/SPARTAN/trunk/arch/ia32/src/smp/ap.S
63,6 → 63,7
movw %ax,%ss
movl $(ctx-0x80000000),%eax # KA2PA((__address) &ctx)
movl (%eax),%esp
+subl $0x80000000,%esp # KA2PA(ctx.sp)
 
lidt idtr
 
/SPARTAN/trunk/arch/ia32/src/boot/boot.S
95,8 → 95,6
movl %cr0, %ebx
orl $(1<<31), %ebx
movl %ebx, %cr0
-jmp 0f
-0:
ret
 
 
/SPARTAN/trunk/arch/ia32/src/mm/frame.c
37,10 → 37,6
void frame_arch_init(void)
{
if (config.cpu_active == 1) {
-kernel_frames = frames;
-kernel_frames_free = frames_free;
-frame_kernel_bitmap = frame_bitmap;
 
frame_not_free(0x0);
 
frame_region_not_free(0xa0000,0xff000);
/SPARTAN/trunk/arch/ia32/src/mm/page.c
64,8 → 64,8
* PA2KA(identity) mapping for all but 0th page.
*/
for (i = 1; i < frames; i++) {
-map_page_to_frame(i * PAGE_SIZE, i * PAGE_SIZE, PAGE_CACHEABLE, dba);
-map_page_to_frame(PA2KA(i * PAGE_SIZE), i * PAGE_SIZE, PAGE_CACHEABLE, dba);
+map_page_to_frame(i * PAGE_SIZE, i * PAGE_SIZE, PAGE_CACHEABLE, KA2PA(dba));
+map_page_to_frame(PA2KA(i * PAGE_SIZE), i * PAGE_SIZE, PAGE_CACHEABLE, KA2PA(dba));
}
 
trap_register(14, page_fault);
112,7 → 112,7
pde = page >> 22; /* page directory entry */
pte = (page >> 12) & 0x3ff; /* page table entry */
-pd = (struct page_specifier *) dba;
+pd = (struct page_specifier *) PA2KA(dba);
if (!pd[pde].present) {
/*
126,7 → 126,7
pd[pde].uaccessible = 1;
}
-pt = (struct page_specifier *) (pd[pde].frame_address << 12);
+pt = (struct page_specifier *) PA2KA((pd[pde].frame_address << 12));
 
pt[pte].frame_address = frame >> 12;
pt[pte].present = !(flags & PAGE_NOT_PRESENT);
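The map_page_to_frame() arithmetic above follows the ia32 two-level paging layout: bits 31-22 of the virtual address select the page directory entry, bits 21-12 select the page table entry, and the frame number is the physical address shifted right by 12. A small illustrative breakdown; the helper is not part of the kernel:

/* Sketch only: how a 32-bit virtual address decomposes on ia32. */
static void vaddr_split(__address va, unsigned *pde, unsigned *pte, unsigned *offset)
{
	*pde = va >> 22;		/* 10 bits: index into the page directory */
	*pte = (va >> 12) & 0x3ff;	/* 10 bits: index into the page table */
	*offset = va & 0xfff;		/* 12 bits: offset within the 4 KiB page */
}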