Subversion Repositories HelenOS

Compare Revisions


Rev HEAD → Rev 792

/kernel/trunk/genarch/src/mm/as_ht.c
0,0 → 1,60
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <genarch/mm/as_ht.h>
#include <genarch/mm/page_ht.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <arch/types.h>
#include <typedefs.h>
#include <memstr.h>
#include <adt/hash_table.h>
 
static pte_t *ht_create(int flags);
 
as_operations_t as_ht_operations = {
.page_table_create = ht_create
};
 
 
/** Page hash table create.
*
* The page hash table will be created only once
* and will be shared by all address spaces.
*
* @param flags Ignored.
*
* @return Returns NULL.
*/
pte_t *ht_create(int flags)
{
if (flags & FLAG_AS_KERNEL) {
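/*
 * The table is created with two keys per item; these are presumed
 * to be the address space and the virtual page, matching the keys
 * used by compare() in page_ht.c.
 */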
hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
}
return NULL;
}
/kernel/trunk/genarch/src/mm/page_ht.c
0,0 → 1,206
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <genarch/mm/page_ht.h>
#include <mm/page.h>
#include <arch/mm/page.h>
#include <mm/frame.h>
#include <mm/heap.h>
#include <mm/as.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
#include <memstr.h>
#include <adt/hash_table.h>
 
static index_t hash(__native key[]);
static bool compare(__native key[], count_t keys, link_t *item);
static void remove_callback(link_t *item);
 
static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags);
static pte_t *ht_mapping_find(as_t *as, __address page);
 
/**
* This lock protects the page hash table.
*/
SPINLOCK_INITIALIZE(page_ht_lock);
 
/**
* Page hash table.
* The page hash table may be accessed only when page_ht_lock is held.
*/
hash_table_t page_ht;
 
/** Hash table operations for page hash table. */
hash_table_operations_t ht_operations = {
.hash = hash,
.compare = compare,
.remove_callback = remove_callback
};
 
page_operations_t page_ht_operations = {
.mapping_insert = ht_mapping_insert,
.mapping_find = ht_mapping_find
};
 
/** Compute page hash table index.
*
* @param key Array of two keys (i.e. page and address space).
*
* @return Index into page hash table.
*/
index_t hash(__native key[])
{
as_t *as = (as_t *) key[KEY_AS];
__address page = (__address) key[KEY_PAGE];
index_t index;
/*
* Virtual page addresses have roughly the same probability
* of occurring. Least significant bits of VPN compose the
* hash index.
*/
index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES-1));
/*
* Address space structures are likely to be allocated from
* similar addresses. Least significant bits compose the
* hash index.
*/
index |= ((__native) as) & (PAGE_HT_ENTRIES-1);
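/*
 * Note that both contributions are OR-ed into the same low-order bits,
 * so distinct (as, page) pairs may yield the same index; such collisions
 * are resolved by the hash table's collision chains.
 */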
return index;
}
 
/** Compare page hash table item with page and/or address space.
*
* @param key Array of one or two keys (i.e. page and/or address space).
* @param keys Number of keys passed.
* @param item Item to compare the keys with.
*
* @return true on match, false otherwise.
*/
bool compare(__native key[], count_t keys, link_t *item)
{
pte_t *t;
 
ASSERT(item);
ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS));
 
/*
* Convert item to PTE.
*/
t = list_get_instance(item, pte_t, link);
 
if (keys == PAGE_HT_KEYS) {
return (key[KEY_AS] == (__address) t->as) && (key[KEY_PAGE] == t->page);
} else {
return (key[KEY_AS] == (__address) t->as);
}
}
 
/** Callback on page hash table item removal.
*
* @param item Page hash table item being removed.
*/
void remove_callback(link_t *item)
{
pte_t *t;
 
ASSERT(item);
 
/*
* Convert item to PTE.
*/
t = list_get_instance(item, pte_t, link);
 
free(t);
}
 
/** Map page to frame using page hash table.
*
* Map virtual address 'page' to physical address 'frame'
* using 'flags'.
*
* The address space must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
*/
void ht_mapping_insert(as_t *as, __address page, __address frame, int flags)
{
pte_t *t;
ipl_t ipl;
__native key[2] = { (__address) as, page };
ipl = interrupts_disable();
spinlock_lock(&page_ht_lock);
 
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t));
ASSERT(t != NULL);

/*
 * Describe the mapping in the new PTE before inserting it;
 * the as and page members are required by compare(), while the
 * frame and flags member names are assumed here.
 */
t->as = as;
t->page = page;
t->frame = frame;
t->flags = flags;

hash_table_insert(&page_ht, key, &t->link);
}
spinlock_unlock(&page_ht_lock);
interrupts_restore(ipl);
}
 
/** Find mapping for virtual page in page hash table.
*
* Find mapping for virtual page.
*
* The address space must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
*
* @return NULL if there is no such mapping; requested mapping otherwise.
*/
pte_t *ht_mapping_find(as_t *as, __address page)
{
link_t *hlp;
pte_t *t = NULL;
__native key[2] = { (__address) as, page };
spinlock_lock(&page_ht_lock);
 
hlp = hash_table_find(&page_ht, key);
if (hlp)
t = list_get_instance(hlp, pte_t, link);
 
spinlock_unlock(&page_ht_lock);
return t;
}
/kernel/trunk/genarch/src/mm/asid.c
0,0 → 1,226
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/*
* ASID management.
*
* Modern processor architectures optimize TLB utilization
* by using ASIDs (a.k.a. memory contexts on sparc64 and
* region identifiers on ia64). These ASIDs help to associate
* each TLB item with an address space, thus making
* finer-grained TLB invalidation possible.
*
* Unfortunately, there are usually fewer ASIDs available than
* there can be unique as_t structures (i.e. address spaces
* recognized by the kernel).
*
* When the system runs short of ASIDs, it will attempt to steal
* an ASID from an address space that has not been active for
* a while.
*
* This file is not compiled on architectures that lack hardware
* support for address spaces.
*/
 
#include <mm/asid.h>
#include <mm/as.h>
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>
 
/**
* asidlock protects both the asids_allocated counter
* and the list of address spaces that were already
* assigned ASID.
*/
SPINLOCK_INITIALIZE(asidlock);
 
static count_t asids_allocated = 0;
 
/**
* List of address spaces with assigned ASID.
* When the system runs short of allocable
* ASIDs, inactive address spaces are guaranteed
* to be at the beginning of the list.
*/
LIST_INITIALIZE(as_with_asid_head);
 
 
/** Allocate free address space identifier.
*
* This code depends on the fact that ASIDS_ALLOCABLE
* is greater than the number of supported CPUs.
*
* @return New ASID.
*/
asid_t asid_get(void)
{
ipl_t ipl;
asid_t asid;
link_t *tmp;
as_t *as;
 
/*
* Check if there is an unallocated ASID.
*/
ipl = interrupts_disable();
spinlock_lock(&asidlock);
if (asids_allocated == ASIDS_ALLOCABLE) {
 
/*
* All ASIDs are already allocated.
* Resort to stealing.
*/
/*
* Remove the first item on the list.
* It is guaranteed to belong to an
* inactive address space.
*/
tmp = as_with_asid_head.next;
ASSERT(tmp != &as_with_asid_head);
list_remove(tmp);
as = list_get_instance(tmp, as_t, as_with_asid_link);
spinlock_lock(&as->lock);
 
/*
* Steal the ASID.
* Note that the stolen ASID is not active.
*/
asid = as->asid;
ASSERT(asid != ASID_INVALID);
 
/*
* Notify the address space from which the ASID
* was stolen by invalidating its asid member.
*/
as->asid = ASID_INVALID;
spinlock_unlock(&as->lock);
 
/*
* Rid the system of the stolen ASID.
*/
tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
tlb_shootdown_finalize();
tlb_invalidate_asid(asid);
} else {
 
/*
* There is at least one unallocated ASID.
* Find it and assign it.
*/
 
asid = asid_find_free();
asids_allocated++;
}
spinlock_unlock(&asidlock);
interrupts_restore(ipl);
return asid;
}
 
/** Release address space identifier.
*
* This code relies on architecture-dependent functionality.
*
* @param asid ASID to be released.
*/
void asid_put(asid_t asid)
{
ipl_t ipl;
 
ipl = interrupts_disable();
spinlock_lock(&asidlock);
 
asids_allocated--;
asid_put_arch(asid);
spinlock_unlock(&asidlock);
interrupts_restore(ipl);
}
 
/** Install ASID.
*
* This function is to be executed on each address space switch.
*
* @param as Address space.
*/
void asid_install(as_t *as)
{
ipl_t ipl;
ipl = interrupts_disable();
spinlock_lock(&asidlock);
spinlock_lock(&as->lock);
if (as->asid != ASID_KERNEL) {
if (as->asid != ASID_INVALID) {
/*
* This address space has a valid ASID.
* Remove 'as' from the list of address spaces
* with assigned ASID, so that it can be later
* appended to the tail of the same list.
* This is to prevent stealing of ASIDs from
* recently installed address spaces.
*/
list_remove(&as->as_with_asid_link);
} else {
spinlock_unlock(&as->lock);
spinlock_unlock(&asidlock);
/*
* This address space doesn't have an ASID assigned to it.
* Either its ASID was stolen or the address space is being
* installed for the first time.
* Allocate a new ASID for it.
*/
as->asid = asid_get();
spinlock_lock(&asidlock);
spinlock_lock(&as->lock);
}
/*
* Now it is certain that 'as' has an ASID.
* It is therefore appended to the list
* of address spaces from which ASIDs can
* be stolen.
*/
list_append(&as->as_with_asid_link, &as_with_asid_head);
}
spinlock_unlock(&as->lock);
spinlock_unlock(&asidlock);
interrupts_restore(ipl);
}
/kernel/trunk/genarch/src/mm/asid_fifo.c
0,0 → 1,71
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <genarch/mm/asid_fifo.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>
#include <typedefs.h>
#include <adt/fifo.h>
 
/**
* FIFO queue containing unassigned ASIDs.
* Can be only accessed when asidlock is held.
*/
FIFO_INITIALIZE(free_asids, asid_t, ASIDS_ALLOCABLE);
 
/** Initialize data structures for O(1) ASID allocation and deallocation. */
void asid_fifo_init(void)
{
int i;
for (i = 0; i < ASIDS_ALLOCABLE; i++) {
fifo_push(free_asids, ASID_START + i);
}
}
 
/** Allocate free ASID.
*
* Allocation runs in O(1).
*
* @return Free ASID.
*/
asid_t asid_find_free(void)
{
return fifo_pop(free_asids);
}
 
/** Return ASID to the set of free ASIDs.
*
* This operation runs in O(1).
*
* @param asid ASID being freed.
*/
void asid_put_arch(asid_t asid)
{
fifo_push(free_asids, asid);
}
/kernel/trunk/genarch/src/mm/page_pt.c
0,0 → 1,131
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
 
static void pt_mapping_insert(as_t *as, __address page, __address frame, int flags);
static pte_t *pt_mapping_find(as_t *as, __address page);
 
page_operations_t page_pt_operations = {
.mapping_insert = pt_mapping_insert,
.mapping_find = pt_mapping_find
};
 
/** Map page to frame using hierarchical page tables.
*
* Map virtual address 'page' to physical address 'frame'
* using 'flags'.
*
* The address space must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
*/
void pt_mapping_insert(as_t *as, __address page, __address frame, int flags)
{
pte_t *ptl0, *ptl1, *ptl2, *ptl3;
__address newpt;
 
ptl0 = (pte_t *) PA2KA((__address) as->page_table);
 
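/*
 * Walk the page table hierarchy level by level, allocating and
 * zeroing any missing intermediate page table on the way down.
 */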
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = frame_alloc(ONE_FRAME, FRAME_KA);
memsetb(newpt, PAGE_SIZE, 0);
SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
 
ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
 
if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = frame_alloc(ONE_FRAME, FRAME_KA);
memsetb(newpt, PAGE_SIZE, 0);
SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
 
ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
 
if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = frame_alloc(ONE_FRAME, FRAME_KA);
memsetb(newpt, PAGE_SIZE, 0);
SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
 
ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
 
SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
}
 
/** Find mapping for virtual page in hierarchical page tables.
*
* Find mapping for virtual page.
*
* The address space must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
*
* @return NULL if there is no such mapping; entry from PTL3 describing the mapping otherwise.
*/
pte_t *pt_mapping_find(as_t *as, __address page)
{
pte_t *ptl0, *ptl1, *ptl2, *ptl3;
 
ptl0 = (pte_t *) PA2KA((__address) as->page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
return NULL;
 
ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
 
if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
return NULL;
 
ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
 
if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
return NULL;
 
ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
 
return &ptl3[PTL3_INDEX(page)];
}
/kernel/trunk/genarch/src/mm/as_pt.c
0,0 → 1,78
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/types.h>
#include <typedefs.h>
#include <memstr.h>
#include <arch.h>
 
static pte_t *ptl0_create(int flags);
 
as_operations_t as_pt_operations = {
.page_table_create = ptl0_create
};
 
/** Create PTL0.
*
* A PTL0 of the 4-level page table hierarchy will be created
* for each address space.
*
* @param flags Flags can specify whether ptl0 is for the kernel address space.
*
* @return New PTL0.
*/
pte_t *ptl0_create(int flags)
{
pte_t *src_ptl0, *dst_ptl0;
ipl_t ipl;
 
dst_ptl0 = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA | FRAME_PANIC);
 
if (flags & FLAG_AS_KERNEL) {
memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
} else {
/*
* Copy the kernel address space portion to new PTL0.
* TODO: copy only kernel address space.
*/
ipl = interrupts_disable();
spinlock_lock(&AS_KERNEL->lock);
src_ptl0 = (pte_t *) PA2KA((__address) AS_KERNEL->page_table);
memcpy((void *) dst_ptl0,(void *) src_ptl0, PAGE_SIZE);
spinlock_unlock(&AS_KERNEL->lock);
interrupts_restore(ipl);
}
 
return (pte_t *) KA2PA((__address) dst_ptl0);
}
/kernel/trunk/genarch/src/acpi/matd.c
0,0 → 1,234
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/types.h>
#include <typedefs.h>
#include <genarch/acpi/acpi.h>
#include <genarch/acpi/madt.h>
#include <arch/smp/apic.h>
#include <arch/smp/smp.h>
#include <panic.h>
#include <debug.h>
#include <config.h>
#include <print.h>
#include <mm/heap.h>
#include <memstr.h>
#include <sort.h>
 
struct acpi_madt *acpi_madt = NULL;
 
#ifdef CONFIG_SMP
 
/** Standard ISA IRQ map; can be overridden by Interrupt Source Override entries of MADT. */
int isa_irq_map[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
 
static void madt_l_apic_entry(struct madt_l_apic *la, __u32 index);
static void madt_io_apic_entry(struct madt_io_apic *ioa, __u32 index);
static void madt_intr_src_ovrd_entry(struct madt_intr_src_ovrd *override, __u32 index);
static int madt_cmp(void * a, void * b);
 
struct madt_l_apic *madt_l_apic_entries = NULL;
struct madt_io_apic *madt_io_apic_entries = NULL;
 
index_t madt_l_apic_entry_index = 0;
index_t madt_io_apic_entry_index = 0;
count_t madt_l_apic_entry_cnt = 0;
count_t madt_io_apic_entry_cnt = 0;
count_t cpu_count = 0;
 
struct madt_apic_header * * madt_entries_index = NULL;
int madt_entries_index_cnt = 0;
 
char *entry[] = {
"L_APIC",
"IO_APIC",
"INTR_SRC_OVRD",
"NMI_SRC",
"L_APIC_NMI",
"L_APIC_ADDR_OVRD",
"IO_SAPIC",
"L_SAPIC",
"PLATFORM_INTR_SRC"
};
 
/*
* ACPI MADT Implementation of SMP configuration interface.
*/
static count_t madt_cpu_count(void);
static bool madt_cpu_enabled(index_t i);
static bool madt_cpu_bootstrap(index_t i);
static __u8 madt_cpu_apic_id(index_t i);
static int madt_irq_to_pin(int irq);
 
struct smp_config_operations madt_config_operations = {
.cpu_count = madt_cpu_count,
.cpu_enabled = madt_cpu_enabled,
.cpu_bootstrap = madt_cpu_bootstrap,
.cpu_apic_id = madt_cpu_apic_id,
.irq_to_pin = madt_irq_to_pin
};
 
count_t madt_cpu_count(void)
{
return madt_l_apic_entry_cnt;
}
 
bool madt_cpu_enabled(index_t i)
{
ASSERT(i < madt_l_apic_entry_cnt);
return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->flags & 0x1;
 
}
 
bool madt_cpu_bootstrap(index_t i)
{
ASSERT(i < madt_l_apic_entry_cnt);
return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->apic_id == l_apic_id();
}
 
__u8 madt_cpu_apic_id(index_t i)
{
ASSERT(i < madt_l_apic_entry_cnt);
return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->apic_id;
}
 
int madt_irq_to_pin(int irq)
{
ASSERT(irq < sizeof(isa_irq_map)/sizeof(int));
return isa_irq_map[irq];
}
 
int madt_cmp(void * a, void * b)
{
struct madt_apic_header *ha = (struct madt_apic_header *) a;
struct madt_apic_header *hb = (struct madt_apic_header *) b;

if (ha->type > hb->type)
return 1;
if (ha->type < hb->type)
return -1;
return 0;
}

void acpi_madt_parse(void)
{
struct madt_apic_header *end = (struct madt_apic_header *) (((__u8 *) acpi_madt) + acpi_madt->header.length);
struct madt_apic_header *h;
l_apic = (__u32 *) (__native) acpi_madt->l_apic_address;
 
/* Count the MADT entries. */
for (h = &acpi_madt->apic_header[0]; h < end; h = (struct madt_apic_header *) (((__u8 *) h) + h->length)) {
madt_entries_index_cnt++;
}
 
/* create madt apic entries index array */
madt_entries_index = (struct madt_apic_header * *) early_malloc(madt_entries_index_cnt * sizeof(struct madt_apic_header *));
 
__u32 index = 0;
 
for (h = &acpi_madt->apic_header[0]; h < end; h = (struct madt_apic_header *) (((__u8 *) h) + h->length)) {
madt_entries_index[index++] = h;
}
 
/* Quicksort MADT index structure */
qsort(madt_entries_index, madt_entries_index_cnt, sizeof(__address), &madt_cmp);
 
/* Parse MADT entries */
for (index = 0; index < madt_entries_index_cnt; index++) {
h = madt_entries_index[index];
switch (h->type) {
case MADT_L_APIC:
madt_l_apic_entry((struct madt_l_apic *) h, index);
break;
case MADT_IO_APIC:
madt_io_apic_entry((struct madt_io_apic *) h, index);
break;
case MADT_INTR_SRC_OVRD:
madt_intr_src_ovrd_entry((struct madt_intr_src_ovrd *) h, index);
break;
case MADT_NMI_SRC:
case MADT_L_APIC_NMI:
case MADT_L_APIC_ADDR_OVRD:
case MADT_IO_SAPIC:
case MADT_L_SAPIC:
case MADT_PLATFORM_INTR_SRC:
printf("MADT: skipping %s entry (type=%d)\n", entry[h->type], h->type);
break;
 
default:
if (h->type >= MADT_RESERVED_SKIP_BEGIN && h->type <= MADT_RESERVED_SKIP_END) {
printf("MADT: skipping reserved entry (type=%d)\n", h->type);
}
if (h->type >= MADT_RESERVED_OEM_BEGIN) {
printf("MADT: skipping OEM entry (type=%d)\n", h->type);
}
break;
}
}
 
if (cpu_count)
config.cpu_count = cpu_count;
}
 
void madt_l_apic_entry(struct madt_l_apic *la, __u32 index)
{
if (!madt_l_apic_entry_cnt++) {
madt_l_apic_entry_index = index;
}
if (!(la->flags & 0x1)) {
/* Processor is unusable, skip it. */
return;
}
cpu_count++;
apic_id_mask |= 1<<la->apic_id;
}
 
void madt_io_apic_entry(struct madt_io_apic *ioa, __u32 index)
{
if (!madt_io_apic_entry_cnt++) {
/* remember index of the first io apic entry */
madt_io_apic_entry_index = index;
io_apic = (__u32 *) (__native) ioa->io_apic_address;
} else {
/* Multiple IO APICs are currently not supported. */
return;
}
}
 
void madt_intr_src_ovrd_entry(struct madt_intr_src_ovrd *override, __u32 index)
{
ASSERT(override->source < sizeof(isa_irq_map)/sizeof(int));
printf("MADT: ignoring %s entry: bus=%d, source=%d, global_int=%d, flags=%W\n",
entry[override->header.type], override->bus, override->source,
override->global_int, override->flags);
}
 
#endif /* CONFIG_SMP */
/kernel/trunk/genarch/src/acpi/acpi.c
0,0 → 1,175
/*
* Copyright (C) 2005 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <genarch/acpi/acpi.h>
#include <genarch/acpi/madt.h>
#include <arch/bios/bios.h>
#include <mm/as.h>
#include <mm/page.h>
#include <print.h>
 
#define RSDP_SIGNATURE "RSD PTR "
#define RSDP_REVISION_OFFS 15
 
struct acpi_rsdp *acpi_rsdp = NULL;
struct acpi_rsdt *acpi_rsdt = NULL;
struct acpi_xsdt *acpi_xsdt = NULL;
 
struct acpi_signature_map signature_map[] = {
{ (__u8 *)"APIC", (void *) &acpi_madt, "Multiple APIC Description Table" }
};
 
static int rsdp_check(__u8 *rsdp) {
struct acpi_rsdp *r = (struct acpi_rsdp *) rsdp;
__u8 sum = 0;
int i;
for (i=0; i<20; i++)
sum += rsdp[i];
if (sum)
return 0; /* bad checksum */
 
if (r->revision == 0)
return 1; /* ACPI 1.0 */
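/* ACPI 2.0 and higher: the extended checksum covers the entire structure. */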
for (; i<r->length; i++)
sum += rsdp[i];
return !sum;
}
 
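/** Check the checksum of an ACPI system description table.
 *
 * All bytes of the table, including the checksum byte itself,
 * must sum to zero.
 *
 * @param sdt Table to be checked.
 *
 * @return Non-zero if the checksum is correct, zero otherwise.
 */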
int acpi_sdt_check(__u8 *sdt)
{
struct acpi_sdt_header *h = (struct acpi_sdt_header *) sdt;
__u8 sum = 0;
int i;
 
for (i=0; i<h->length; i++)
sum += sdt[i];
return !sum;
}
 
static void map_sdt(struct acpi_sdt_header *sdt)
{
page_mapping_insert(AS_KERNEL, (__address) sdt, (__address) sdt, PAGE_NOT_CACHEABLE);
map_structure((__address) sdt, sdt->length);
}
 
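/** Walk the tables referenced by the RSDT and record those of interest.
 *
 * Each referenced table is mapped, its signature is matched against
 * signature_map and, if its checksum verifies, its address is recorded.
 */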
static void configure_via_rsdt(void)
{
int i, j, cnt = (acpi_rsdt->header.length - sizeof(struct acpi_sdt_header))/sizeof(__u32);
for (i=0; i<cnt; i++) {
for (j=0; j<sizeof(signature_map)/sizeof(struct acpi_signature_map); j++) {
struct acpi_sdt_header *h = (struct acpi_sdt_header *) (__native) acpi_rsdt->entry[i];
map_sdt(h);
if (*((__u32 *) &h->signature[0])==*((__u32 *) &signature_map[j].signature[0])) {
if (!acpi_sdt_check((__u8 *) h))
goto next;
*signature_map[j].sdt_ptr = h;
printf("%P: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description);
}
}
next:
;
}
}
 
static void configure_via_xsdt(void)
{
int i, j, cnt = (acpi_xsdt->header.length - sizeof(struct acpi_sdt_header))/sizeof(__u64);
for (i=0; i<cnt; i++) {
for (j=0; j<sizeof(signature_map)/sizeof(struct acpi_signature_map); j++) {
struct acpi_sdt_header *h = (struct acpi_sdt_header *) ((__address) acpi_xsdt->entry[i]);
 
map_sdt(h);
if (*((__u32 *) &h->signature[0])==*((__u32 *) &signature_map[j].signature[0])) {
if (!acpi_sdt_check((__u8 *) h))
goto next;
*signature_map[j].sdt_ptr = h;
printf("%P: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description);
}
}
next:
;
}
 
}
 
void acpi_init(void)
{
__u8 *addr[2] = { NULL, (__u8 *) PA2KA(0xe0000) };
int i, j, length[2] = { 1024, 128*1024 };
__u64 *sig = (__u64 *) RSDP_SIGNATURE;
 
/*
* Find Root System Description Pointer
* 1. search first 1K of EBDA
* 2. search 128K starting at 0xe0000
*/
 
addr[0] = (__u8 *) PA2KA(ebda);
for (i = (ebda ? 0 : 1); i < 2; i++) {
for (j = 0; j < length[i]; j += 16) {
if (*((__u64 *) &addr[i][j]) == *sig && rsdp_check(&addr[i][j])) {
acpi_rsdp = (struct acpi_rsdp *) &addr[i][j];
goto rsdp_found;
}
}
}
 
return;
 
rsdp_found:
printf("%P: ACPI Root System Description Pointer\n", acpi_rsdp);
 
acpi_rsdt = (struct acpi_rsdt *) (__native) acpi_rsdp->rsdt_address;
if (acpi_rsdp->revision) acpi_xsdt = (struct acpi_xsdt *) ((__address) acpi_rsdp->xsdt_address);
 
if (acpi_rsdt) map_sdt((struct acpi_sdt_header *) acpi_rsdt);
if (acpi_xsdt) map_sdt((struct acpi_sdt_header *) acpi_xsdt);
 
if (acpi_rsdt && !acpi_sdt_check((__u8 *) acpi_rsdt)) {
printf("RSDT: %s\n", "bad checksum");
return;
}
if (acpi_xsdt && !acpi_sdt_check((__u8 *) acpi_xsdt)) {
printf("XSDT: %s\n", "bad checksum");
return;
}
 
if (acpi_xsdt) configure_via_xsdt();
else if (acpi_rsdt) configure_via_rsdt();
 
}
 
/kernel/trunk/genarch/src/ofw/ofw.c
0,0 → 1,123
/*
* Copyright (C) 2005 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <genarch/ofw/ofw.h>
#include <arch/asm.h>
#include <stdarg.h>
#include <cpu.h>
#include <arch/types.h>
 
ofw_entry ofw;
 
phandle ofw_chosen;
ihandle ofw_stdin;
ihandle ofw_stdout;
 
void ofw_init(void)
{
ofw_chosen = ofw_find_device("/chosen");
if (ofw_chosen == -1)
ofw_done();
if (ofw_get_property(ofw_chosen, "stdin", &ofw_stdin, sizeof(ofw_stdin)) <= 0)
ofw_stdin = 0;
if (ofw_get_property(ofw_chosen, "stdout", &ofw_stdout, sizeof(ofw_stdout)) <= 0)
ofw_stdout = 0;
}
 
void ofw_done(void)
{
(void) ofw_call("exit", 0, 0);
cpu_halt();
}
 
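/** Call an OpenFirmware service.
 *
 * The service name and arguments are marshalled into an argument
 * structure which is then passed to the firmware entry point.
 *
 * @param service Name of the service to invoke.
 * @param nargs Number of input arguments.
 * @param nret Number of return values.
 *
 * @return First return value of the service.
 */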
__native ofw_call(const char *service, const int nargs, const int nret, ...)
{
va_list list;
ofw_args_t args;
int i;
args.service = service;
args.nargs = nargs;
args.nret = nret;
va_start(list, nret);
for (i = 0; i < nargs; i++)
args.args[i] = va_arg(list, ofw_arg_t);
va_end(list);
for (i = 0; i < nret; i++)
args.args[i + nargs] = 0;
ofw(&args);
return args.args[nargs];
}
 
void ofw_putchar(const char ch)
{
if (ofw_stdout == 0)
return;
(void) ofw_call("write", 3, 1, ofw_stdout, &ch, 1);
}
 
/** Read character from OFW's input.
*
* This call is non-blocking.
*
* @return 0 if no character was read, character read otherwise.
*/
char ofw_getchar(void)
{
char ch;
 
if (ofw_stdin == 0)
return 0;
if (ofw_call("read", 3, 1, ofw_stdin, &ch, 1) == 1)
return ch;
else
return 0;
}
 
phandle ofw_find_device(const char *name)
{
return (phandle) ofw_call("finddevice", 1, 1, name);
}
 
int ofw_get_property(const phandle device, const char *name, void *buf, const int buflen)
{
return (int) ofw_call("getprop", 4, 1, device, name, buf, buflen);
}
 
void *ofw_claim(const void *addr, const int size, const int align)
{
return (void *) ofw_call("claim", 3, 1, addr, size, align);
}