/branches/network/kernel/generic/src/mm/as.c |
---|
82,7 → 82,6 |
#include <arch/mm/cache.h> |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
#ifndef __OBJC__ |
/** |
* Each architecture decides what functions will be used to carry out |
* address space operations such as creating or locking page tables. |
93,7 → 92,6 |
* Slab for as_t objects. |
*/ |
static slab_cache_t *as_slab; |
#endif |
/** |
* This lock serializes access to the ASID subsystem. |
113,13 → 111,11 |
/** Kernel address space. */ |
as_t *AS_KERNEL = NULL; |
static int area_flags_to_page_flags(int aflags); |
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va); |
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, |
as_area_t *avoid_area); |
static void sh_info_remove_reference(share_info_t *sh_info); |
static int area_flags_to_page_flags(int); |
static as_area_t *find_area_and_lock(as_t *, uintptr_t); |
static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); |
static void sh_info_remove_reference(share_info_t *); |
#ifndef __OBJC__ |
static int as_constructor(void *obj, int flags) |
{ |
as_t *as = (as_t *) obj; |
126,7 → 122,7 |
int rc; |
link_initialize(&as->inactive_as_with_asid_link); |
mutex_initialize(&as->lock); |
mutex_initialize(&as->lock, MUTEX_PASSIVE); |
rc = as_constructor_arch(as, flags); |
139,7 → 135,6 |
return as_destructor_arch(as); |
} |
#endif |
/** Initialize address space subsystem. */ |
void as_init(void) |
146,10 → 141,8 |
{ |
as_arch_init(); |
#ifndef __OBJC__ |
as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, |
as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); |
#endif |
AS_KERNEL = as_create(FLAG_AS_KERNEL); |
if (!AS_KERNEL) |
159,20 → 152,14 |
/** Create address space. |
* |
* @param flags Flags that influence way in wich the address space is created. |
* @param flags Flags that influence the way in wich the address space |
* is created. |
*/ |
as_t *as_create(int flags) |
{ |
as_t *as; |
#ifdef __OBJC__ |
as = [as_t new]; |
link_initialize(&as->inactive_as_with_asid_link); |
mutex_initialize(&as->lock); |
(void) as_constructor_arch(as, flags); |
#else |
as = (as_t *) slab_alloc(as_slab, 0); |
#endif |
(void) as_create_arch(as, 0); |
btree_create(&as->as_area_btree); |
199,6 → 186,8 |
* zero), the address space can be destroyed. |
* |
* We know that we don't hold any spinlock. |
* |
* @param as Address space to be destroyed. |
*/ |
void as_destroy(as_t *as) |
{ |
263,11 → 252,7 |
interrupts_restore(ipl); |
#ifdef __OBJC__ |
[as free]; |
#else |
slab_free(as_slab, as); |
#endif |
} |
/** Create address space area of common attributes. |
274,19 → 259,19 |
* |
* The created address space area is added to the target address space. |
* |
* @param as Target address space. |
* @param flags Flags of the area memory. |
* @param size Size of area. |
* @param base Base address of area. |
* @param attrs Attributes of the area. |
* @param backend Address space area backend. NULL if no backend is used. |
* @param backend_data NULL or a pointer to an array holding two void *. |
* @param as Target address space. |
* @param flags Flags of the area memory. |
* @param size Size of area. |
* @param base Base address of area. |
* @param attrs Attributes of the area. |
* @param backend Address space area backend. NULL if no backend is used. |
* @param backend_data NULL or a pointer to an array holding two void *. |
* |
* @return Address space area on success or NULL on failure. |
* @return Address space area on success or NULL on failure. |
*/ |
as_area_t * |
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, |
mem_backend_t *backend, mem_backend_data_t *backend_data) |
mem_backend_t *backend, mem_backend_data_t *backend_data) |
{ |
ipl_t ipl; |
as_area_t *a; |
312,7 → 297,7 |
a = (as_area_t *) malloc(sizeof(as_area_t), 0); |
mutex_initialize(&a->lock); |
mutex_initialize(&a->lock, MUTEX_PASSIVE); |
a->as = as; |
a->flags = flags; |
324,8 → 309,7 |
if (backend_data) |
a->backend_data = *backend_data; |
else |
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), |
0); |
memsetb(&a->backend_data, sizeof(a->backend_data), 0); |
btree_create(&a->used_space); |
339,13 → 323,14 |
/** Find address space area and change it. |
* |
* @param as Address space. |
* @param address Virtual address belonging to the area to be changed. Must be |
* page-aligned. |
* @param size New size of the virtual memory block starting at address. |
* @param flags Flags influencing the remap operation. Currently unused. |
* @param as Address space. |
* @param address Virtual address belonging to the area to be changed. |
* Must be page-aligned. |
* @param size New size of the virtual memory block starting at |
* address. |
* @param flags Flags influencing the remap operation. Currently unused. |
* |
* @return Zero on success or a value from @ref errno.h otherwise. |
* @return Zero on success or a value from @ref errno.h otherwise. |
*/ |
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) |
{ |
431,7 → 416,7 |
uintptr_t b = node->key[node->keys - 1]; |
count_t c = |
(count_t) node->value[node->keys - 1]; |
int i = 0; |
unsigned int i = 0; |
if (overlaps(b, c * PAGE_SIZE, area->base, |
pages * PAGE_SIZE)) { |
526,10 → 511,10 |
/** Destroy address space area. |
* |
* @param as Address space. |
* @param address Address withing the area to be deleted. |
* @param as Address space. |
* @param address Address within the area to be deleted. |
* |
* @return Zero on success or a value from @ref errno.h on failure. |
* @return Zero on success or a value from @ref errno.h on failure. |
*/ |
int as_area_destroy(as_t *as, uintptr_t address) |
{ |
561,7 → 546,7 |
for (cur = area->used_space.leaf_head.next; |
cur != &area->used_space.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
626,18 → 611,19 |
* sh_info of the source area. The process of duplicating the |
* mapping is done through the backend share function. |
* |
* @param src_as Pointer to source address space. |
* @param src_base Base address of the source address space area. |
* @param acc_size Expected size of the source area. |
* @param dst_as Pointer to destination address space. |
* @param dst_base Target base address. |
* @param src_as Pointer to source address space. |
* @param src_base Base address of the source address space area. |
* @param acc_size Expected size of the source area. |
* @param dst_as Pointer to destination address space. |
* @param dst_base Target base address. |
* @param dst_flags_mask Destination address space area flags mask. |
* |
* @return Zero on success or ENOENT if there is no such task or if there is no |
* such address space area, EPERM if there was a problem in accepting the area |
* or ENOMEM if there was a problem in allocating destination address space |
* area. ENOTSUP is returned if the address space area backend does not support |
* sharing. |
* @return Zero on success or ENOENT if there is no such task or if |
* there is no such address space area, EPERM if there was |
* a problem in accepting the area or ENOMEM if there was a |
* problem in allocating destination address space area. |
* ENOTSUP is returned if the address space area backend |
* does not support sharing. |
*/ |
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) |
698,7 → 684,7 |
sh_info = src_area->sh_info; |
if (!sh_info) { |
sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); |
mutex_initialize(&sh_info->lock); |
mutex_initialize(&sh_info->lock, MUTEX_PASSIVE); |
sh_info->refcount = 2; |
btree_create(&sh_info->pagemap); |
src_area->sh_info = sh_info; |
756,10 → 742,11 |
* |
* The address space area must be locked prior to this call. |
* |
* @param area Address space area. |
* @param access Access mode. |
* @param area Address space area. |
* @param access Access mode. |
* |
* @return False if access violates area's permissions, true otherwise. |
* @return False if access violates area's permissions, true |
* otherwise. |
*/ |
bool as_area_check_access(as_area_t *area, pf_access_t access) |
{ |
775,21 → 762,181 |
return true; |
} |
/** Change adress space area flags. |
* |
* The idea is to have the same data, but with a different access mode. |
* This is needed e.g. for writing code into memory and then executing it. |
* In order for this to work properly, this may copy the data |
* into private anonymous memory (unless it's already there). |
* |
* @param as Address space. |
* @param flags Flags of the area memory. |
* @param address Address withing the area to be changed. |
* |
* @return Zero on success or a value from @ref errno.h on failure. |
*/ |
int as_area_change_flags(as_t *as, int flags, uintptr_t address) |
{ |
as_area_t *area; |
uintptr_t base; |
link_t *cur; |
ipl_t ipl; |
int page_flags; |
uintptr_t *old_frame; |
index_t frame_idx; |
count_t used_pages; |
/* Flags for the new memory mapping */ |
page_flags = area_flags_to_page_flags(flags); |
ipl = interrupts_disable(); |
mutex_lock(&as->lock); |
area = find_area_and_lock(as, address); |
if (!area) { |
mutex_unlock(&as->lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
if (area->sh_info || area->backend != &anon_backend) { |
/* Copying shared areas not supported yet */ |
/* Copying non-anonymous memory not supported yet */ |
mutex_unlock(&area->lock); |
mutex_unlock(&as->lock); |
interrupts_restore(ipl); |
return ENOTSUP; |
} |
base = area->base; |
/* |
* Compute total number of used pages in the used_space B+tree |
*/ |
used_pages = 0; |
for (cur = area->used_space.leaf_head.next; |
cur != &area->used_space.leaf_head; cur = cur->next) { |
btree_node_t *node; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
used_pages += (count_t) node->value[i]; |
} |
} |
/* An array for storing frame numbers */ |
old_frame = malloc(used_pages * sizeof(uintptr_t), 0); |
/* |
* Start TLB shootdown sequence. |
*/ |
tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); |
/* |
* Remove used pages from page tables and remember their frame |
* numbers. |
*/ |
frame_idx = 0; |
for (cur = area->used_space.leaf_head.next; |
cur != &area->used_space.leaf_head; cur = cur->next) { |
btree_node_t *node; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
uintptr_t b = node->key[i]; |
count_t j; |
pte_t *pte; |
for (j = 0; j < (count_t) node->value[i]; j++) { |
page_table_lock(as, false); |
pte = page_mapping_find(as, b + j * PAGE_SIZE); |
ASSERT(pte && PTE_VALID(pte) && |
PTE_PRESENT(pte)); |
old_frame[frame_idx++] = PTE_GET_FRAME(pte); |
/* Remove old mapping */ |
page_mapping_remove(as, b + j * PAGE_SIZE); |
page_table_unlock(as, false); |
} |
} |
} |
/* |
* Finish TLB shootdown sequence. |
*/ |
tlb_invalidate_pages(as->asid, area->base, area->pages); |
/* |
* Invalidate potential software translation caches (e.g. TSB on |
* sparc64). |
*/ |
as_invalidate_translation_cache(as, area->base, area->pages); |
tlb_shootdown_finalize(); |
/* |
* Set the new flags. |
*/ |
area->flags = flags; |
/* |
* Map pages back in with new flags. This step is kept separate |
* so that the memory area could not be accesed with both the old and |
* the new flags at once. |
*/ |
frame_idx = 0; |
for (cur = area->used_space.leaf_head.next; |
cur != &area->used_space.leaf_head; cur = cur->next) { |
btree_node_t *node; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
uintptr_t b = node->key[i]; |
count_t j; |
for (j = 0; j < (count_t) node->value[i]; j++) { |
page_table_lock(as, false); |
/* Insert the new mapping */ |
page_mapping_insert(as, b + j * PAGE_SIZE, |
old_frame[frame_idx++], page_flags); |
page_table_unlock(as, false); |
} |
} |
} |
free(old_frame); |
mutex_unlock(&area->lock); |
mutex_unlock(&as->lock); |
interrupts_restore(ipl); |
return 0; |
} |
/** Handle page fault within the current address space. |
* |
* This is the high-level page fault handler. It decides |
* whether the page fault can be resolved by any backend |
* and if so, it invokes the backend to resolve the page |
* fault. |
* This is the high-level page fault handler. It decides whether the page fault |
* can be resolved by any backend and if so, it invokes the backend to resolve |
* the page fault. |
* |
* Interrupts are assumed disabled. |
* |
* @param page Faulting page. |
* @param access Access mode that caused the fault (i.e. read/write/exec). |
* @param istate Pointer to interrupted state. |
* @param page Faulting page. |
* @param access Access mode that caused the page fault (i.e. |
* read/write/exec). |
* @param istate Pointer to the interrupted state. |
* |
* @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the |
* fault was caused by copy_to_uspace() or copy_from_uspace(). |
* @return AS_PF_FAULT on page fault, AS_PF_OK on success or |
* AS_PF_DEFER if the fault was caused by copy_to_uspace() |
* or copy_from_uspace(). |
*/ |
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) |
{ |
835,9 → 982,8 |
page_table_lock(AS, false); |
/* |
* To avoid race condition between two page faults |
* on the same address, we need to make sure |
* the mapping has not been already inserted. |
* To avoid race condition between two page faults on the same address, |
* we need to make sure the mapping has not been already inserted. |
*/ |
if ((pte = page_mapping_find(AS, page))) { |
if (PTE_PRESENT(pte)) { |
891,8 → 1037,8 |
* |
* When this function is enetered, no spinlocks may be held. |
* |
* @param old Old address space or NULL. |
* @param new New address space. |
* @param old Old address space or NULL. |
* @param new New address space. |
*/ |
void as_switch(as_t *old_as, as_t *new_as) |
{ |
963,9 → 1109,9 |
/** Convert address space area flags to page flags. |
* |
* @param aflags Flags of some address space area. |
* @param aflags Flags of some address space area. |
* |
* @return Flags to be passed to page_mapping_insert(). |
* @return Flags to be passed to page_mapping_insert(). |
*/ |
int area_flags_to_page_flags(int aflags) |
{ |
993,9 → 1139,9 |
* The address space area must be locked. |
* Interrupts must be disabled. |
* |
* @param a Address space area. |
* @param a Address space area. |
* |
* @return Flags to be used in page_mapping_insert(). |
* @return Flags to be used in page_mapping_insert(). |
*/ |
int as_area_get_flags(as_area_t *a) |
{ |
1004,23 → 1150,20 |
/** Create page table. |
* |
* Depending on architecture, create either address space |
* private or global page table. |
* Depending on architecture, create either address space private or global page |
* table. |
* |
* @param flags Flags saying whether the page table is for kernel address space. |
* @param flags Flags saying whether the page table is for the kernel |
* address space. |
* |
* @return First entry of the page table. |
* @return First entry of the page table. |
*/ |
pte_t *page_table_create(int flags) |
{ |
#ifdef __OBJC__ |
return [as_t page_table_create: flags]; |
#else |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_create); |
return as_operations->page_table_create(flags); |
#endif |
} |
/** Destroy page table. |
1027,18 → 1170,14 |
* |
* Destroy page table in architecture specific way. |
* |
* @param page_table Physical address of PTL0. |
* @param page_table Physical address of PTL0. |
*/ |
void page_table_destroy(pte_t *page_table) |
{ |
#ifdef __OBJC__ |
return [as_t page_table_destroy: page_table]; |
#else |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_destroy); |
as_operations->page_table_destroy(page_table); |
#endif |
} |
/** Lock page table. |
1050,36 → 1189,28 |
* prior to this call. Address space can be locked prior to this |
* call in which case the lock argument is false. |
* |
* @param as Address space. |
* @param lock If false, do not attempt to lock as->lock. |
* @param as Address space. |
* @param lock If false, do not attempt to lock as->lock. |
*/ |
void page_table_lock(as_t *as, bool lock) |
{ |
#ifdef __OBJC__ |
[as page_table_lock: lock]; |
#else |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_lock); |
as_operations->page_table_lock(as, lock); |
#endif |
} |
/** Unlock page table. |
* |
* @param as Address space. |
* @param unlock If false, do not attempt to unlock as->lock. |
* @param as Address space. |
* @param unlock If false, do not attempt to unlock as->lock. |
*/ |
void page_table_unlock(as_t *as, bool unlock) |
{ |
#ifdef __OBJC__ |
[as page_table_unlock: unlock]; |
#else |
ASSERT(as_operations); |
ASSERT(as_operations->page_table_unlock); |
as_operations->page_table_unlock(as, unlock); |
#endif |
} |
1087,17 → 1218,17 |
* |
* The address space must be locked and interrupts must be disabled. |
* |
* @param as Address space. |
* @param va Virtual address. |
* @param as Address space. |
* @param va Virtual address. |
* |
* @return Locked address space area containing va on success or NULL on |
* failure. |
* @return Locked address space area containing va on success or |
* NULL on failure. |
*/ |
as_area_t *find_area_and_lock(as_t *as, uintptr_t va) |
{ |
as_area_t *a; |
btree_node_t *leaf, *lnode; |
int i; |
unsigned int i; |
a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); |
if (a) { |
1143,19 → 1274,19 |
* |
* The address space must be locked and interrupts must be disabled. |
* |
* @param as Address space. |
* @param va Starting virtual address of the area being tested. |
* @param size Size of the area being tested. |
* @param avoid_area Do not touch this area. |
* @param as Address space. |
* @param va Starting virtual address of the area being tested. |
* @param size Size of the area being tested. |
* @param avoid_area Do not touch this area. |
* |
* @return True if there is no conflict, false otherwise. |
* @return True if there is no conflict, false otherwise. |
*/ |
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, |
as_area_t *avoid_area) |
bool |
check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) |
{ |
as_area_t *a; |
btree_node_t *leaf, *node; |
int i; |
unsigned int i; |
/* |
* We don't want any area to have conflicts with NULL page. |
1240,7 → 1371,7 |
ipl = interrupts_disable(); |
src_area = find_area_and_lock(AS, base); |
if (src_area){ |
if (src_area) { |
size = src_area->pages * PAGE_SIZE; |
mutex_unlock(&src_area->lock); |
} else { |
1254,17 → 1385,17 |
* |
* The address space area must be already locked. |
* |
* @param a Address space area. |
* @param page First page to be marked. |
* @param count Number of page to be marked. |
* @param a Address space area. |
* @param page First page to be marked. |
* @param count Number of page to be marked. |
* |
* @return 0 on failure and 1 on success. |
* @return Zero on failure and non-zero on success. |
*/ |
int used_space_insert(as_area_t *a, uintptr_t page, count_t count) |
{ |
btree_node_t *leaf, *node; |
count_t pages; |
int i; |
unsigned int i; |
ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
ASSERT(count); |
1528,8 → 1659,8 |
} |
} |
panic("Inconsistency detected while adding %d pages of used space at " |
"%p.\n", count, page); |
panic("Inconsistency detected while adding %" PRIc " pages of used " |
"space at %p.\n", count, page); |
} |
/** Mark portion of address space area as unused. |
1536,17 → 1667,17 |
* |
* The address space area must be already locked. |
* |
* @param a Address space area. |
* @param page First page to be marked. |
* @param count Number of page to be marked. |
* @param a Address space area. |
* @param page First page to be marked. |
* @param count Number of page to be marked. |
* |
* @return 0 on failure and 1 on success. |
* @return Zero on failure and non-zero on success. |
*/ |
int used_space_remove(as_area_t *a, uintptr_t page, count_t count) |
{ |
btree_node_t *leaf, *node; |
count_t pages; |
int i; |
unsigned int i; |
ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); |
ASSERT(count); |
1707,8 → 1838,8 |
} |
error: |
panic("Inconsistency detected while removing %d pages of used space " |
"from %p.\n", count, page); |
panic("Inconsistency detected while removing %" PRIc " pages of used " |
"space from %p.\n", count, page); |
} |
/** Remove reference to address space area share info. |
1715,7 → 1846,7 |
* |
* If the reference count drops to 0, the sh_info is deallocated. |
* |
* @param sh_info Pointer to address space area share info. |
* @param sh_info Pointer to address space area share info. |
*/ |
void sh_info_remove_reference(share_info_t *sh_info) |
{ |
1734,7 → 1865,7 |
for (cur = sh_info->pagemap.leaf_head.next; |
cur != &sh_info->pagemap.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) |
1770,6 → 1901,12 |
return (unative_t) as_area_resize(AS, address, size, 0); |
} |
/** Wrapper for as_area_change_flags(). */ |
unative_t sys_as_area_change_flags(uintptr_t address, int flags) |
{ |
return (unative_t) as_area_change_flags(AS, flags, address); |
} |
/** Wrapper for as_area_destroy(). */ |
unative_t sys_as_area_destroy(uintptr_t address) |
{ |
1778,7 → 1915,7 |
/** Print out information about address space. |
* |
* @param as Address space. |
* @param as Address space. |
*/ |
void as_print(as_t *as) |
{ |
1795,14 → 1932,14 |
node = list_get_instance(cur, btree_node_t, leaf_link); |
int i; |
unsigned int i; |
for (i = 0; i < node->keys; i++) { |
as_area_t *area = node->value[i]; |
mutex_lock(&area->lock); |
printf("as_area: %p, base=%p, pages=%d (%p - %p)\n", |
area, area->base, area->pages, area->base, |
area->base + area->pages*PAGE_SIZE); |
printf("as_area: %p, base=%p, pages=%" PRIc |
" (%p - %p)\n", area, area->base, area->pages, |
area->base, area->base + FRAMES2SIZE(area->pages)); |
mutex_unlock(&area->lock); |
} |
} |
/branches/network/kernel/generic/src/mm/backend_anon.c |
---|
78,7 → 78,6 |
int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) |
{ |
uintptr_t frame; |
bool dirty = false; |
if (!as_area_check_access(area, access)) |
return AS_PF_FAULT; |
98,7 → 97,7 |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf); |
if (!frame) { |
bool allocate = true; |
int i; |
unsigned int i; |
/* |
* Zero can be returned as a valid frame address. |
106,7 → 105,7 |
*/ |
for (i = 0; i < leaf->keys; i++) { |
if (leaf->key[i] == |
ALIGN_DOWN(addr, PAGE_SIZE)) { |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base) { |
allocate = false; |
break; |
} |
113,8 → 112,7 |
} |
if (allocate) { |
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
dirty = true; |
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); |
/* |
* Insert the address of the newly allocated |
144,8 → 142,7 |
* the different causes |
*/ |
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
dirty = true; |
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); |
} |
/* |
193,13 → 190,13 |
for (cur = area->used_space.leaf_head.next; |
cur != &area->used_space.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
uintptr_t base = node->key[i]; |
count_t count = (count_t) node->value[i]; |
int j; |
unsigned int j; |
for (j = 0; j < count; j++) { |
pte_t *pte; |
/branches/network/kernel/generic/src/mm/backend_elf.c |
---|
48,6 → 48,7 |
#include <memstr.h> |
#include <macros.h> |
#include <arch.h> |
#include <arch/barrier.h> |
#ifdef CONFIG_VIRT_IDX_DCACHE |
#include <arch/mm/cache.h> |
67,12 → 68,13 |
* |
* The address space area and page tables must be already locked. |
* |
* @param area Pointer to the address space area. |
* @param addr Faulting virtual address. |
* @param access Access mode that caused the fault (i.e. read/write/exec). |
* @param area Pointer to the address space area. |
* @param addr Faulting virtual address. |
* @param access Access mode that caused the fault (i.e. |
* read/write/exec). |
* |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. |
* serviced). |
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK |
* on success (i.e. serviced). |
*/ |
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) |
{ |
79,7 → 81,7 |
elf_header_t *elf = area->backend_data.elf; |
elf_segment_header_t *entry = area->backend_data.segment; |
btree_node_t *leaf; |
uintptr_t base, frame; |
uintptr_t base, frame, page, start_anon; |
index_t i; |
bool dirty = false; |
86,12 → 88,18 |
if (!as_area_check_access(area, access)) |
return AS_PF_FAULT; |
ASSERT((addr >= entry->p_vaddr) && |
ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) && |
(addr < entry->p_vaddr + entry->p_memsz)); |
i = (addr - entry->p_vaddr) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; |
base = (uintptr_t) |
(((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); |
/* Virtual address of faulting page*/ |
page = ALIGN_DOWN(addr, PAGE_SIZE); |
/* Virtual address of the end of initialized part of segment */ |
start_anon = entry->p_vaddr + entry->p_filesz; |
if (area->sh_info) { |
bool found = false; |
98,12 → 106,12 |
/* |
* The address space area is shared. |
*/ |
mutex_lock(&area->sh_info->lock); |
frame = (uintptr_t) btree_search(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf); |
page - area->base, &leaf); |
if (!frame) { |
int i; |
unsigned int i; |
/* |
* Workaround for valid NULL address. |
110,8 → 118,7 |
*/ |
for (i = 0; i < leaf->keys; i++) { |
if (leaf->key[i] == |
ALIGN_DOWN(addr, PAGE_SIZE)) { |
if (leaf->key[i] == page - area->base) { |
found = true; |
break; |
} |
121,21 → 128,18 |
frame_reference_add(ADDR2PFN(frame)); |
page_mapping_insert(AS, addr, frame, |
as_area_get_flags(area)); |
if (!used_space_insert(area, |
ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
if (!used_space_insert(area, page, 1)) |
panic("Could not insert used space.\n"); |
mutex_unlock(&area->sh_info->lock); |
return AS_PF_OK; |
} |
} |
/* |
* The area is either not shared or the pagemap does not contain the |
* mapping. |
*/ |
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < |
entry->p_vaddr + entry->p_filesz) { |
if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { |
/* |
* Initialized portion of the segment. The memory is backed |
* directly by the content of the ELF image. Pages are |
148,20 → 152,15 |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memcpy((void *) PA2KA(frame), |
(void *) (base + i * FRAME_SIZE), FRAME_SIZE); |
if (entry->p_flags & PF_X) { |
smc_coherence_block((void *) PA2KA(frame), |
FRAME_SIZE); |
} |
dirty = true; |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
frame = KA2PA(base + i*FRAME_SIZE); |
frame = KA2PA(base + i * FRAME_SIZE); |
} |
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= |
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
} else if (page >= start_anon) { |
/* |
* This is the uninitialized portion of the segment. |
* It is not physically present in the ELF image. |
169,44 → 168,52 |
* and cleared. |
*/ |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); |
dirty = true; |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
size_t size; |
size_t pad_lo, pad_hi; |
/* |
* The mixed case. |
* The lower part is backed by the ELF image and |
* the upper part is anonymous memory. |
* |
* The middle part is backed by the ELF image and |
* the lower and upper parts are anonymous memory. |
* (The segment can be and often is shorter than 1 page). |
*/ |
size = entry->p_filesz - (i<<PAGE_WIDTH); |
if (page < entry->p_vaddr) |
pad_lo = entry->p_vaddr - page; |
else |
pad_lo = 0; |
if (start_anon < page + PAGE_SIZE) |
pad_hi = page + PAGE_SIZE - start_anon; |
else |
pad_hi = 0; |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0); |
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE), |
size); |
memcpy((void *) (PA2KA(frame) + pad_lo), |
(void *) (base + i * FRAME_SIZE + pad_lo), |
FRAME_SIZE - pad_lo - pad_hi); |
if (entry->p_flags & PF_X) { |
smc_coherence_block((void *) (PA2KA(frame) + pad_lo), |
FRAME_SIZE - pad_lo - pad_hi); |
} |
memsetb((void *) PA2KA(frame), pad_lo, 0); |
memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, |
0); |
dirty = true; |
} |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
if (dirty && area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, page - area->base, |
(void *) frame, leaf); |
} |
} |
if (area->sh_info) |
mutex_unlock(&area->sh_info->lock); |
page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); |
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
if (!used_space_insert(area, page, 1)) |
panic("Could not insert used space.\n"); |
return AS_PF_OK; |
216,9 → 223,10 |
* |
* The address space area and page tables must be already locked. |
* |
* @param area Pointer to the address space area. |
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE. |
* @param frame Frame to be released. |
* @param area Pointer to the address space area. |
* @param page Page that is mapped to frame. Must be aligned to |
* PAGE_SIZE. |
* @param frame Frame to be released. |
* |
*/ |
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame) |
225,17 → 233,17 |
{ |
elf_header_t *elf = area->backend_data.elf; |
elf_segment_header_t *entry = area->backend_data.segment; |
uintptr_t base; |
uintptr_t base, start_anon; |
index_t i; |
ASSERT((page >= entry->p_vaddr) && |
ASSERT((page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) && |
(page < entry->p_vaddr + entry->p_memsz)); |
i = (page - entry->p_vaddr) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
if (page + PAGE_SIZE < |
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
i = (page - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + |
ALIGN_DOWN(entry->p_offset, FRAME_SIZE)); |
start_anon = entry->p_vaddr + entry->p_filesz; |
if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { |
if (entry->p_flags & PF_W) { |
/* |
* Free the frame with the copy of writable segment |
261,7 → 269,7 |
* |
* The address space and address space area must be locked prior to the call. |
* |
* @param area Address space area. |
* @param area Address space area. |
*/ |
void elf_share(as_area_t *area) |
{ |
290,7 → 298,7 |
mutex_lock(&area->sh_info->lock); |
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; |
cur = cur->next) { |
int i; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
297,7 → 305,7 |
for (i = 0; i < node->keys; i++) { |
uintptr_t base = node->key[i]; |
count_t count = (count_t) node->value[i]; |
int j; |
unsigned int j; |
/* |
* Skip read-only areas of used space that are backed |
304,7 → 312,8 |
* by the ELF image. |
*/ |
if (!(area->flags & AS_AREA_WRITE)) |
if (base + count * PAGE_SIZE <= start_anon) |
if (base >= entry->p_vaddr && |
base + count * PAGE_SIZE <= start_anon) |
continue; |
for (j = 0; j < count; j++) { |
315,7 → 324,8 |
* ELF image. |
*/ |
if (!(area->flags & AS_AREA_WRITE)) |
if (base + (j + 1) * PAGE_SIZE <= |
if (base >= entry->p_vaddr && |
base + (j + 1) * PAGE_SIZE <= |
start_anon) |
continue; |
/branches/network/kernel/generic/src/mm/frame.c |
---|
58,6 → 58,8 |
#include <debug.h> |
#include <adt/list.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <synch/condvar.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <print.h> |
103,6 → 105,14 |
static zones_t zones; |
/* |
* Synchronization primitives used to sleep when there is no memory |
* available. |
*/ |
mutex_t mem_avail_mtx; |
condvar_t mem_avail_cv; |
unsigned long mem_avail_frames = 0; /**< Number of available frames. */ |
unsigned long mem_avail_gen = 0; /**< Generation counter. */ |
/********************/ |
/* Helper functions */ |
120,7 → 130,7 |
static inline int frame_index_valid(zone_t *zone, index_t index) |
{ |
return (index >= 0) && (index < zone->count); |
return (index < zone->count); |
} |
/** Compute pfn_t from frame_t pointer & zone pointer */ |
129,11 → 139,9 |
return (frame - zone->frames); |
} |
/** Initialize frame structure |
/** Initialize frame structure. |
* |
* Initialize frame structure. |
* |
* @param frame Frame structure to be initialized. |
* @param frame Frame structure to be initialized. |
*/ |
static void frame_initialize(frame_t *frame) |
{ |
145,11 → 153,10 |
/* Zoneinfo functions */ |
/**********************/ |
/** |
* Insert-sort zone into zones list |
/** Insert-sort zone into zones list. |
* |
* @param newzone New zone to be inserted into zone list |
* @return zone number on success, -1 on error |
* @param newzone New zone to be inserted into zone list. |
* @return Zone number on success, -1 on error. |
*/ |
static int zones_add_zone(zone_t *newzone) |
{ |
171,7 → 178,8 |
for (i = 0; i < zones.count; i++) { |
/* Check for overflow */ |
z = zones.info[i]; |
if (overlaps(newzone->base, newzone->count, z->base, z->count)) { |
if (overlaps(newzone->base, newzone->count, z->base, |
z->count)) { |
printf("Zones overlap!\n"); |
return -1; |
} |
192,17 → 200,16 |
return i; |
} |
/** |
* Try to find a zone where can we find the frame |
/** Try to find a zone where can we find the frame. |
* |
* Assume interrupts are disabled. |
* |
* @param frame Frame number contained in zone |
* @param pzone If not null, it is used as zone hint. Zone index |
* is filled into the variable on success. |
* @return Pointer to locked zone containing frame |
* @param frame Frame number contained in zone. |
* @param pzone If not null, it is used as zone hint. Zone index is |
* filled into the variable on success. |
* @return Pointer to locked zone containing frame. |
*/ |
static zone_t * find_zone_and_lock(pfn_t frame, unsigned int *pzone) |
static zone_t *find_zone_and_lock(pfn_t frame, unsigned int *pzone) |
{ |
unsigned int i; |
unsigned int hint = pzone ? *pzone : 0; |
210,7 → 217,7 |
spinlock_lock(&zones.lock); |
if (hint >= zones.count || hint < 0) |
if (hint >= zones.count) |
hint = 0; |
i = hint; |
229,7 → 236,7 |
i++; |
if (i >= zones.count) |
i = 0; |
} while(i != hint); |
} while (i != hint); |
spinlock_unlock(&zones.lock); |
return NULL; |
245,16 → 252,21 |
* |
* Assume interrupts are disabled. |
* |
* @param order Size (2^order) of free space we are trying to find |
* @param pzone Pointer to preferred zone or NULL, on return contains zone |
* number |
* @param order Size (2^order) of free space we are trying to find. |
* @param flags Required flags of the target zone. |
* @param pzone Pointer to preferred zone or NULL, on return contains |
* zone number. |
*/ |
static zone_t * find_free_zone_and_lock(uint8_t order, unsigned int *pzone) |
static zone_t * |
find_free_zone_and_lock(uint8_t order, int flags, unsigned int *pzone) |
{ |
unsigned int i; |
zone_t *z; |
unsigned int hint = pzone ? *pzone : 0; |
/* Mask off flags that are not applicable. */ |
flags &= FRAME_LOW_4_GiB; |
spinlock_lock(&zones.lock); |
if (hint >= zones.count) |
hint = 0; |
264,17 → 276,24 |
spinlock_lock(&z->lock); |
/* Check if the zone has 2^order frames area available */ |
if (zone_can_alloc(z, order)) { |
spinlock_unlock(&zones.lock); |
if (pzone) |
*pzone = i; |
return z; |
/* |
* Check whether the zone meets the search criteria. |
*/ |
if ((z->flags & flags) == flags) { |
/* |
* Check if the zone has 2^order frames area available. |
*/ |
if (zone_can_alloc(z, order)) { |
spinlock_unlock(&zones.lock); |
if (pzone) |
*pzone = i; |
return z; |
} |
} |
spinlock_unlock(&z->lock); |
if (++i >= zones.count) |
i = 0; |
} while(i != hint); |
} while (i != hint); |
spinlock_unlock(&zones.lock); |
return NULL; |
} |
283,12 → 302,13 |
/* Buddy system functions */ |
/**************************/ |
/** Buddy system find_block implementation |
/** Buddy system find_block implementation. |
* |
* Find block that is parent of current list. |
* That means go to lower addresses, until such block is found |
* |
* @param order - Order of parent must be different then this parameter!! |
* @param order Order of parent must be different then this |
* parameter!! |
*/ |
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child, |
uint8_t order) |
309,24 → 329,12 |
return NULL; |
} |
static void zone_buddy_print_id(buddy_system_t *b, link_t *block) |
{ |
frame_t *frame; |
zone_t *zone; |
index_t index; |
frame = list_get_instance(block, frame_t, buddy_link); |
zone = (zone_t *) b->data; |
index = frame_index(zone, frame); |
printf("%zd", index); |
} |
/** Buddy system find_buddy implementation |
/** Buddy system find_buddy implementation. |
* |
* @param b Buddy system. |
* @param block Block for which buddy should be found |
* @param b Buddy system. |
* @param block Block for which buddy should be found. |
* |
* @return Buddy for given block if found |
* @return Buddy for given block if found. |
*/ |
static link_t *zone_buddy_find_buddy(buddy_system_t *b, link_t *block) |
{ |
345,9 → 353,11 |
ASSERT(is_left ^ is_right); |
if (is_left) { |
index = (frame_index(zone, frame)) + (1 << frame->buddy_order); |
index = (frame_index(zone, frame)) + |
(1 << frame->buddy_order); |
} else { /* if (is_right) */ |
index = (frame_index(zone, frame)) - (1 << frame->buddy_order); |
index = (frame_index(zone, frame)) - |
(1 << frame->buddy_order); |
} |
if (frame_index_valid(zone, index)) { |
360,14 → 370,15 |
return NULL; |
} |
/** Buddy system bisect implementation |
/** Buddy system bisect implementation. |
* |
* @param b Buddy system. |
* @param block Block to bisect |
* @param b Buddy system. |
* @param block Block to bisect. |
* |
* @return right block |
* @return Right block. |
*/ |
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t *block) { |
static link_t *zone_buddy_bisect(buddy_system_t *b, link_t *block) |
{ |
frame_t *frame_l, *frame_r; |
frame_l = list_get_instance(block, frame_t, buddy_link); |
376,13 → 387,14 |
return &frame_r->buddy_link; |
} |
/** Buddy system coalesce implementation |
/** Buddy system coalesce implementation. |
* |
* @param b Buddy system. |
* @param block_1 First block |
* @param block_2 First block's buddy |
* @param b Buddy system. |
* @param block_1 First block. |
* @param block_2 First block's buddy. |
* |
* @return Coalesced block (actually block that represents lower address) |
* @return Coalesced block (actually block that represents lower |
* address). |
*/ |
static link_t *zone_buddy_coalesce(buddy_system_t *b, link_t *block_1, |
link_t *block_2) |
395,39 → 407,41 |
return frame1 < frame2 ? block_1 : block_2; |
} |
/** Buddy system set_order implementation |
/** Buddy system set_order implementation. |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* @param order Order to set |
* @param b Buddy system. |
* @param block Buddy system block. |
* @param order Order to set. |
*/ |
static void zone_buddy_set_order(buddy_system_t *b, link_t *block, |
uint8_t order) { |
uint8_t order) |
{ |
frame_t *frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->buddy_order = order; |
} |
/** Buddy system get_order implementation |
/** Buddy system get_order implementation. |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* @param b Buddy system. |
* @param block Buddy system block. |
* |
* @return Order of block |
* @return Order of block. |
*/ |
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block) { |
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block) |
{ |
frame_t *frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
return frame->buddy_order; |
} |
/** Buddy system mark_busy implementation |
/** Buddy system mark_busy implementation. |
* |
* @param b Buddy system |
* @param block Buddy system block |
* |
* @param b Buddy system. |
* @param block Buddy system block. |
*/ |
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) { |
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) |
{ |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
434,13 → 448,13 |
frame->refcount = 1; |
} |
/** Buddy system mark_available implementation |
/** Buddy system mark_available implementation. |
* |
* @param b Buddy system |
* @param block Buddy system block |
* |
* @param b Buddy system. |
* @param block Buddy system block. |
*/ |
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) { |
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) |
{ |
frame_t *frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->refcount = 0; |
454,8 → 468,7 |
.get_order = zone_buddy_get_order, |
.mark_busy = zone_buddy_mark_busy, |
.mark_available = zone_buddy_mark_available, |
.find_block = zone_buddy_find_block, |
.print_id = zone_buddy_print_id |
.find_block = zone_buddy_find_block |
}; |
/******************/ |
462,15 → 475,15 |
/* Zone functions */ |
/******************/ |
/** Allocate frame in particular zone |
/** Allocate frame in particular zone. |
* |
* Assume zone is locked |
* Assume zone is locked. |
* Panics if allocation is impossible. |
* |
* @param zone Zone to allocate from. |
* @param order Allocate exactly 2^order frames. |
* @param zone Zone to allocate from. |
* @param order Allocate exactly 2^order frames. |
* |
* @return Frame index in zone |
* @return Frame index in zone. |
* |
*/ |
static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order) |
496,12 → 509,12 |
return v; |
} |
/** Free frame from zone |
/** Free frame from zone. |
* |
* Assume zone is locked |
* Assume zone is locked. |
* |
* @param zone Pointer to zone from which the frame is to be freed |
* @param frame_idx Frame index relative to zone |
* @param zone Pointer to zone from which the frame is to be freed. |
* @param frame_idx Frame index relative to zone. |
*/ |
static void zone_frame_free(zone_t *zone, index_t frame_idx) |
{ |
524,14 → 537,14 |
} |
} |
/** Return frame from zone */ |
static frame_t * zone_get_frame(zone_t *zone, index_t frame_idx) |
/** Return frame from zone. */ |
static frame_t *zone_get_frame(zone_t *zone, index_t frame_idx) |
{ |
ASSERT(frame_idx < zone->count); |
return &zone->frames[frame_idx]; |
} |
/** Mark frame in zone unavailable to allocation */ |
/** Mark frame in zone unavailable to allocation. */ |
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx) |
{ |
frame_t *frame; |
544,18 → 557,21 |
&frame->buddy_link); |
ASSERT(link); |
zone->free_count--; |
mutex_lock(&mem_avail_mtx); |
mem_avail_frames--; |
mutex_unlock(&mem_avail_mtx); |
} |
/** |
* Join 2 zones |
/** Join two zones. |
* |
* Expect zone_t *z to point to space at least zone_conf_size large |
* Expect zone_t *z to point to space at least zone_conf_size large. |
* |
* Assume z1 & z2 are locked |
* Assume z1 & z2 are locked. |
* |
* @param z Target zone structure pointer |
* @param z1 Zone to merge |
* @param z2 Zone to merge |
* @param z Target zone structure pointer. |
* @param z1 Zone to merge. |
* @param z2 Zone to merge. |
*/ |
static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2) |
{ |
629,7 → 645,7 |
} |
} |
/** Return old configuration frames into the zone |
/** Return old configuration frames into the zone. |
* |
* We have several cases |
* - the conf. data is outside of zone -> exit, shall we call frame_free?? |
636,9 → 652,9 |
* - the conf. data was created by zone_create or |
* updated with reduce_region -> free every frame |
* |
* @param newzone The actual zone where freeing should occur |
* @param oldzone Pointer to old zone configuration data that should |
* be freed from new zone |
* @param newzone The actual zone where freeing should occur. |
* @param oldzone Pointer to old zone configuration data that should |
* be freed from new zone. |
*/ |
static void return_config_frames(zone_t *newzone, zone_t *oldzone) |
{ |
662,7 → 678,7 |
} |
} |
/** Reduce allocated block to count of order 0 frames |
/** Reduce allocated block to count of order 0 frames. |
* |
* The allocated block need 2^order frames of space. Reduce all frames |
* in block to order 0 and free the unneeded frames. This means, that |
670,8 → 686,8 |
* you have to free every frame. |
* |
* @param zone |
* @param frame_idx Index to block |
* @param count Allocated space in block |
* @param frame_idx Index to block. |
* @param count Allocated space in block. |
*/ |
static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count) |
{ |
698,7 → 714,7 |
} |
} |
/** Merge zones z1 and z2 |
/** Merge zones z1 and z2. |
* |
* - the zones must be 2 zones with no zone existing in between, |
* which means that z2 = z1+1 |
719,10 → 735,10 |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
if (z1 < 0 || z1 >= zones.count || z2 < 0 || z2 >= zones.count) |
if ((z1 >= zones.count) || (z2 >= zones.count)) |
goto errout; |
/* We can join only 2 zones with none existing inbetween */ |
if (z2-z1 != 1) |
if (z2 - z1 != 1) |
goto errout; |
zone1 = zones.info[z1]; |
773,8 → 789,7 |
interrupts_restore(ipl); |
} |
/** |
* Merge all zones into one big zone |
/** Merge all zones into one big zone. |
* |
* It is reasonable to do this on systems whose bios reports parts in chunks, |
* so that we could have 1 zone (it's faster). |
784,21 → 799,19 |
int count = zones.count; |
while (zones.count > 1 && --count) { |
zone_merge(0,1); |
zone_merge(0, 1); |
break; |
} |
} |
/** Create frame zone |
/** Create new frame zone. |
* |
* Create new frame zone. |
* @param start Physical address of the first frame within the zone. |
* @param count Count of frames in zone. |
* @param z Address of configuration information of zone. |
* @param flags Zone flags. |
* |
* @param start Physical address of the first frame within the zone. |
* @param count Count of frames in zone |
* @param z Address of configuration information of zone |
* @param flags Zone flags. |
* |
* @return Initialized zone. |
* @return Initialized zone. |
*/ |
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags) |
{ |
808,7 → 821,15 |
spinlock_initialize(&z->lock, "zone_lock"); |
z->base = start; |
z->count = count; |
/* Mask off flags that are calculated automatically. */ |
flags &= ~FRAME_LOW_4_GiB; |
/* Determine calculated flags. */ |
if (z->base + count < (1ULL << (32 - FRAME_WIDTH))) /* 4 GiB */ |
flags |= FRAME_LOW_4_GiB; |
z->flags = flags; |
z->free_count = count; |
z->busy_count = 0; |
819,8 → 840,7 |
z->buddy_system = (buddy_system_t *)&z[1]; |
buddy_system_create(z->buddy_system, max_order, |
&zone_buddy_system_operations, |
(void *) z); |
&zone_buddy_system_operations, (void *) z); |
/* Allocate frames _after_ the conframe */ |
/* Check sizes */ |
837,14 → 857,14 |
} |
} |
/** Compute configuration data size for zone |
/** Compute configuration data size for zone. |
* |
* @param count Size of zone in frames |
* @return Size of zone configuration info (in bytes) |
* @param count Size of zone in frames. |
* @return Size of zone configuration info (in bytes). |
*/ |
uintptr_t zone_conf_size(count_t count) |
{ |
int size = sizeof(zone_t) + count*sizeof(frame_t); |
int size = sizeof(zone_t) + count * sizeof(frame_t); |
int max_order; |
max_order = fnzb(count); |
852,20 → 872,20 |
return size; |
} |
/** Create and add zone to system |
/** Create and add zone to system. |
* |
* @param start First frame number (absolute) |
* @param count Size of zone in frames |
* @param confframe Where configuration frames are supposed to be. |
* Automatically checks, that we will not disturb the |
* kernel and possibly init. |
* If confframe is given _outside_ this zone, it is expected, |
* that the area is already marked BUSY and big enough |
* to contain zone_conf_size() amount of data. |
* If the confframe is inside the area, the zone free frame |
* information is modified not to include it. |
* @param start First frame number (absolute). |
* @param count Size of zone in frames. |
* @param confframe Where configuration frames are supposed to be. |
* Automatically checks, that we will not disturb the |
* kernel and possibly init. If confframe is given |
* _outside_ this zone, it is expected, that the area is |
* already marked BUSY and big enough to contain |
* zone_conf_size() amount of data. If the confframe is |
* inside the area, the zone free frame information is |
* modified not to include it. |
* |
* @return Zone number or -1 on error |
* @return Zone number or -1 on error. |
*/ |
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags) |
{ |
884,8 → 904,8 |
* it does not span kernel & init |
*/ |
confcount = SIZE2FRAMES(zone_conf_size(count)); |
if (confframe >= start && confframe < start+count) { |
for (;confframe < start + count; confframe++) { |
if (confframe >= start && confframe < start + count) { |
for (; confframe < start + count; confframe++) { |
addr = PFN2ADDR(confframe); |
if (overlaps(addr, PFN2ADDR(confcount), |
KA2PA(config.base), config.kernel_size)) |
919,11 → 939,16 |
if (znum == -1) |
return -1; |
mutex_lock(&mem_avail_mtx); |
mem_avail_frames += count; |
mutex_unlock(&mem_avail_mtx); |
/* If confdata in zone, mark as unavailable */ |
if (confframe >= start && confframe < start + count) |
for (i = confframe; i < confframe + confcount; i++) { |
zone_mark_unavailable(z, i - z->base); |
} |
return znum; |
} |
930,7 → 955,7 |
/***************************************/ |
/* Frame functions */ |
/** Set parent of frame */ |
/** Set parent of frame. */ |
void frame_set_parent(pfn_t pfn, void *data, unsigned int hint) |
{ |
zone_t *zone = find_zone_and_lock(pfn, &hint); |
937,7 → 962,7 |
ASSERT(zone); |
zone_get_frame(zone, pfn-zone->base)->parent = data; |
zone_get_frame(zone, pfn - zone->base)->parent = data; |
spinlock_unlock(&zone->lock); |
} |
955,19 → 980,20 |
/** Allocate power-of-two frames of physical memory. |
* |
* @param order Allocate exactly 2^order frames. |
* @param flags Flags for host zone selection and address processing. |
* @param pzone Preferred zone |
* @param order Allocate exactly 2^order frames. |
* @param flags Flags for host zone selection and address processing. |
* @param pzone Preferred zone. |
* |
* @return Physical address of the allocated frame. |
* @return Physical address of the allocated frame. |
* |
*/ |
void * frame_alloc_generic(uint8_t order, int flags, unsigned int *pzone) |
void *frame_alloc_generic(uint8_t order, int flags, unsigned int *pzone) |
{ |
ipl_t ipl; |
int freed; |
pfn_t v; |
zone_t *zone; |
unsigned long gen = 0; |
loop: |
ipl = interrupts_disable(); |
975,7 → 1001,7 |
/* |
* First, find suitable frame zone. |
*/ |
zone = find_free_zone_and_lock(order, pzone); |
zone = find_free_zone_and_lock(order, flags, pzone); |
/* If no memory, reclaim some slab memory, |
if it does not help, reclaim all */ |
982,23 → 1008,51 |
if (!zone && !(flags & FRAME_NO_RECLAIM)) { |
freed = slab_reclaim(0); |
if (freed) |
zone = find_free_zone_and_lock(order, pzone); |
zone = find_free_zone_and_lock(order, flags, pzone); |
if (!zone) { |
freed = slab_reclaim(SLAB_RECLAIM_ALL); |
if (freed) |
zone = find_free_zone_and_lock(order, pzone); |
zone = find_free_zone_and_lock(order, flags, |
pzone); |
} |
} |
if (!zone) { |
/* |
* TODO: Sleep until frames are available again. |
* Sleep until some frames are available again. |
*/ |
interrupts_restore(ipl); |
if (flags & FRAME_ATOMIC) |
if (flags & FRAME_ATOMIC) { |
interrupts_restore(ipl); |
return 0; |
} |
panic("Sleep not implemented.\n"); |
#ifdef CONFIG_DEBUG |
unsigned long avail; |
mutex_lock(&mem_avail_mtx); |
avail = mem_avail_frames; |
mutex_unlock(&mem_avail_mtx); |
printf("Thread %" PRIu64 " waiting for %u frames, " |
"%u available.\n", THREAD->tid, 1ULL << order, avail); |
#endif |
mutex_lock(&mem_avail_mtx); |
while ((mem_avail_frames < (1ULL << order)) || |
gen == mem_avail_gen) |
condvar_wait(&mem_avail_cv, &mem_avail_mtx); |
gen = mem_avail_gen; |
mutex_unlock(&mem_avail_mtx); |
#ifdef CONFIG_DEBUG |
mutex_lock(&mem_avail_mtx); |
avail = mem_avail_frames; |
mutex_unlock(&mem_avail_mtx); |
printf("Thread %" PRIu64 " woken up, %u frames available.\n", |
THREAD->tid, avail); |
#endif |
interrupts_restore(ipl); |
goto loop; |
} |
1006,6 → 1060,11 |
v += zone->base; |
spinlock_unlock(&zone->lock); |
mutex_lock(&mem_avail_mtx); |
mem_avail_frames -= (1ULL << order); |
mutex_unlock(&mem_avail_mtx); |
interrupts_restore(ipl); |
if (flags & FRAME_KA) |
1019,7 → 1078,7 |
* Decrement frame reference count. |
* If it drops to zero, move the frame structure to free list. |
* |
* @param Frame Physical Address of of the frame to be freed. |
* @param frame Physical Address of of the frame to be freed. |
*/ |
void frame_free(uintptr_t frame) |
{ |
1028,16 → 1087,26 |
pfn_t pfn = ADDR2PFN(frame); |
ipl = interrupts_disable(); |
/* |
* First, find host frame zone for addr. |
*/ |
zone = find_zone_and_lock(pfn,NULL); |
zone = find_zone_and_lock(pfn, NULL); |
ASSERT(zone); |
zone_frame_free(zone, pfn-zone->base); |
zone_frame_free(zone, pfn - zone->base); |
spinlock_unlock(&zone->lock); |
/* |
* Signal that some memory has been freed. |
*/ |
mutex_lock(&mem_avail_mtx); |
mem_avail_frames++; |
mem_avail_gen++; |
condvar_broadcast(&mem_avail_cv); |
mutex_unlock(&mem_avail_mtx); |
interrupts_restore(ipl); |
} |
1046,7 → 1115,7 |
* Find respective frame structure for supplied PFN and |
* increment frame reference count. |
* |
* @param pfn Frame number of the frame to be freed. |
* @param pfn Frame number of the frame to be freed. |
*/ |
void frame_reference_add(pfn_t pfn) |
{ |
1059,10 → 1128,10 |
/* |
* First, find host frame zone for addr. |
*/ |
zone = find_zone_and_lock(pfn,NULL); |
zone = find_zone_and_lock(pfn, NULL); |
ASSERT(zone); |
frame = &zone->frames[pfn-zone->base]; |
frame = &zone->frames[pfn - zone->base]; |
frame->refcount++; |
spinlock_unlock(&zone->lock); |
1069,7 → 1138,7 |
interrupts_restore(ipl); |
} |
/** Mark given range unavailable in frame zones */ |
/** Mark given range unavailable in frame zones. */ |
void frame_mark_unavailable(pfn_t start, count_t count) |
{ |
unsigned int i; |
1086,15 → 1155,14 |
} |
} |
/** Initialize physical memory management |
* |
* Initialize physical memory managemnt. |
*/ |
/** Initialize physical memory management. */ |
void frame_init(void) |
{ |
if (config.cpu_active == 1) { |
zones.count = 0; |
spinlock_initialize(&zones.lock, "zones.lock"); |
mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE); |
condvar_initialize(&mem_avail_cv); |
} |
/* Tell the architecture to create some memory */ |
frame_arch_init(); |
1122,10 → 1190,9 |
} |
/** Return total size of all zones |
* |
*/ |
uint64_t zone_total_size(void) { |
/** Return total size of all zones. */ |
uint64_t zone_total_size(void) |
{ |
zone_t *zone = NULL; |
unsigned int i; |
ipl_t ipl; |
1147,53 → 1214,86 |
return total; |
} |
/** Prints list of zones |
* |
*/ |
void zone_print_list(void) { |
/** Prints list of zones. */ |
void zone_print_list(void) |
{ |
zone_t *zone = NULL; |
unsigned int i; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
#ifdef __32_BITS__ |
printf("# base address free frames busy frames\n"); |
printf("-- ------------ ------------ ------------\n"); |
#endif |
#ifdef __64_BITS__ |
printf("# base address free frames busy frames\n"); |
printf("-- -------------------- ------------ ------------\n"); |
#endif |
if (sizeof(void *) == 4) { |
printf("# base address free frames busy frames\n"); |
printf("-- ------------ ------------ ------------\n"); |
} else { |
printf("# base address free frames busy frames\n"); |
printf("-- -------------------- ------------ ------------\n"); |
} |
for (i = 0; i < zones.count; i++) { |
/* |
* Because printing may require allocation of memory, we may not hold |
* the frame allocator locks when printing zone statistics. Therefore, |
* we simply gather the statistics under the protection of the locks and |
* print the statistics when the locks have been released. |
* |
* When someone adds/removes zones while we are printing the statistics, |
* we may end up with inaccurate output (e.g. a zone being skipped from |
* the listing). |
*/ |
for (i = 0; ; i++) { |
uintptr_t base; |
count_t free_count; |
count_t busy_count; |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
if (i >= zones.count) { |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
break; |
} |
zone = zones.info[i]; |
spinlock_lock(&zone->lock); |
base = PFN2ADDR(zone->base); |
free_count = zone->free_count; |
busy_count = zone->busy_count; |
spinlock_unlock(&zone->lock); |
if (sizeof(void *) == 4) |
printf("%-2d %#10zx %12zd %12zd\n", i, PFN2ADDR(zone->base), |
zone->free_count, zone->busy_count); |
else |
printf("%-2d %#18zx %12zd %12zd\n", i, PFN2ADDR(zone->base), |
zone->free_count, zone->busy_count); |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
#ifdef __32_BITS__ |
printf("%-2u %10p %12" PRIc " %12" PRIc "\n", i, base, |
free_count, busy_count); |
#endif |
#ifdef __64_BITS__ |
printf("%-2u %18p %12" PRIc " %12" PRIc "\n", i, base, |
free_count, busy_count); |
#endif |
spinlock_unlock(&zone->lock); |
} |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
} |
/** Prints zone details. |
* |
* @param num Zone base address or zone number. |
* @param num Zone base address or zone number. |
*/ |
void zone_print_one(unsigned int num) { |
void zone_print_one(unsigned int num) |
{ |
zone_t *zone = NULL; |
ipl_t ipl; |
unsigned int i; |
uintptr_t base; |
count_t count; |
count_t busy_count; |
count_t free_count; |
ipl = interrupts_disable(); |
spinlock_lock(&zones.lock); |
1205,26 → 1305,28 |
} |
} |
if (!zone) { |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
printf("Zone not found.\n"); |
goto out; |
return; |
} |
spinlock_lock(&zone->lock); |
printf("Memory zone information\n"); |
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, |
PFN2ADDR(zone->base)); |
printf("Zone size: %zd frames (%zd KB)\n", zone->count, |
SIZE2KB(FRAMES2SIZE(zone->count))); |
printf("Allocated space: %zd frames (%zd KB)\n", zone->busy_count, |
SIZE2KB(FRAMES2SIZE(zone->busy_count))); |
printf("Available space: %zd frames (%zd KB)\n", zone->free_count, |
SIZE2KB(FRAMES2SIZE(zone->free_count))); |
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE); |
base = PFN2ADDR(zone->base); |
count = zone->count; |
busy_count = zone->busy_count; |
free_count = zone->free_count; |
spinlock_unlock(&zone->lock); |
out: |
spinlock_unlock(&zones.lock); |
interrupts_restore(ipl); |
printf("Zone base address: %p\n", base); |
printf("Zone size: %" PRIc " frames (%" PRIs " KiB)\n", count, |
SIZE2KB(FRAMES2SIZE(count))); |
printf("Allocated space: %" PRIc " frames (%" PRIs " KiB)\n", |
busy_count, SIZE2KB(FRAMES2SIZE(busy_count))); |
printf("Available space: %" PRIc " frames (%" PRIs " KiB)\n", |
free_count, SIZE2KB(FRAMES2SIZE(free_count))); |
} |
/** @} |
/branches/network/kernel/generic/src/mm/buddy.c |
---|
35,8 → 35,7 |
* @brief Buddy allocator framework. |
* |
* This file contains buddy system allocator framework. |
* Specialized functions are needed for this abstract framework |
* to be useful. |
* Specialized functions are needed for this abstract framework to be useful. |
*/ |
#include <mm/buddy.h> |
44,8 → 43,9 |
#include <arch/types.h> |
#include <debug.h> |
#include <print.h> |
#include <macros.h> |
/** Return size needed for the buddy configuration data */ |
/** Return size needed for the buddy configuration data. */ |
size_t buddy_conf_size(int max_order) |
{ |
return sizeof(buddy_system_t) + (max_order + 1) * sizeof(link_t); |
52,21 → 52,20 |
} |
/** Create buddy system |
/** Create buddy system. |
* |
* Allocate memory for and initialize new buddy system. |
* |
* @param b Preallocated buddy system control data. |
* @param max_order The biggest allocable size will be 2^max_order. |
* @param op Operations for new buddy system. |
* @param data Pointer to be used by implementation. |
* @param b Preallocated buddy system control data. |
* @param max_order The biggest allocable size will be 2^max_order. |
* @param op Operations for new buddy system. |
* @param data Pointer to be used by implementation. |
* |
* @return New buddy system. |
* @return New buddy system. |
*/ |
void buddy_system_create(buddy_system_t *b, |
uint8_t max_order, |
buddy_system_operations_t *op, |
void *data) |
void |
buddy_system_create(buddy_system_t *b, uint8_t max_order, |
buddy_system_operations_t *op, void *data) |
{ |
int i; |
80,7 → 79,7 |
ASSERT(op->mark_busy); |
/* |
* Use memory after our own structure |
* Use memory after our own structure. |
*/ |
b->order = (link_t *) (&b[1]); |
92,14 → 91,15 |
b->data = data; |
} |
/** Check if buddy system can allocate block |
/** Check if buddy system can allocate block. |
* |
* @param b Buddy system pointer |
* @param i Size of the block (2^i) |
* @param b Buddy system pointer. |
* @param i Size of the block (2^i). |
* |
* @return True if block can be allocated |
* @return True if block can be allocated. |
*/ |
bool buddy_system_can_alloc(buddy_system_t *b, uint8_t i) { |
bool buddy_system_can_alloc(buddy_system_t *b, uint8_t i) |
{ |
uint8_t k; |
/* |
106,12 → 106,13 |
* If requested block is greater then maximal block |
* we know immediatly that we cannot satisfy the request. |
*/ |
if (i > b->max_order) return false; |
if (i > b->max_order) |
return false; |
/* |
* Check if any bigger or equal order has free elements |
*/ |
for (k=i; k <= b->max_order; k++) { |
for (k = i; k <= b->max_order; k++) { |
if (!list_empty(&b->order[k])) { |
return true; |
} |
118,12 → 119,11 |
} |
return false; |
} |
/** Allocate PARTICULAR block from buddy system |
/** Allocate PARTICULAR block from buddy system. |
* |
* @ return Block of data or NULL if no such block was found |
* @return Block of data or NULL if no such block was found. |
*/ |
link_t *buddy_system_alloc_block(buddy_system_t *b, link_t *block) |
{ |
134,7 → 134,7 |
ASSERT(left); |
list_remove(left); |
while (1) { |
if (! b->op->get_order(b,left)) { |
if (!b->op->get_order(b, left)) { |
b->op->mark_busy(b, left); |
return left; |
} |
142,8 → 142,8 |
order = b->op->get_order(b, left); |
right = b->op->bisect(b, left); |
b->op->set_order(b, left, order-1); |
b->op->set_order(b, right, order-1); |
b->op->set_order(b, left, order - 1); |
b->op->set_order(b, right, order - 1); |
tmp = b->op->find_block(b, block, BUDDY_SYSTEM_INNER_BLOCK); |
160,10 → 160,10 |
/** Allocate block from buddy system. |
* |
* @param b Buddy system pointer. |
* @param i Returned block will be 2^i big. |
* @param b Buddy system pointer. |
* @param i Returned block will be 2^i big. |
* |
* @return Block of data represented by link_t. |
* @return Block of data represented by link_t. |
*/ |
link_t *buddy_system_alloc(buddy_system_t *b, uint8_t i) |
{ |
217,13 → 217,12 |
buddy_system_free(b, hlp); |
return res; |
} |
/** Return block to buddy system. |
* |
* @param b Buddy system pointer. |
* @param block Block to return. |
* @param b Buddy system pointer. |
* @param block Block to return. |
*/ |
void buddy_system_free(buddy_system_t *b, link_t *block) |
{ |
267,7 → 266,8 |
b->op->set_order(b, hlp, i + 1); |
/* |
* Recursively add the coalesced block to the list of order i + 1. |
* Recursively add the coalesced block to the list of |
* order i + 1. |
*/ |
buddy_system_free(b, hlp); |
return; |
278,46 → 278,7 |
* Insert block into the list of order i. |
*/ |
list_append(block, &b->order[i]); |
} |
/** Prints out structure of buddy system |
* |
* @param b Pointer to buddy system |
* @param elem_size Element size |
*/ |
void buddy_system_structure_print(buddy_system_t *b, size_t elem_size) { |
index_t i; |
count_t cnt, elem_count = 0, block_count = 0; |
link_t * cur; |
printf("Order\tBlocks\tSize \tBlock size\tElems per block\n"); |
printf("-----\t------\t--------\t----------\t---------------\n"); |
for (i=0;i <= b->max_order; i++) { |
cnt = 0; |
if (!list_empty(&b->order[i])) { |
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) |
cnt++; |
} |
printf("#%zd\t%5zd\t%7zdK\t%8zdK\t%6zd\t", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i); |
if (!list_empty(&b->order[i])) { |
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) { |
b->op->print_id(b, cur); |
printf(" "); |
} |
} |
printf("\n"); |
block_count += cnt; |
elem_count += (1 << i) * cnt; |
} |
printf("-----\t------\t--------\t----------\t---------------\n"); |
printf("Buddy system contains %zd free elements (%zd blocks)\n" , elem_count, block_count); |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/mm/slab.c |
---|
167,12 → 167,12 |
* Allocate frames for slab space and initialize |
* |
*/ |
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags) |
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags) |
{ |
void *data; |
slab_t *slab; |
size_t fsize; |
int i; |
unsigned int i; |
unsigned int zone = 0; |
data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); |
179,7 → 179,7 |
if (!data) { |
return NULL; |
} |
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) { |
if (!(cache->flags & SLAB_CACHE_SLINSIDE)) { |
slab = slab_alloc(slab_extern_cache, flags); |
if (!slab) { |
frame_free(KA2PA(data)); |
191,8 → 191,8 |
} |
/* Fill in slab structures */ |
for (i=0; i < (1 << cache->order); i++) |
frame_set_parent(ADDR2PFN(KA2PA(data))+i, slab, zone); |
for (i = 0; i < ((unsigned int) 1 << cache->order); i++) |
frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); |
slab->start = data; |
slab->available = cache->objects; |
199,8 → 199,8 |
slab->nextavail = 0; |
slab->cache = cache; |
for (i=0; i<cache->objects;i++) |
*((int *) (slab->start + i*cache->size)) = i+1; |
for (i = 0; i < cache->objects; i++) |
*((int *) (slab->start + i*cache->size)) = i + 1; |
atomic_inc(&cache->allocated_slabs); |
return slab; |
239,8 → 239,7 |
* |
* @return Number of freed pages |
*/ |
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, |
slab_t *slab) |
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab) |
{ |
int freed = 0; |
256,7 → 255,7 |
ASSERT(slab->available < cache->objects); |
*((int *)obj) = slab->nextavail; |
slab->nextavail = (obj - slab->start)/cache->size; |
slab->nextavail = (obj - slab->start) / cache->size; |
slab->available++; |
/* Move it to correct list */ |
281,7 → 280,7 |
* |
* @return Object address or null |
*/ |
static void * slab_obj_create(slab_cache_t *cache, int flags) |
static void *slab_obj_create(slab_cache_t *cache, int flags) |
{ |
slab_t *slab; |
void *obj; |
301,7 → 300,8 |
return NULL; |
spinlock_lock(&cache->slablock); |
} else { |
slab = list_get_instance(cache->partial_slabs.next, slab_t, link); |
slab = list_get_instance(cache->partial_slabs.next, slab_t, |
link); |
list_remove(&slab->link); |
} |
obj = slab->start + slab->nextavail * cache->size; |
332,8 → 332,7 |
* |
* @param first If true, return first, else last mag |
*/ |
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache, |
int first) |
static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first) |
{ |
slab_magazine_t *mag = NULL; |
link_t *cur; |
368,13 → 367,12 |
* |
* @return Number of freed pages |
*/ |
static count_t magazine_destroy(slab_cache_t *cache, |
slab_magazine_t *mag) |
static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag) |
{ |
int i; |
unsigned int i; |
count_t frames = 0; |
for (i=0;i < mag->busy; i++) { |
for (i = 0; i < mag->busy; i++) { |
frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
atomic_dec(&cache->cached_objs); |
} |
389,7 → 387,7 |
* |
* Assume cpu_magazine lock is held |
*/ |
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache) |
static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) |
{ |
slab_magazine_t *cmag, *lastmag, *newmag; |
423,7 → 421,7 |
* |
* @return Pointer to object or NULL if not available |
*/ |
static void * magazine_obj_get(slab_cache_t *cache) |
static void *magazine_obj_get(slab_cache_t *cache) |
{ |
slab_magazine_t *mag; |
void *obj; |
458,7 → 456,7 |
* allocate new, exchange last & current |
* |
*/ |
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache) |
static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) |
{ |
slab_magazine_t *cmag,*lastmag,*newmag; |
527,25 → 525,26 |
/* Slab cache functions */ |
/** Return number of objects that fit in certain cache size */ |
static int comp_objects(slab_cache_t *cache) |
static unsigned int comp_objects(slab_cache_t *cache) |
{ |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size; |
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / |
cache->size; |
else |
return (PAGE_SIZE << cache->order) / cache->size; |
} |
/** Return wasted space in slab */ |
static int badness(slab_cache_t *cache) |
static unsigned int badness(slab_cache_t *cache) |
{ |
int objects; |
int ssize; |
unsigned int objects; |
unsigned int ssize; |
objects = comp_objects(cache); |
ssize = PAGE_SIZE << cache->order; |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
ssize -= sizeof(slab_t); |
return ssize - objects*cache->size; |
return ssize - objects * cache->size; |
} |
/** |
553,33 → 552,29 |
*/ |
static void make_magcache(slab_cache_t *cache)
{
	unsigned int i;
	
	/* Per-CPU magazine caches require the slab allocator to be
	 * fully initialized (allocation of the array below must work). */
	ASSERT(_slab_initialized >= 2);
	
	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
	    0);
	for (i = 0; i < config.cpu_count; i++) {
		/* Zero the per-CPU slot and set up its spinlock. */
		memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
		    "slab_maglock_cpu");
	}
}
/** Initialize allocated memory as a slab cache */ |
static void |
_slab_cache_create(slab_cache_t *cache, |
char *name, |
size_t size, |
size_t align, |
int (*constructor)(void *obj, int kmflag), |
int (*destructor)(void *obj), |
int flags) |
_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align, |
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj), |
int flags) |
{ |
int pages; |
ipl_t ipl; |
memsetb((uintptr_t)cache, sizeof(*cache), 0); |
memsetb(cache, sizeof(*cache), 0); |
cache->name = name; |
if (align < sizeof(unative_t)) |
597,7 → 592,7 |
list_initialize(&cache->magazines); |
spinlock_initialize(&cache->slablock, "slab_lock"); |
spinlock_initialize(&cache->maglock, "slab_maglock"); |
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) |
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) |
make_magcache(cache); |
/* Compute slab sizes, object counts in slabs etc. */ |
610,7 → 605,7 |
if (pages == 1) |
cache->order = 0; |
else |
cache->order = fnzb(pages-1)+1; |
cache->order = fnzb(pages - 1) + 1; |
while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
cache->order += 1; |
631,18 → 626,16 |
} |
/** Create slab cache */ |
slab_cache_t * slab_cache_create(char *name, |
size_t size, |
size_t align, |
int (*constructor)(void *obj, int kmflag), |
int (*destructor)(void *obj), |
int flags) |
slab_cache_t * |
slab_cache_create(char *name, size_t size, size_t align, |
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj), |
int flags) |
{ |
slab_cache_t *cache; |
cache = slab_alloc(&slab_cache_cache, 0); |
_slab_cache_create(cache, name, size, align, constructor, destructor, |
flags); |
flags); |
return cache; |
} |
654,7 → 647,7 |
*/ |
static count_t _slab_reclaim(slab_cache_t *cache, int flags) |
{ |
int i; |
unsigned int i; |
slab_magazine_t *mag; |
count_t frames = 0; |
int magcount; |
666,7 → 659,7 |
* endless loop |
*/ |
magcount = atomic_get(&cache->magazine_counter); |
while (magcount-- && (mag=get_mag_from_cache(cache,0))) { |
while (magcount-- && (mag=get_mag_from_cache(cache, 0))) { |
frames += magazine_destroy(cache,mag); |
if (!(flags & SLAB_RECLAIM_ALL) && frames) |
break; |
675,7 → 668,7 |
if (flags & SLAB_RECLAIM_ALL) { |
/* Free cpu-bound magazines */ |
/* Destroy CPU magazines */ |
for (i=0; i<config.cpu_count; i++) { |
for (i = 0; i < config.cpu_count; i++) { |
spinlock_lock(&cache->mag_cache[i].lock); |
mag = cache->mag_cache[i].current; |
719,8 → 712,8 |
_slab_reclaim(cache, SLAB_RECLAIM_ALL); |
/* All slabs must be empty */ |
if (!list_empty(&cache->full_slabs) \ |
|| !list_empty(&cache->partial_slabs)) |
if (!list_empty(&cache->full_slabs) || |
!list_empty(&cache->partial_slabs)) |
panic("Destroying cache that is not empty."); |
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) |
728,9 → 721,8 |
slab_free(&slab_cache_cache, cache); |
} |
/** Allocate new object from cache - if no flags given, always returns |
memory */ |
void * slab_alloc(slab_cache_t *cache, int flags) |
/** Allocate new object from cache - if no flags given, always returns memory */ |
void *slab_alloc(slab_cache_t *cache, int flags) |
{ |
ipl_t ipl; |
void *result = NULL; |
759,9 → 751,8 |
ipl = interrupts_disable(); |
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \ |
|| magazine_obj_put(cache, obj)) { |
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) || |
magazine_obj_put(cache, obj)) { |
slab_obj_destroy(cache, obj, slab); |
} |
788,7 → 779,8 |
* memory allocation from interrupts can deadlock. |
*/ |
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
for (cur = slab_cache_list.next; cur != &slab_cache_list; |
cur = cur->next) { |
cache = list_get_instance(cur, slab_cache_t, link); |
frames += _slab_reclaim(cache, flags); |
} |
802,22 → 794,77 |
/* Print list of slabs */ |
void slab_print_list(void) |
{ |
slab_cache_t *cache; |
link_t *cur; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&slab_cache_lock); |
printf("slab name size pages obj/pg slabs cached allocated ctl\n"); |
printf("---------------- -------- ------ ------ ------ ------ --------- ---\n"); |
for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) { |
int skip = 0; |
printf("slab name size pages obj/pg slabs cached allocated" |
" ctl\n"); |
printf("---------------- -------- ------ ------ ------ ------ ---------" |
" ---\n"); |
while (true) { |
slab_cache_t *cache; |
link_t *cur; |
ipl_t ipl; |
int i; |
/* |
* We must not hold the slab_cache_lock spinlock when printing |
* the statistics. Otherwise we can easily deadlock if the print |
* needs to allocate memory. |
* |
* Therefore, we walk through the slab cache list, skipping some |
* amount of already processed caches during each iteration and |
* gathering statistics about the first unprocessed cache. For |
* the sake of printing the statistics, we realese the |
* slab_cache_lock and reacquire it afterwards. Then the walk |
* starts again. |
* |
* This limits both the efficiency and also accuracy of the |
* obtained statistics. The efficiency is decreased because the |
* time complexity of the algorithm is quadratic instead of |
* linear. The accuracy is impacted because we drop the lock |
* after processing one cache. If there is someone else |
* manipulating the cache list, we might omit an arbitrary |
* number of caches or process one cache multiple times. |
* However, we don't bleed for this algorithm for it is only |
* statistics. |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&slab_cache_lock); |
for (i = 0, cur = slab_cache_list.next; |
i < skip && cur != &slab_cache_list; |
i++, cur = cur->next) |
; |
if (cur == &slab_cache_list) { |
spinlock_unlock(&slab_cache_lock); |
interrupts_restore(ipl); |
break; |
} |
skip++; |
cache = list_get_instance(cur, slab_cache_t, link); |
char *name = cache->name; |
uint8_t order = cache->order; |
size_t size = cache->size; |
unsigned int objects = cache->objects; |
long allocated_slabs = atomic_get(&cache->allocated_slabs); |
long cached_objs = atomic_get(&cache->cached_objs); |
long allocated_objs = atomic_get(&cache->allocated_objs); |
int flags = cache->flags; |
printf("%-16s %8zd %6zd %6zd %6zd %6zd %9zd %-3s\n", cache->name, cache->size, (1 << cache->order), cache->objects, atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs), atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out"); |
spinlock_unlock(&slab_cache_lock); |
interrupts_restore(ipl); |
printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n", |
name, size, (1 << order), objects, allocated_slabs, |
cached_objs, allocated_objs, |
flags & SLAB_CACHE_SLINSIDE ? "in" : "out"); |
} |
spinlock_unlock(&slab_cache_lock); |
interrupts_restore(ipl); |
} |
void slab_cache_init(void) |
825,32 → 872,24 |
int i, size; |
/* Initialize magazine cache */ |
_slab_cache_create(&mag_cache, |
"slab_magazine", |
sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*), |
sizeof(uintptr_t), |
NULL, NULL, |
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); |
_slab_cache_create(&mag_cache, "slab_magazine", |
sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*), |
sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE | |
SLAB_CACHE_SLINSIDE); |
/* Initialize slab_cache cache */ |
_slab_cache_create(&slab_cache_cache, |
"slab_cache", |
sizeof(slab_cache_cache), |
sizeof(uintptr_t), |
NULL, NULL, |
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); |
_slab_cache_create(&slab_cache_cache, "slab_cache", |
sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL, |
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); |
/* Initialize external slab cache */ |
slab_extern_cache = slab_cache_create("slab_extern", |
sizeof(slab_t), |
0, NULL, NULL, |
SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); |
slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0, |
NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); |
/* Initialize structures for malloc */ |
for (i=0, size=(1<<SLAB_MIN_MALLOC_W); |
i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1); |
i++, size <<= 1) { |
malloc_caches[i] = slab_cache_create(malloc_names[i], |
size, 0, |
NULL,NULL, SLAB_CACHE_MAGDEFERRED); |
for (i = 0, size = (1 << SLAB_MIN_MALLOC_W); |
i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1); |
i++, size <<= 1) { |
malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0, |
NULL, NULL, SLAB_CACHE_MAGDEFERRED); |
} |
#ifdef CONFIG_DEBUG |
_slab_initialized = 1; |
875,9 → 914,11 |
spinlock_lock(&slab_cache_lock); |
for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){ |
for (cur = slab_cache_list.next; cur != &slab_cache_list; |
cur = cur->next){ |
s = list_get_instance(cur, slab_cache_t, link); |
if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED) |
if ((s->flags & SLAB_CACHE_MAGDEFERRED) != |
SLAB_CACHE_MAGDEFERRED) |
continue; |
make_magcache(s); |
s->flags &= ~SLAB_CACHE_MAGDEFERRED; |
888,7 → 929,7 |
/**************************************/ |
/* kalloc/kfree functions */ |
void * malloc(unsigned int size, int flags) |
void *malloc(unsigned int size, int flags) |
{ |
ASSERT(_slab_initialized); |
ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W)); |
901,7 → 942,7 |
return slab_alloc(malloc_caches[idx], flags); |
} |
void * realloc(void *ptr, unsigned int size, int flags) |
void *realloc(void *ptr, unsigned int size, int flags) |
{ |
ASSERT(_slab_initialized); |
ASSERT(size <= (1 << SLAB_MAX_MALLOC_W)); |
/branches/network/kernel/generic/src/mm/page.c |
---|
40,11 → 40,28 |
* They however, define the single interface. |
*/ |
/* |
* Note on memory prefetching and updating memory mappings, also described in: |
* AMD x86-64 Architecture Programmer's Manual, Volume 2, System Programming, |
* 7.2.1 Special Coherency Considerations. |
* |
* The processor which modifies a page table mapping can access prefetched data |
* from the old mapping. In order to prevent this, we place a memory barrier |
* after a mapping is updated. |
* |
* We assume that the other processors are either not using the mapping yet |
* (i.e. during the bootstrap) or are executing the TLB shootdown code. While |
* we don't care much about the former case, the processors in the latter case |
* will do an implicit serialization by virtue of running the TLB shootdown |
* interrupt handler. |
*/ |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <arch/mm/asid.h> |
#include <mm/as.h> |
#include <mm/frame.h> |
#include <arch/barrier.h> |
#include <arch/types.h> |
#include <arch/asm.h> |
#include <memstr.h> |
65,8 → 82,8 |
* considering possible crossings |
* of page boundaries. |
* |
* @param s Address of the structure. |
* @param size Size of the structure. |
* @param s Address of the structure. |
* @param size Size of the structure. |
*/ |
void map_structure(uintptr_t s, size_t size) |
{ |
76,8 → 93,11 |
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0); |
for (i = 0; i < cnt; i++) |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE); |
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, |
s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE); |
/* Repel prefetched accesses to the old mapping. */ |
memory_barrier(); |
} |
/** Insert mapping of page to frame. |
87,10 → 107,11 |
* |
* The page table must be locked and interrupts must be disabled. |
* |
* @param as Address space to wich page belongs. |
* @param page Virtual address of the page to be mapped. |
* @param frame Physical address of memory frame to which the mapping is done. |
* @param flags Flags to be used for mapping. |
* @param as Address space to wich page belongs. |
* @param page Virtual address of the page to be mapped. |
* @param frame Physical address of memory frame to which the mapping is |
* done. |
* @param flags Flags to be used for mapping. |
*/ |
void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags) |
{ |
98,6 → 119,9 |
ASSERT(page_mapping_operations->mapping_insert); |
page_mapping_operations->mapping_insert(as, page, frame, flags); |
/* Repel prefetched accesses to the old mapping. */ |
memory_barrier(); |
} |
/** Remove mapping of page. |
108,8 → 132,8 |
* |
* The page table must be locked and interrupts must be disabled. |
* |
* @param as Address space to wich page belongs. |
* @param page Virtual address of the page to be demapped. |
* @param as Address space to wich page belongs. |
* @param page Virtual address of the page to be demapped. |
*/ |
void page_mapping_remove(as_t *as, uintptr_t page) |
{ |
117,6 → 141,9 |
ASSERT(page_mapping_operations->mapping_remove); |
page_mapping_operations->mapping_remove(as, page); |
/* Repel prefetched accesses to the old mapping. */ |
memory_barrier(); |
} |
/** Find mapping for virtual page |
125,10 → 152,11 |
* |
* The page table must be locked and interrupts must be disabled. |
* |
* @param as Address space to wich page belongs. |
* @param page Virtual page. |
* @param as Address space to wich page belongs. |
* @param page Virtual page. |
* |
* @return NULL if there is no such mapping; requested mapping otherwise. |
* @return NULL if there is no such mapping; requested mapping |
* otherwise. |
*/ |
pte_t *page_mapping_find(as_t *as, uintptr_t page) |
{ |
/branches/network/kernel/generic/src/mm/tlb.c |
---|
81,7 → 81,7 |
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, |
uintptr_t page, count_t count) |
{ |
int i; |
unsigned int i; |
CPU->tlb_active = 0; |
spinlock_lock(&tlblock); |
144,7 → 144,7 |
asid_t asid; |
uintptr_t page; |
count_t count; |
int i; |
unsigned int i; |
ASSERT(CPU); |
/branches/network/kernel/generic/src/ipc/sysipc.c |
---|
164,11 → 164,11 |
{ |
int phoneid; |
if (IPC_GET_RETVAL(answer->data) == EHANGUP) { |
if ((native_t) IPC_GET_RETVAL(answer->data) == EHANGUP) { |
/* In case of forward, hangup the forwared phone, |
* not the originator |
*/ |
spinlock_lock(&answer->data.phone->lock); |
mutex_lock(&answer->data.phone->lock); |
spinlock_lock(&TASK->answerbox.lock); |
if (answer->data.phone->state == IPC_PHONE_CONNECTED) { |
list_remove(&answer->data.phone->link); |
175,7 → 175,7 |
answer->data.phone->state = IPC_PHONE_SLAMMED; |
} |
spinlock_unlock(&TASK->answerbox.lock); |
spinlock_unlock(&answer->data.phone->lock); |
mutex_unlock(&answer->data.phone->lock); |
} |
if (!olddata) |
243,7 → 243,7 |
uintptr_t dst = IPC_GET_ARG1(*olddata); |
size_t max_size = IPC_GET_ARG2(*olddata); |
size_t size = IPC_GET_ARG2(answer->data); |
if (size <= max_size) { |
if (size && size <= max_size) { |
/* |
* Copy the destination VA so that this piece of |
* information is not lost. |
258,6 → 258,8 |
free(answer->buffer); |
answer->buffer = NULL; |
} |
} else if (!size) { |
IPC_SET_RETVAL(answer->data, EOK); |
} else { |
IPC_SET_RETVAL(answer->data, ELIMIT); |
} |
268,12 → 270,12 |
/* The recipient agreed to receive data. */ |
int rc; |
uintptr_t dst; |
uintptr_t size; |
uintptr_t max_size; |
size_t size; |
size_t max_size; |
dst = IPC_GET_ARG1(answer->data); |
size = IPC_GET_ARG2(answer->data); |
max_size = IPC_GET_ARG2(*olddata); |
dst = (uintptr_t)IPC_GET_ARG1(answer->data); |
size = (size_t)IPC_GET_ARG2(answer->data); |
max_size = (size_t)IPC_GET_ARG2(*olddata); |
if (size <= max_size) { |
rc = copy_to_uspace((void *) dst, |
354,7 → 356,7 |
*/ |
static void process_answer(call_t *call) |
{ |
if (IPC_GET_RETVAL(call->data) == EHANGUP && |
if (((native_t) IPC_GET_RETVAL(call->data) == EHANGUP) && |
(call->flags & IPC_CALL_FORWARDED)) |
IPC_SET_RETVAL(call->data, EFORWARD); |
424,7 → 426,7 |
phone_t *phone; |
int res; |
int rc; |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
ipc_call_static_init(&call); |
440,7 → 442,9 |
IPC_SET_ARG5(call.data, 0); |
if (!(res = request_preprocess(&call))) { |
ipc_call_sync(phone, &call); |
rc = ipc_call_sync(phone, &call); |
if (rc != EOK) |
return rc; |
process_answer(&call); |
} else { |
IPC_SET_RETVAL(call.data, res); |
478,7 → 482,9 |
GET_CHECK_PHONE(phone, phoneid, return ENOENT); |
if (!(res = request_preprocess(&call))) { |
ipc_call_sync(phone, &call); |
rc = ipc_call_sync(phone, &call); |
if (rc != EOK) |
return rc; |
process_answer(&call); |
} else |
IPC_SET_RETVAL(call.data, res); |
624,7 → 630,7 |
IPC_SET_RETVAL(call->data, EFORWARD); |
ipc_answer(&TASK->answerbox, call); |
return ENOENT; |
}); |
}); |
if (!method_is_forwardable(IPC_GET_METHOD(call->data))) { |
IPC_SET_RETVAL(call->data, EFORWARD); |
/branches/network/kernel/generic/src/ipc/ipc.c |
---|
37,7 → 37,9 |
* First the answerbox, then the phone. |
*/ |
#include <synch/synch.h> |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
#include <synch/synch.h> |
#include <ipc/ipc.h> |
64,7 → 66,7 |
*/ |
static void _ipc_call_init(call_t *call) |
{ |
memsetb((uintptr_t) call, sizeof(*call), 0); |
memsetb(call, sizeof(*call), 0); |
call->callerbox = &TASK->answerbox; |
call->sender = TASK; |
call->buffer = NULL; |
85,7 → 87,8 |
call_t *call; |
call = slab_alloc(ipc_call_slab, flags); |
_ipc_call_init(call); |
if (call) |
_ipc_call_init(call); |
return call; |
} |
117,8 → 120,9 |
/** Initialize an answerbox structure. |
* |
* @param box Answerbox structure to be initialized. |
* @param task Task to which the answerbox belongs. |
*/ |
void ipc_answerbox_init(answerbox_t *box) |
void ipc_answerbox_init(answerbox_t *box, task_t *task) |
{ |
spinlock_initialize(&box->lock, "ipc_box_lock"); |
spinlock_initialize(&box->irq_lock, "ipc_box_irqlock"); |
129,7 → 133,7 |
list_initialize(&box->answers); |
list_initialize(&box->irq_notifs); |
list_initialize(&box->irq_head); |
box->task = TASK; |
box->task = task; |
} |
/** Connect a phone to an answerbox. |
139,7 → 143,7 |
*/ |
void ipc_phone_connect(phone_t *phone, answerbox_t *box) |
{ |
spinlock_lock(&phone->lock); |
mutex_lock(&phone->lock); |
phone->state = IPC_PHONE_CONNECTED; |
phone->callee = box; |
148,7 → 152,7 |
list_append(&phone->link, &box->connected_phones); |
spinlock_unlock(&box->lock); |
spinlock_unlock(&phone->lock); |
mutex_unlock(&phone->lock); |
} |
/** Initialize a phone structure. |
157,7 → 161,7 |
*/ |
void ipc_phone_init(phone_t *phone) |
{ |
spinlock_initialize(&phone->lock, "phone_lock"); |
mutex_initialize(&phone->lock, MUTEX_PASSIVE); |
phone->callee = NULL; |
phone->state = IPC_PHONE_FREE; |
atomic_set(&phone->active_calls, 0); |
167,18 → 171,23 |
* |
* @param phone Destination kernel phone structure. |
* @param request Call structure with request. |
* |
* @return EOK on success or EINTR if the sleep was interrupted. |
*/ |
int ipc_call_sync(phone_t *phone, call_t *request)
{
	answerbox_t sync_box;
	
	ipc_answerbox_init(&sync_box, TASK);
	
	/* We will receive data in a special box. */
	request->callerbox = &sync_box;
	
	ipc_call(phone, request);
	/*
	 * Sleep interruptibly so that the caller is not blocked forever
	 * by an unresponsive peer.
	 */
	if (!ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT,
	    SYNCH_FLAGS_INTERRUPTIBLE))
		return EINTR;
	return EOK;
}
/** Answer a message which was not dispatched and is not listed in any queue. |
191,6 → 200,13 |
call->flags |= IPC_CALL_ANSWERED; |
if (call->flags & IPC_CALL_FORWARDED) { |
if (call->data.caller_phone) { |
/* Demasquerade the caller phone. */ |
call->data.phone = call->data.caller_phone; |
} |
} |
spinlock_lock(&callerbox->lock); |
list_append(&call->link, &callerbox->answers); |
spinlock_unlock(&callerbox->lock); |
260,9 → 276,9 |
{ |
answerbox_t *box; |
spinlock_lock(&phone->lock); |
mutex_lock(&phone->lock); |
if (phone->state != IPC_PHONE_CONNECTED) { |
spinlock_unlock(&phone->lock); |
mutex_unlock(&phone->lock); |
if (call->flags & IPC_CALL_FORWARDED) { |
IPC_SET_RETVAL(call->data, EFORWARD); |
_ipc_answer_free_call(call); |
277,7 → 293,7 |
box = phone->callee; |
_ipc_call(phone, box, call); |
spinlock_unlock(&phone->lock); |
mutex_unlock(&phone->lock); |
return 0; |
} |
296,11 → 312,11 |
answerbox_t *box; |
call_t *call; |
spinlock_lock(&phone->lock); |
mutex_lock(&phone->lock); |
if (phone->state == IPC_PHONE_FREE || |
phone->state == IPC_PHONE_HUNGUP || |
phone->state == IPC_PHONE_CONNECTING) { |
spinlock_unlock(&phone->lock); |
mutex_unlock(&phone->lock); |
return -1; |
} |
box = phone->callee; |
319,7 → 335,7 |
} |
phone->state = IPC_PHONE_HUNGUP; |
spinlock_unlock(&phone->lock); |
mutex_unlock(&phone->lock); |
return 0; |
} |
343,8 → 359,11 |
list_remove(&call->link); |
spinlock_unlock(&oldbox->lock); |
if (mode & IPC_FF_ROUTE_FROM_ME) |
if (mode & IPC_FF_ROUTE_FROM_ME) { |
if (!call->data.caller_phone) |
call->data.caller_phone = call->data.phone; |
call->data.phone = newphone; |
} |
return ipc_call(newphone, call); |
} |
448,7 → 467,7 |
while (!list_empty(&TASK->answerbox.connected_phones)) { |
phone = list_get_instance(TASK->answerbox.connected_phones.next, |
phone_t, link); |
if (!spinlock_trylock(&phone->lock)) { |
if (SYNCH_FAILED(mutex_trylock(&phone->lock))) { |
spinlock_unlock(&TASK->answerbox.lock); |
DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD); |
goto restart_phones; |
459,7 → 478,7 |
phone->state = IPC_PHONE_SLAMMED; |
list_remove(&phone->link); |
spinlock_unlock(&phone->lock); |
mutex_unlock(&phone->lock); |
} |
/* Answer all messages in 'calls' and 'dispatched_calls' queues */ |
534,7 → 553,10 |
/* Print opened phones & details */ |
printf("PHONE:\n"); |
for (i = 0; i < IPC_MAX_PHONES; i++) { |
spinlock_lock(&task->phones[i].lock); |
if (SYNCH_FAILED(mutex_trylock(&task->phones[i].lock))) { |
printf("%d: mutex busy\n", i); |
continue; |
} |
if (task->phones[i].state != IPC_PHONE_FREE) { |
printf("%d: ", i); |
switch (task->phones[i].state) { |
556,10 → 578,10 |
default: |
break; |
} |
printf("active: %d\n", |
printf("active: %ld\n", |
atomic_get(&task->phones[i].active_calls)); |
} |
spinlock_unlock(&task->phones[i].lock); |
mutex_unlock(&task->phones[i].lock); |
} |
569,8 → 591,9 |
for (tmp = task->answerbox.calls.next; tmp != &task->answerbox.calls; |
tmp = tmp->next) { |
call = list_get_instance(tmp, call_t, link); |
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d " |
"A4:%d A5:%d Flags:%x\n", call, call->sender->taskid, |
printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun |
" A1:%" PRIun " A2:%" PRIun " A3:%" PRIun |
" A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, call->sender->taskid, |
IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data), |
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), |
IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), |
578,12 → 601,13 |
} |
/* Print answerbox - calls */ |
printf("ABOX - DISPATCHED CALLS:\n"); |
for (tmp=task->answerbox.dispatched_calls.next; |
tmp != &task->answerbox.dispatched_calls; |
tmp = tmp->next) { |
for (tmp = task->answerbox.dispatched_calls.next; |
tmp != &task->answerbox.dispatched_calls; |
tmp = tmp->next) { |
call = list_get_instance(tmp, call_t, link); |
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d " |
"A4:%d A5:%d Flags:%x\n", call, call->sender->taskid, |
printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun |
" A1:%" PRIun " A2:%" PRIun " A3:%" PRIun |
" A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, call->sender->taskid, |
IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data), |
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), |
IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), |
594,7 → 618,8 |
for (tmp = task->answerbox.answers.next; tmp != &task->answerbox.answers; |
tmp = tmp->next) { |
call = list_get_instance(tmp, call_t, link); |
printf("Callid:%p M:%d A1:%d A2:%d A3:%d A4:%d A5:%d Flags:%x\n", |
printf("Callid:%p M:%" PRIun " A1:%" PRIun " A2:%" PRIun |
" A3:%" PRIun " A4:%" PRIun " A5:%" PRIun " Flags:%x\n", |
call, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data), |
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), |
IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data), |
/branches/network/kernel/generic/src/ipc/ipcrsc.c |
---|
170,7 → 170,6 |
int i; |
spinlock_lock(&TASK->lock); |
for (i = 0; i < IPC_MAX_PHONES; i++) { |
if (TASK->phones[i].state == IPC_PHONE_HUNGUP && |
atomic_get(&TASK->phones[i].active_calls) == 0) |
183,8 → 182,9 |
} |
spinlock_unlock(&TASK->lock); |
if (i >= IPC_MAX_PHONES) |
if (i == IPC_MAX_PHONES) |
return -1; |
return i; |
} |
/branches/network/kernel/generic/src/ipc/irq.c |
---|
65,7 → 65,7 |
*/ |
static void code_execute(call_t *call, irq_code_t *code) |
{ |
int i; |
unsigned int i; |
unative_t dstval = 0; |
if (!code) |
/branches/network/kernel/generic/src/main/kinit.c |
---|
47,6 → 47,7 |
#include <proc/scheduler.h> |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <proc/program.h> |
#include <panic.h> |
#include <func.h> |
#include <cpu.h> |
146,7 → 147,8 |
/* |
* Create kernel console. |
*/ |
t = thread_create(kconsole, (void *) "kconsole", TASK, 0, "kconsole", false); |
t = thread_create(kconsole, (void *) "kconsole", TASK, 0, "kconsole", |
false); |
if (t) |
thread_ready(t); |
else |
153,40 → 155,54 |
panic("thread_create/kconsole\n"); |
interrupts_enable(); |
/* |
* Create user tasks, load RAM disk images. |
*/ |
count_t i; |
program_t programs[CONFIG_INIT_TASKS]; |
for (i = 0; i < init.cnt; i++) { |
/* |
* Run user tasks, load RAM disk images. |
*/ |
if (init.tasks[i].addr % FRAME_SIZE) { |
printf("init[%d].addr is not frame aligned", i); |
printf("init[%" PRIc "].addr is not frame aligned", i); |
continue; |
} |
task_t *utask = task_run_program((void *) init.tasks[i].addr, |
"uspace"); |
if (utask) { |
int rc = program_create_from_image((void *) init.tasks[i].addr, |
&programs[i]); |
if (rc == 0 && programs[i].task != NULL) { |
/* |
* Set capabilities to init userspace tasks. |
*/ |
cap_set(utask, CAP_CAP | CAP_MEM_MANAGER | |
cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER | |
CAP_IO_MANAGER | CAP_PREEMPT_CONTROL | CAP_IRQ_REG); |
if (!ipc_phone_0) |
ipc_phone_0 = &utask->answerbox; |
if (!ipc_phone_0) |
ipc_phone_0 = &programs[i].task->answerbox; |
} else if (rc == 0) { |
/* It was the program loader and was registered */ |
} else { |
int rd = init_rd((rd_header *) init.tasks[i].addr, |
/* RAM disk image */ |
int rd = init_rd((rd_header_t *) init.tasks[i].addr, |
init.tasks[i].size); |
if (rd != RE_OK) |
printf("Init binary %zd not used, error code %d.\n", i, rd); |
printf("Init binary %" PRIc " not used, error " |
"code %d.\n", i, rd); |
} |
} |
/* |
* Run user tasks with reasonable delays |
*/ |
for (i = 0; i < init.cnt; i++) { |
if (programs[i].task != NULL) { |
thread_usleep(50000); |
program_ready(&programs[i]); |
} |
} |
if (!stdin) { |
while (1) { |
thread_sleep(1); |
/branches/network/kernel/generic/src/main/main.c |
---|
57,9 → 57,11 |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/tasklet.h> |
#include <main/kinit.h> |
#include <main/version.h> |
#include <console/kconsole.h> |
#include <console/console.h> |
#include <cpu.h> |
#include <align.h> |
#include <interrupt.h> |
77,16 → 79,16 |
#include <ipc/ipc.h> |
#include <macros.h> |
#include <adt/btree.h> |
#include <console/klog.h> |
#include <smp/smp.h> |
#include <ddi/ddi.h> |
/** Global configuration structure. */ |
config_t config; |
/** Initial user-space tasks */ |
init_t init = { |
0 |
.cnt = 0 |
}; |
/** Boot allocations. */ |
102,15 → 104,16 |
* the linker or the low level assembler code with |
* appropriate sizes and addresses. |
*/ |
uintptr_t hardcoded_load_address = 0; /**< Virtual address of where the kernel |
* is loaded. */ |
size_t hardcoded_ktext_size = 0; /**< Size of the kernel code in bytes. |
*/ |
size_t hardcoded_kdata_size = 0; /**< Size of the kernel data in bytes. |
*/ |
uintptr_t stack_safe = 0; /**< Lowest safe stack virtual address. |
*/ |
/**< Virtual address of where the kernel is loaded. */ |
uintptr_t hardcoded_load_address = 0; |
/**< Size of the kernel code in bytes. */ |
size_t hardcoded_ktext_size = 0; |
/**< Size of the kernel data in bytes. */ |
size_t hardcoded_kdata_size = 0; |
/**< Lowest safe stack virtual address. */ |
uintptr_t stack_safe = 0; |
void main_bsp(void); |
void main_ap(void); |
129,9 → 132,11 |
/** Main kernel routine for bootstrap CPU. |
* |
* Initializes the kernel by bootstrap CPU. |
* This function passes control directly to |
* main_bsp_separated_stack(). |
* The code here still runs on the boot stack, which knows nothing about |
* preemption counts. Because of that, this function cannot directly call |
* functions that disable or enable preemption (e.g. spinlock_lock()). The |
* primary task of this function is to calculate address of a new stack and |
* switch to it. |
* |
* Assuming interrupts_disable(). |
* |
184,89 → 189,93 |
*/ |
void main_bsp_separated_stack(void) |
{ |
task_t *k; |
thread_t *t; |
count_t i; |
/* Keep this the first thing. */ |
the_initialize(THE); |
LOG(); |
version_print(); |
LOG("\nconfig.base=%#" PRIp " config.kernel_size=%" PRIs |
"\nconfig.stack_base=%#" PRIp " config.stack_size=%" PRIs, |
config.base, config.kernel_size, config.stack_base, |
config.stack_size); |
/* |
* kconsole data structures must be initialized very early |
* because other subsystems will register their respective |
* commands. |
*/ |
kconsole_init(); |
LOG_EXEC(kconsole_init()); |
/* |
* Exception handler initialization, before architecture |
* starts adding its own handlers |
*/ |
exc_init(); |
LOG_EXEC(exc_init()); |
/* |
* Memory management subsystems initialization. |
*/ |
arch_pre_mm_init(); |
frame_init(); |
*/ |
LOG_EXEC(arch_pre_mm_init()); |
LOG_EXEC(frame_init()); |
/* Initialize at least 1 memory segment big enough for slab to work. */ |
slab_cache_init(); |
btree_init(); |
as_init(); |
page_init(); |
tlb_init(); |
ddi_init(); |
arch_post_mm_init(); |
LOG_EXEC(slab_cache_init()); |
LOG_EXEC(btree_init()); |
LOG_EXEC(as_init()); |
LOG_EXEC(page_init()); |
LOG_EXEC(tlb_init()); |
LOG_EXEC(ddi_init()); |
LOG_EXEC(tasklet_init()); |
LOG_EXEC(arch_post_mm_init()); |
LOG_EXEC(arch_pre_smp_init()); |
LOG_EXEC(smp_init()); |
version_print(); |
printf("kernel: %.*p hardcoded_ktext_size=%zd KB, " |
"hardcoded_kdata_size=%zd KB\n", sizeof(uintptr_t) * 2, |
config.base, SIZE2KB(hardcoded_ktext_size), |
SIZE2KB(hardcoded_kdata_size)); |
printf("stack: %.*p size=%zd KB\n", sizeof(uintptr_t) * 2, |
config.stack_base, SIZE2KB(config.stack_size)); |
arch_pre_smp_init(); |
smp_init(); |
/* Slab must be initialized after we know the number of processors. */ |
slab_enable_cpucache(); |
LOG_EXEC(slab_enable_cpucache()); |
printf("Detected %zu CPU(s), %llu MB free memory\n", |
config.cpu_count, SIZE2MB(zone_total_size())); |
cpu_init(); |
printf("Detected %" PRIc " CPU(s), %" PRIu64" MiB free memory\n", |
config.cpu_count, SIZE2MB(zone_total_size())); |
calibrate_delay_loop(); |
clock_counter_init(); |
timeout_init(); |
scheduler_init(); |
task_init(); |
thread_init(); |
futex_init(); |
klog_init(); |
LOG_EXEC(cpu_init()); |
LOG_EXEC(calibrate_delay_loop()); |
LOG_EXEC(clock_counter_init()); |
LOG_EXEC(timeout_init()); |
LOG_EXEC(scheduler_init()); |
LOG_EXEC(task_init()); |
LOG_EXEC(thread_init()); |
LOG_EXEC(futex_init()); |
if (init.cnt > 0) { |
count_t i; |
for (i = 0; i < init.cnt; i++) |
printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, |
sizeof(uintptr_t) * 2, init.tasks[i].addr, i, |
printf("init[%" PRIc "].addr=%#" PRIp ", init[%" PRIc |
"].size=%#" PRIs "\n", i, init.tasks[i].addr, i, |
init.tasks[i].size); |
} else |
printf("No init binaries found\n"); |
ipc_init(); |
LOG_EXEC(ipc_init()); |
LOG_EXEC(klog_init()); |
/* |
* Create kernel task. |
*/ |
k = task_create(AS_KERNEL, "kernel"); |
if (!k) |
panic("can't create kernel task\n"); |
task_t *kernel = task_create(AS_KERNEL, "kernel"); |
if (!kernel) |
panic("Can't create kernel task\n"); |
/* |
* Create the first thread. |
*/ |
t = thread_create(kinit, NULL, k, 0, "kinit", true); |
if (!t) |
panic("can't create kinit thread\n"); |
thread_ready(t); |
thread_t *kinit_thread = thread_create(kinit, NULL, kernel, 0, "kinit", |
true); |
if (!kinit_thread) |
panic("Can't create kinit thread\n"); |
LOG_EXEC(thread_ready(kinit_thread)); |
/* |
* This call to scheduler() will return to kinit, |
/branches/network/kernel/generic/src/proc/task.c |
---|
35,10 → 35,8 |
* @brief Task management. |
*/ |
#include <main/uinit.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <atomic.h> |
45,23 → 43,17 |
#include <synch/spinlock.h> |
#include <synch/waitq.h> |
#include <arch.h> |
#include <panic.h> |
#include <arch/barrier.h> |
#include <adt/avl.h> |
#include <adt/btree.h> |
#include <adt/list.h> |
#include <ipc/ipc.h> |
#include <security/cap.h> |
#include <memstr.h> |
#include <ipc/ipcrsc.h> |
#include <print.h> |
#include <lib/elf.h> |
#include <errno.h> |
#include <func.h> |
#include <syscall/copy.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
/** Spinlock protecting the tasks_tree AVL tree. */ |
SPINLOCK_INITIALIZE(tasks_lock); |
79,11 → 71,7 |
static task_id_t task_counter = 0; |
/** Initialize tasks |
* |
* Initialize kernel tasks support. |
* |
*/ |
/** Initialize kernel tasks support. */ |
void task_init(void) |
{ |
TASK = NULL; |
91,7 → 79,8 |
} |
/* |
* The idea behind this walker is to remember a single task different from TASK. |
* The idea behind this walker is to remember a single task different from |
* TASK. |
*/ |
static bool task_done_walker(avltree_node_t *node, void *arg) |
{ |
106,9 → 95,7 |
return true; /* continue the walk */ |
} |
/** Kill all tasks except the current task. |
* |
*/ |
/** Kill all tasks except the current task. */ |
void task_done(void) |
{ |
task_t *t; |
128,7 → 115,7 |
interrupts_restore(ipl); |
#ifdef CONFIG_DEBUG |
printf("Killing task %llu\n", id); |
printf("Killing task %" PRIu64 "\n", id); |
#endif |
task_kill(id); |
thread_usleep(10000); |
140,15 → 127,13 |
} while (t != NULL); |
} |
/** Create new task |
/** Create new task with no threads. |
* |
* Create new task with no threads. |
* @param as Task's address space. |
* @param name Symbolic name. |
* |
* @param as Task's address space. |
* @param name Symbolic name. |
* @return New task's structure. |
* |
* @return New task's structure |
* |
*/ |
task_t *task_create(as_t *as, char *name) |
{ |
171,7 → 156,7 |
ta->capabilities = 0; |
ta->cycles = 0; |
ipc_answerbox_init(&ta->answerbox); |
ipc_answerbox_init(&ta->answerbox, ta); |
for (i = 0; i < IPC_MAX_PHONES; i++) |
ipc_phone_init(&ta->phones[i]); |
if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context, |
179,7 → 164,7 |
ipc_phone_connect(&ta->phones[0], ipc_phone_0); |
atomic_set(&ta->active_calls, 0); |
mutex_initialize(&ta->futexes_lock); |
mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE); |
btree_create(&ta->futexes); |
ipl = interrupts_disable(); |
202,7 → 187,7 |
/** Destroy task. |
* |
* @param t Task to be destroyed. |
* @param t Task to be destroyed. |
*/ |
void task_destroy(task_t *t) |
{ |
233,73 → 218,18 |
TASK = NULL; |
} |
/** Create new task with 1 thread and run it |
* |
* @param program_addr Address of program executable image. |
* @param name Program name. |
* |
* @return Task of the running program or NULL on error. |
*/ |
task_t *task_run_program(void *program_addr, char *name) |
{ |
as_t *as; |
as_area_t *a; |
int rc; |
thread_t *t; |
task_t *task; |
uspace_arg_t *kernel_uarg; |
as = as_create(0); |
ASSERT(as); |
rc = elf_load((elf_header_t *) program_addr, as); |
if (rc != EE_OK) { |
as_destroy(as); |
return NULL; |
} |
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); |
kernel_uarg->uspace_entry = |
(void *) ((elf_header_t *) program_addr)->e_entry; |
kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS; |
kernel_uarg->uspace_thread_function = NULL; |
kernel_uarg->uspace_thread_arg = NULL; |
kernel_uarg->uspace_uarg = NULL; |
task = task_create(as, name); |
ASSERT(task); |
/* |
* Create the data as_area. |
*/ |
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, |
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS, |
AS_AREA_ATTR_NONE, &anon_backend, NULL); |
/* |
* Create the main thread. |
*/ |
t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE, |
"uinit", false); |
ASSERT(t); |
thread_ready(t); |
return task; |
} |
/** Syscall for reading task ID from userspace. |
* |
* @param uspace_task_id Userspace address of 8-byte buffer where to store |
* current task ID. |
* @param uspace_task_id userspace address of 8-byte buffer |
* where to store current task ID. |
* |
* @return 0 on success or an error code from @ref errno.h. |
* @return Zero on success or an error code from @ref errno.h. |
*/ |
unative_t sys_task_get_id(task_id_t *uspace_task_id) |
{ |
/* |
* No need to acquire lock on TASK because taskid |
* remains constant for the lifespan of the task. |
* No need to acquire lock on TASK because taskid remains constant for |
* the lifespan of the task. |
*/ |
return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid, |
sizeof(TASK->taskid)); |
307,16 → 237,15 |
/** Find task structure corresponding to task ID. |
* |
* The tasks_lock must be already held by the caller of this function |
* and interrupts must be disabled. |
* The tasks_lock must be already held by the caller of this function and |
* interrupts must be disabled. |
* |
* @param id Task ID. |
* @param id Task ID. |
* |
* @return Task structure address or NULL if there is no such task ID. |
* @return Task structure address or NULL if there is no such task |
* ID. |
*/ |
task_t *task_find_by_id(task_id_t id) |
{ |
avltree_node_t *node; |
task_t *task_find_by_id(task_id_t id) { avltree_node_t *node; |
node = avltree_search(&tasks_tree, (avltree_key_t) id); |
327,11 → 256,13 |
/** Get accounting data of given task. |
* |
* Note that task lock of 't' must be already held and |
* interrupts must be already disabled. |
* Note that task lock of 't' must be already held and interrupts must be |
* already disabled. |
* |
* @param t Pointer to thread. |
* @param t Pointer to thread. |
* |
* @return Number of cycles used by the task and all its threads |
* so far. |
*/ |
uint64_t task_get_accounting(task_t *t) |
{ |
363,9 → 294,9 |
* This function is idempotent. |
* It signals all the task's threads to bail it out. |
* |
* @param id ID of the task to be killed. |
* @param id ID of the task to be killed. |
* |
* @return 0 on success or an error code from errno.h |
* @return Zero on success or an error code from errno.h. |
*/ |
int task_kill(task_id_t id) |
{ |
386,7 → 317,7 |
spinlock_unlock(&tasks_lock); |
/* |
* Interrupt all threads except ktaskclnp. |
* Interrupt all threads. |
*/ |
spinlock_lock(&ta->lock); |
for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { |
420,18 → 351,22 |
uint64_t cycles; |
char suffix; |
order(task_get_accounting(t), &cycles, &suffix); |
if (sizeof(void *) == 4) |
printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd %6zd", |
t->taskid, t->name, t->context, t, t->as, cycles, suffix, |
t->refcount, atomic_get(&t->active_calls)); |
else |
printf("%-6llu %-10s %-3ld %#18zx %#18zx %9llu%c %7zd %6zd", |
t->taskid, t->name, t->context, t, t->as, cycles, suffix, |
t->refcount, atomic_get(&t->active_calls)); |
#ifdef __32_BITS__ |
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64 |
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles, |
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls)); |
#endif |
#ifdef __64_BITS__ |
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64 |
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles, |
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls)); |
#endif |
for (j = 0; j < IPC_MAX_PHONES; j++) { |
if (t->phones[j].callee) |
printf(" %zd:%#zx", j, t->phones[j].callee); |
printf(" %d:%p", j, t->phones[j].callee); |
} |
printf("\n"); |
447,19 → 382,21 |
/* Messing with task structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
if (sizeof(void *) == 4) { |
printf("taskid name ctx address as " |
"cycles threads calls callee\n"); |
printf("------ ---------- --- ---------- ---------- " |
"---------- ------- ------ ------>\n"); |
} else { |
printf("taskid name ctx address as " |
"cycles threads calls callee\n"); |
printf("------ ---------- --- ------------------ ------------------ " |
"---------- ------- ------ ------>\n"); |
} |
#ifdef __32_BITS__ |
printf("taskid name ctx address as " |
"cycles threads calls callee\n"); |
printf("------ ---------- --- ---------- ---------- " |
"---------- ------- ------ ------>\n"); |
#endif |
#ifdef __64_BITS__ |
printf("taskid name ctx address as " |
"cycles threads calls callee\n"); |
printf("------ ---------- --- ------------------ ------------------ " |
"---------- ------- ------ ------>\n"); |
#endif |
avltree_walk(&tasks_tree, task_print_walker, NULL); |
spinlock_unlock(&tasks_lock); |
/branches/network/kernel/generic/src/proc/thread.c |
---|
67,9 → 67,13 |
#include <main/uinit.h> |
#include <syscall/copy.h> |
#include <errno.h> |
#include <console/klog.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
/** Thread states */ |
char *thread_states[] = { |
"Invalid", |
87,7 → 91,7 |
*/ |
SPINLOCK_INITIALIZE(threads_lock); |
/** ALV tree of all threads. |
/** AVL tree of all threads. |
* |
* When a thread is found in the threads_tree AVL tree, it is guaranteed to |
* exist as long as the threads_lock is held. |
265,15 → 269,17 |
* |
* Create a new thread. |
* |
* @param func Thread's implementing function. |
* @param arg Thread's implementing function argument. |
* @param task Task to which the thread belongs. |
* @param flags Thread flags. |
* @param name Symbolic name. |
* @param uncounted Thread's accounting doesn't affect accumulated task |
* accounting. |
* @param func Thread's implementing function. |
* @param arg Thread's implementing function argument. |
* @param task Task to which the thread belongs. The caller must |
* guarantee that the task won't cease to exist during the |
* call. The task's lock may not be held. |
* @param flags Thread flags. |
* @param name Symbolic name. |
* @param uncounted Thread's accounting doesn't affect accumulated task |
* accounting. |
* |
* @return New thread's structure on success, NULL on failure. |
* @return New thread's structure on success, NULL on failure. |
* |
*/ |
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, |
287,8 → 293,7 |
return NULL; |
/* Not needed, but good for debugging */ |
memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, |
0); |
memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0); |
ipl = interrupts_disable(); |
spinlock_lock(&tidlock); |
441,9 → 446,8 |
*/ |
if (THREAD->flags & THREAD_FLAG_USPACE) { |
ipc_cleanup(); |
futex_cleanup(); |
klog_printf("Cleanup of task %llu completed.", |
TASK->taskid); |
futex_cleanup(); |
LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid); |
} |
} |
579,33 → 583,37 |
static bool thread_walker(avltree_node_t *node, void *arg) |
{ |
thread_t *t; |
t = avltree_get_instance(node, thread_t, threads_tree_node); |
thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node); |
uint64_t cycles; |
char suffix; |
order(t->cycles, &cycles, &suffix); |
if (sizeof(void *) == 4) |
printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ", |
t->tid, t->name, t, thread_states[t->state], t->task, |
t->task->context, t->thread_code, t->kstack, cycles, suffix); |
else |
printf("%-6llu %-10s %#18zx %-8s %#18zx %-3ld %#18zx %#18zx %9llu%c ", |
t->tid, t->name, t, thread_states[t->state], t->task, |
t->task->context, t->thread_code, t->kstack, cycles, suffix); |
#ifdef __32_BITS__ |
printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" PRIu64 "%c ", |
t->tid, t->name, t, thread_states[t->state], t->task, |
t->task->context, t->thread_code, t->kstack, cycles, suffix); |
#endif |
#ifdef __64_BITS__ |
printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" PRIu64 "%c ", |
t->tid, t->name, t, thread_states[t->state], t->task, |
t->task->context, t->thread_code, t->kstack, cycles, suffix); |
#endif |
if (t->cpu) |
printf("%-4zd", t->cpu->id); |
printf("%-4u", t->cpu->id); |
else |
printf("none"); |
if (t->state == Sleeping) { |
if (sizeof(uintptr_t) == 4) |
printf(" %#10zx", t->sleep_queue); |
else |
printf(" %#18zx", t->sleep_queue); |
#ifdef __32_BITS__ |
printf(" %10p", t->sleep_queue); |
#endif |
#ifdef __64_BITS__ |
printf(" %18p", t->sleep_queue); |
#endif |
} |
printf("\n"); |
621,23 → 629,25 |
/* Messing with thread structures, avoid deadlock */ |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
if (sizeof(uintptr_t) == 4) { |
printf("tid name address state task " |
"ctx code stack cycles cpu " |
"waitqueue\n"); |
printf("------ ---------- ---------- -------- ---------- " |
"--- ---------- ---------- ---------- ---- " |
"----------\n"); |
} else { |
printf("tid name address state task " |
"ctx code stack cycles cpu " |
"waitqueue\n"); |
printf("------ ---------- ------------------ -------- ------------------ " |
"--- ------------------ ------------------ ---------- ---- " |
"------------------\n"); |
} |
#ifdef __32_BITS__ |
printf("tid name address state task " |
"ctx code stack cycles cpu " |
"waitqueue\n"); |
printf("------ ---------- ---------- -------- ---------- " |
"--- ---------- ---------- ---------- ---- " |
"----------\n"); |
#endif |
#ifdef __64_BITS__ |
printf("tid name address state task " |
"ctx code stack cycles cpu " |
"waitqueue\n"); |
printf("------ ---------- ------------------ -------- ------------------ " |
"--- ------------------ ------------------ ---------- ---- " |
"------------------\n"); |
#endif |
avltree_walk(&threads_tree, thread_walker, NULL); |
spinlock_unlock(&threads_lock); |
662,7 → 672,6 |
return node != NULL; |
} |
/** Update accounting of current thread. |
* |
* Note that thread_lock on THREAD must be already held and |
/branches/network/kernel/generic/src/proc/program.c |
---|
0,0 → 1,239 |
/* |
* Copyright (c) 2001-2004 Jakub Jermar |
* Copyright (c) 2008 Jiri Svoboda |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** |
* @file |
* @brief Running userspace programs. |
*/ |
#include <main/uinit.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/uarg.h> |
#include <mm/as.h> |
#include <mm/slab.h> |
#include <arch.h> |
#include <adt/list.h> |
#include <ipc/ipc.h> |
#include <ipc/ipcrsc.h> |
#include <security/cap.h> |
#include <lib/elf.h> |
#include <errno.h> |
#include <print.h> |
#include <syscall/copy.h> |
#include <proc/program.h> |
#ifndef LOADED_PROG_STACK_PAGES_NO |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
/** |
* Points to the binary image used as the program loader. All non-initial |
* tasks are created from this executable image. |
*/ |
void *program_loader = NULL; |
/** Create a program using an existing address space. |
* |
* @param as Address space containing a binary program image. |
* @param entry_addr Program entry-point address in program address space. |
* @param p Buffer for storing program information. |
*/ |
void program_create(as_t *as, uintptr_t entry_addr, program_t *p) |
{ |
as_area_t *a; |
uspace_arg_t *kernel_uarg; |
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0); |
kernel_uarg->uspace_entry = (void *) entry_addr; |
kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS; |
kernel_uarg->uspace_thread_function = NULL; |
kernel_uarg->uspace_thread_arg = NULL; |
kernel_uarg->uspace_uarg = NULL; |
p->task = task_create(as, "app"); |
ASSERT(p->task); |
/* |
* Create the data as_area. |
*/ |
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE, |
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS, |
AS_AREA_ATTR_NONE, &anon_backend, NULL); |
/* |
* Create the main thread. |
*/ |
p->main_thread = thread_create(uinit, kernel_uarg, p->task, |
THREAD_FLAG_USPACE, "uinit", false); |
ASSERT(p->main_thread); |
} |
/** Parse an executable image in the kernel memory. |
* |
* If the image belongs to a program loader, it is registered as such, |
* (and *task is set to NULL). Otherwise a task is created from the |
* executable image. The task is returned in *task. |
* |
* @param image_addr Address of an executable program image. |
* @param p Buffer for storing program info. If image_addr |
* points to a loader image, p->task will be set to |
* NULL and EOK will be returned. |
* |
* @return EOK on success or negative error code. |
*/ |
int program_create_from_image(void *image_addr, program_t *p) |
{ |
as_t *as; |
unsigned int rc; |
as = as_create(0); |
ASSERT(as); |
rc = elf_load((elf_header_t *) image_addr, as, 0); |
if (rc != EE_OK) { |
as_destroy(as); |
p->task = NULL; |
p->main_thread = NULL; |
if (rc != EE_LOADER) |
return ENOTSUP; |
/* Register image as the program loader */ |
ASSERT(program_loader == NULL); |
program_loader = image_addr; |
printf("Registered program loader at 0x%" PRIp "\n", |
image_addr); |
return EOK; |
} |
program_create(as, ((elf_header_t *) image_addr)->e_entry, p); |
return EOK; |
} |
/** Create a task from the program loader image. |
* |
* @param p Buffer for storing program info. |
* @return EOK on success or negative error code. |
*/ |
int program_create_loader(program_t *p) |
{ |
as_t *as; |
unsigned int rc; |
void *loader; |
as = as_create(0); |
ASSERT(as); |
loader = program_loader; |
if (!loader) { |
printf("Cannot spawn loader as none was registered\n"); |
return ENOENT; |
} |
rc = elf_load((elf_header_t *) program_loader, as, ELD_F_LOADER); |
if (rc != EE_OK) { |
as_destroy(as); |
return ENOENT; |
} |
program_create(as, ((elf_header_t *) program_loader)->e_entry, p); |
return EOK; |
} |
/** Make program ready. |
* |
* Switch program's main thread to the ready state. |
* |
* @param p Program to make ready. |
*/ |
void program_ready(program_t *p) |
{ |
thread_ready(p->main_thread); |
} |
/** Syscall for creating a new loader instance from userspace. |
* |
* Creates a new task from the program loader image, connects a phone |
* to it and stores the phone id into the provided buffer. |
* |
* @param uspace_phone_id Userspace address where to store the phone id. |
* |
* @return 0 on success or an error code from @ref errno.h. |
*/ |
unative_t sys_program_spawn_loader(int *uspace_phone_id) |
{ |
program_t p; |
int fake_id; |
int rc; |
int phone_id; |
fake_id = 0; |
/* Before we even try creating the task, see if we can write the id */ |
rc = (unative_t) copy_to_uspace(uspace_phone_id, &fake_id, |
sizeof(fake_id)); |
if (rc != 0) |
return rc; |
phone_id = phone_alloc(); |
if (phone_id < 0) |
return ELIMIT; |
rc = program_create_loader(&p); |
if (rc != 0) |
return rc; |
phone_connect(phone_id, &p.task->answerbox); |
/* No need to aquire lock before task_ready() */ |
rc = (unative_t) copy_to_uspace(uspace_phone_id, &phone_id, |
sizeof(phone_id)); |
if (rc != 0) { |
/* Ooops */ |
ipc_phone_hangup(&TASK->phones[phone_id]); |
task_kill(p.task->taskid); |
return rc; |
} |
// FIXME: control the capabilities |
cap_set(p.task, cap_get(TASK)); |
program_ready(&p); |
return EOK; |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/proc/scheduler.c |
---|
451,8 → 451,8 |
/* |
* Entering state is unexpected. |
*/ |
panic("tid%llu: unexpected state %s\n", THREAD->tid, |
thread_states[THREAD->state]); |
panic("tid%" PRIu64 ": unexpected state %s\n", |
THREAD->tid, thread_states[THREAD->state]); |
break; |
} |
504,9 → 504,9 |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%d: tid %llu (priority=%d, ticks=%llu, nrdy=%ld)\n", |
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, |
atomic_get(&CPU->nrdy)); |
printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 |
", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, |
THREAD->ticks, atomic_get(&CPU->nrdy)); |
#endif |
/* |
640,9 → 640,9 |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%d: TID %llu -> cpu%d, nrdy=%ld, " |
"avg=%nd\n", CPU->id, t->tid, CPU->id, |
atomic_get(&CPU->nrdy), |
printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, " |
"nrdy=%ld, avg=%ld\n", CPU->id, t->tid, |
CPU->id, atomic_get(&CPU->nrdy), |
atomic_get(&nrdy) / config.cpu_active); |
#endif |
t->flags |= THREAD_FLAG_STOLEN; |
708,7 → 708,7 |
continue; |
spinlock_lock(&cpus[cpu].lock); |
printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n", |
printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIc "\n", |
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), |
cpus[cpu].needs_relink); |
719,11 → 719,11 |
spinlock_unlock(&r->lock); |
continue; |
} |
printf("\trq[%d]: ", i); |
printf("\trq[%u]: ", i); |
for (cur = r->rq_head.next; cur != &r->rq_head; |
cur = cur->next) { |
t = list_get_instance(cur, thread_t, rq_link); |
printf("%llu(%s) ", t->tid, |
printf("%" PRIu64 "(%s) ", t->tid, |
thread_states[t->state]); |
} |
printf("\n"); |
/branches/network/kernel/generic/src/proc/tasklet.c |
---|
0,0 → 1,64 |
/* |
* Copyright (c) 2007 Jan Hudecek |
* Copyright (c) 2008 Martin Decky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** @file tasklet.c |
* @brief Tasklet implementation |
*/ |
#include <proc/tasklet.h> |
#include <synch/spinlock.h> |
#include <mm/slab.h> |
#include <config.h> |
/** Spinlock protecting list of tasklets */ |
SPINLOCK_INITIALIZE(tasklet_lock); |
/** Array of tasklet lists for every CPU */ |
tasklet_descriptor_t **tasklet_list; |
void tasklet_init(void) |
{ |
unsigned int i; |
tasklet_list = malloc(sizeof(tasklet_descriptor_t *) * config.cpu_count, 0); |
if (!tasklet_list) |
panic("Error initializing tasklets"); |
for (i = 0; i < config.cpu_count; i++) |
tasklet_list[i] = NULL; |
spinlock_initialize(&tasklet_lock, "tasklet_lock"); |
} |
/** @} |
*/ |
/branches/network/kernel/generic/src/lib/objc_ext.c |
---|
File deleted |
/branches/network/kernel/generic/src/lib/objc.c |
---|
File deleted |
/branches/network/kernel/generic/src/lib/memstr.c |
---|
59,7 → 59,7 |
*/ |
void *_memcpy(void * dst, const void *src, size_t cnt) |
{ |
int i, j; |
unsigned int i, j; |
if (ALIGN_UP((uintptr_t) src, sizeof(unative_t)) != (uintptr_t) src || |
ALIGN_UP((uintptr_t) dst, sizeof(unative_t)) != (uintptr_t) dst) { |
67,14 → 67,14 |
((uint8_t *) dst)[i] = ((uint8_t *) src)[i]; |
} else { |
for (i = 0; i < cnt/sizeof(unative_t); i++) |
for (i = 0; i < cnt / sizeof(unative_t); i++) |
((unative_t *) dst)[i] = ((unative_t *) src)[i]; |
for (j = 0; j < cnt%sizeof(unative_t); j++) |
for (j = 0; j < cnt % sizeof(unative_t); j++) |
((uint8_t *)(((unative_t *) dst) + i))[j] = ((uint8_t *)(((unative_t *) src) + i))[j]; |
} |
return (char *) src; |
return (char *) dst; |
} |
/** Fill block of memory |
87,9 → 87,9 |
* @param x Value to fill. |
* |
*/ |
void _memsetb(uintptr_t dst, size_t cnt, uint8_t x) |
void _memsetb(void *dst, size_t cnt, uint8_t x) |
{ |
int i; |
unsigned int i; |
uint8_t *p = (uint8_t *) dst; |
for (i = 0; i < cnt; i++) |
106,9 → 106,9 |
* @param x Value to fill. |
* |
*/ |
void _memsetw(uintptr_t dst, size_t cnt, uint16_t x) |
void _memsetw(void *dst, size_t cnt, uint16_t x) |
{ |
int i; |
unsigned int i; |
uint16_t *p = (uint16_t *) dst; |
for (i = 0; i < cnt; i++) |
/branches/network/kernel/generic/src/lib/rd.c |
---|
38,11 → 38,10 |
*/ |
#include <lib/rd.h> |
#include <arch/byteorder.h> |
#include <byteorder.h> |
#include <mm/frame.h> |
#include <sysinfo/sysinfo.h> |
#include <ddi/ddi.h> |
#include <print.h> |
#include <align.h> |
static parea_t rd_parea; /**< Physical memory area for rd. */ |
49,14 → 48,14 |
/** |
* RAM disk initialization routine. At this point, the RAM disk memory is shared |
* and information about the share is provided as sysinfo values to the userspace |
* tasks. |
* and information about the share is provided as sysinfo values to the |
* userspace tasks. |
*/ |
int init_rd(rd_header * header, size_t size) |
int init_rd(rd_header_t *header, size_t size) |
{ |
/* Identify RAM disk */ |
if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || |
(header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) |
(header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) |
return RE_INVALID; |
/* Identify version */ |
80,10 → 79,7 |
if ((hsize % FRAME_SIZE) || (dsize % FRAME_SIZE)) |
return RE_UNSUPPORTED; |
if (dsize % FRAME_SIZE) |
return RE_UNSUPPORTED; |
if (hsize > size) |
return RE_INVALID; |
90,7 → 86,8 |
if ((uint64_t) hsize + dsize > size) |
dsize = size - hsize; |
rd_parea.pbase = ALIGN_DOWN((uintptr_t) KA2PA((void *) header + hsize), FRAME_SIZE); |
rd_parea.pbase = ALIGN_DOWN((uintptr_t) KA2PA((void *) header + hsize), |
FRAME_SIZE); |
rd_parea.vbase = (uintptr_t) ((void *) header + hsize); |
rd_parea.frames = SIZE2FRAMES(dsize); |
rd_parea.cacheable = true; |
99,8 → 96,8 |
sysinfo_set_item_val("rd", NULL, true); |
sysinfo_set_item_val("rd.header_size", NULL, hsize); |
sysinfo_set_item_val("rd.size", NULL, dsize); |
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) |
KA2PA((void *) header + hsize)); |
sysinfo_set_item_val("rd.address.physical", NULL, |
(unative_t) KA2PA((void *) header + hsize)); |
return RE_OK; |
} |
/branches/network/kernel/generic/src/lib/elf.c |
---|
57,7 → 57,7 |
}; |
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, |
as_t *as); |
as_t *as, int flags); |
static int section_header(elf_section_header_t *entry, elf_header_t *elf, |
as_t *as); |
static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, |
67,9 → 67,10 |
* |
* @param header Pointer to ELF header in memory |
* @param as Created and properly mapped address space |
* @param flags A combination of ELD_F_* |
* @return EE_OK on success |
*/ |
int elf_load(elf_header_t *header, as_t * as) |
unsigned int elf_load(elf_header_t *header, as_t * as, int flags) |
{ |
int i, rc; |
100,6 → 101,10 |
if (header->e_type != ET_EXEC) |
return EE_UNSUPPORTED; |
/* Check if the ELF image starts on a page boundary */ |
if (ALIGN_UP((uintptr_t)header, PAGE_SIZE) != (uintptr_t)header) |
return EE_UNSUPPORTED; |
/* Walk through all segment headers and process them. */ |
for (i = 0; i < header->e_phnum; i++) { |
elf_segment_header_t *seghdr; |
106,7 → 111,7 |
seghdr = &((elf_segment_header_t *)(((uint8_t *) header) + |
header->e_phoff))[i]; |
rc = segment_header(seghdr, header, as); |
rc = segment_header(seghdr, header, as, flags); |
if (rc != EE_OK) |
return rc; |
} |
131,7 → 136,7 |
* |
* @return NULL terminated description of error. |
*/ |
char *elf_error(int rc) |
char *elf_error(unsigned int rc) |
{ |
ASSERT(rc < sizeof(error_codes) / sizeof(char *)); |
147,8 → 152,10 |
* @return EE_OK on success, error code otherwise. |
*/ |
static int segment_header(elf_segment_header_t *entry, elf_header_t *elf, |
as_t *as) |
as_t *as, int flags) |
{ |
char *interp; |
switch (entry->p_type) { |
case PT_NULL: |
case PT_PHDR: |
158,6 → 165,16 |
break; |
case PT_DYNAMIC: |
case PT_INTERP: |
interp = (char *)elf + entry->p_offset; |
/* FIXME */ |
/*if (memcmp((uintptr_t)interp, (uintptr_t)ELF_INTERP_ZSTR, |
ELF_INTERP_ZLEN) != 0) { |
return EE_UNSUPPORTED; |
}*/ |
if ((flags & ELD_F_LOADER) == 0) { |
return EE_LOADER; |
} |
break; |
case PT_SHLIB: |
case PT_NOTE: |
case PT_LOPROC: |
182,6 → 199,8 |
as_area_t *a; |
int flags = 0; |
mem_backend_data_t backend_data; |
uintptr_t base; |
size_t mem_sz; |
backend_data.elf = elf; |
backend_data.segment = entry; |
201,13 → 220,14 |
flags |= AS_AREA_READ; |
flags |= AS_AREA_CACHEABLE; |
/* |
* Check if the virtual address starts on page boundary. |
/* |
* Align vaddr down, inserting a little "gap" at the beginning. |
* Adjust area size, so that its end remains in place. |
*/ |
if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr) |
return EE_UNSUPPORTED; |
base = ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE); |
mem_sz = entry->p_memsz + (entry->p_vaddr - base); |
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, |
a = as_area_create(as, flags, mem_sz, base, |
AS_AREA_ATTR_NONE, &elf_backend, &backend_data); |
if (!a) |
return EE_MEMORY; |
/branches/network/kernel/generic/src/lib/func.c |
---|
73,7 → 73,7 |
} |
#endif |
if (CPU) |
printf("cpu%d: halted\n", CPU->id); |
printf("cpu%u: halted\n", CPU->id); |
else |
printf("cpu: halted\n"); |
cpu_halt(); |
139,9 → 139,9 |
*/ |
int strncmp(const char *src, const char *dst, size_t len) |
{ |
int i; |
unsigned int i; |
for (i = 0; *src && *dst && i < len; src++, dst++, i++) { |
for (i = 0; (*src) && (*dst) && (i < len); src++, dst++, i++) { |
if (*src < *dst) |
return -1; |
if (*src > *dst) |
168,7 → 168,7 |
*/ |
void strncpy(char *dest, const char *src, size_t len) |
{ |
int i; |
unsigned int i; |
for (i = 0; i < len; i++) { |
if (!(dest[i] = src[i])) |
return; |
/branches/network/kernel/generic/src/lib/sort.c |
---|
96,14 → 96,17 |
void _qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot) |
{ |
if (n > 4) { |
int i = 0, j = n - 1; |
unsigned int i = 0, j = n - 1; |
memcpy(pivot, data, e_size); |
while (1) { |
while ((cmp(data + i * e_size, pivot) < 0) && i < n) i++; |
while ((cmp(data + j * e_size, pivot) >=0) && j > 0) j--; |
if (i<j) { |
while ((cmp(data + i * e_size, pivot) < 0) && (i < n)) |
i++; |
while ((cmp(data + j * e_size, pivot) >= 0) && (j > 0)) |
j--; |
if (i < j) { |
memcpy(tmp, data + i * e_size, e_size); |
memcpy(data + i * e_size, data + j * e_size, e_size); |
memcpy(data + j * e_size, tmp, e_size); |
/branches/network/kernel/generic/src/sysinfo/sysinfo.c |
---|
177,7 → 177,7 |
sysinfo_item_t *item = sysinfo_create_path(name, root); |
if (item != NULL) { /* If in subsystem, unable to create or return so unable to set */ |
item->val.val=val; |
item->val.val = val; |
item->val_type = SYSINFO_VAL_VAL; |
} |
} |
192,7 → 192,7 |
sysinfo_item_t *item = sysinfo_create_path(name, root); |
if (item != NULL) { /* If in subsystem, unable to create or return so unable to set */ |
item->val.fn=fn; |
item->val.fn = fn; |
item->val_type = SYSINFO_VAL_FUNCTION; |
} |
} |
244,7 → 244,7 |
break; |
} |
printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val, |
printf("%s %s val:%" PRIun "(%" PRIxn ") sub:%s\n", root->name, vtype, val, |
val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? |
"NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? |
"TAB" : "FUN")); |
281,10 → 281,15 |
return ret; |
} |
#define SYSINFO_MAX_LEN 1024 |
unative_t sys_sysinfo_valid(unative_t ptr, unative_t len) |
{ |
char *str; |
sysinfo_rettype_t ret = {0, 0}; |
if (len > SYSINFO_MAX_LEN) |
return ret.valid; |
str = malloc(len + 1, 0); |
ASSERT(str); |
299,6 → 304,9 |
{ |
char *str; |
sysinfo_rettype_t ret = {0, 0}; |
if (len > SYSINFO_MAX_LEN) |
return ret.val; |
str = malloc(len + 1, 0); |
ASSERT(str); |
/branches/network/kernel/generic/src/synch/smc.c |
---|
0,0 → 1,60 |
/* |
* Copyright (c) 2008 Jiri Svoboda |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** |
* @file |
* @brief Self-modifying code barriers. |
*/ |
#include <arch.h> |
#include <macros.h> |
#include <errno.h> |
#include <arch/barrier.h> |
#include <synch/smc.h> |
/** Syscall: enforce self-modifying code coherence for a userspace range.
 *
 * The range is rejected if it overlaps the first (NULL) page or, on
 * architectures where the kernel address space is not shadowed, the
 * kernel address range.
 *
 * @param va	Virtual address of the beginning of the range.
 * @param size	Size of the range in bytes.
 *
 * @return 0 on success, EINVAL if the range is not acceptable.
 */
unative_t sys_smc_coherence(uintptr_t va, size_t size)
{
	/* The NULL page must never be touched. */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return EINVAL;

	if (!KERNEL_ADDRESS_SPACE_SHADOWED &&
	    overlaps(va, size, KERNEL_ADDRESS_SPACE_START,
	    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START))
		return EINVAL;

	smc_coherence_block((void *) va, size);
	return 0;
}
/** @} |
*/ |
/branches/network/kernel/generic/src/synch/rwlock.c |
---|
82,7 → 82,7 |
*/ |
void rwlock_initialize(rwlock_t *rwl) { |
spinlock_initialize(&rwl->lock, "rwlock_t"); |
mutex_initialize(&rwl->exclusive); |
mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE); |
rwl->readers_in = 0; |
} |
231,7 → 231,7 |
interrupts_restore(ipl); |
break; |
case ESYNCH_OK_ATOMIC: |
panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n"); |
panic("_mutex_lock_timeout() == ESYNCH_OK_ATOMIC\n"); |
break; |
default: |
panic("invalid ESYNCH\n"); |
/branches/network/kernel/generic/src/synch/mutex.c |
---|
38,42 → 38,54 |
#include <synch/mutex.h> |
#include <synch/semaphore.h> |
#include <synch/synch.h> |
#include <debug.h> |
/** Initialize mutex |
/** Initialize mutex. |
* |
* Initialize mutex. |
* |
* @param mtx Mutex. |
* @param mtx Mutex. |
* @param type Type of the mutex. |
*/ |
void mutex_initialize(mutex_t *mtx) |
void mutex_initialize(mutex_t *mtx, mutex_type_t type)
{
	/* Start with the semaphore open and remember the mutex type. */
	semaphore_initialize(&mtx->sem, 1);
	mtx->type = type;
}
/** Acquire mutex |
/** Acquire mutex. |
* |
* Acquire mutex. |
* Timeout mode and non-blocking mode can be requested. |
* |
* @param mtx Mutex. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of operation. |
* @param mtx Mutex. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of operation. |
* |
* For exact description of possible combinations of |
* usec and flags, see comment for waitq_sleep_timeout(). |
* |
* @return See comment for waitq_sleep_timeout(). |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags) |
{ |
return _semaphore_down_timeout(&mtx->sem, usec, flags); |
int rc; |
if (mtx->type == MUTEX_PASSIVE) { |
rc = _semaphore_down_timeout(&mtx->sem, usec, flags); |
} else { |
ASSERT(mtx->type == MUTEX_ACTIVE); |
ASSERT(usec == SYNCH_NO_TIMEOUT); |
ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE)); |
do { |
rc = semaphore_trydown(&mtx->sem); |
} while (SYNCH_FAILED(rc) && |
!(flags & SYNCH_FLAGS_NON_BLOCKING)); |
} |
return rc; |
} |
/** Release mutex |
/** Release mutex. |
* |
* Release mutex. |
* |
* @param mtx Mutex. |
* @param mtx Mutex. |
*/ |
void mutex_unlock(mutex_t *mtx) |
{ |
/branches/network/kernel/generic/src/synch/condvar.c |
---|
43,7 → 43,7 |
/** Initialize condition variable. |
* |
* @param cv Condition variable. |
* @param cv Condition variable. |
*/ |
void condvar_initialize(condvar_t *cv) |
{ |
50,11 → 50,10 |
waitq_initialize(&cv->wq); |
} |
/** |
* Signal the condition has become true |
* to the first waiting thread by waking it up. |
/** Signal the condition has become true to the first waiting thread by waking |
* it up. |
* |
* @param cv Condition variable. |
* @param cv Condition variable. |
*/ |
void condvar_signal(condvar_t *cv) |
{ |
61,11 → 60,10 |
waitq_wakeup(&cv->wq, WAKEUP_FIRST); |
} |
/** |
* Signal the condition has become true |
* to all waiting threads by waking them up. |
/** Signal the condition has become true to all waiting threads by waking |
* them up. |
* |
* @param cv Condition variable. |
* @param cv Condition variable. |
*/ |
void condvar_broadcast(condvar_t *cv) |
{ |
74,17 → 72,17 |
/** Wait for the condition becoming true. |
* |
* @param cv Condition variable. |
* @param mtx Mutex. |
* @param usec Timeout value in microseconds. |
* @param flags Select mode of operation. |
* @param cv Condition variable. |
* @param mtx Mutex. |
* @param usec Timeout value in microseconds. |
* @param flags Select mode of operation. |
* |
* For exact description of meaning of possible combinations |
* of usec and flags, see comment for waitq_sleep_timeout(). |
* Note that when SYNCH_FLAGS_NON_BLOCKING is specified here, |
* ESYNCH_WOULD_BLOCK is always returned. |
* For exact description of meaning of possible combinations of usec and flags, |
* see comment for waitq_sleep_timeout(). Note that when |
* SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always |
* returned. |
* |
* @return See comment for waitq_sleep_timeout(). |
* @return See comment for waitq_sleep_timeout(). |
*/ |
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags) |
{ |
/branches/network/kernel/generic/src/synch/spinlock.c |
---|
106,9 → 106,8 |
continue; |
#endif |
if (i++ > DEADLOCK_THRESHOLD) { |
printf("cpu%d: looping on spinlock %.*p:%s, " |
"caller=%.*p", CPU->id, sizeof(uintptr_t) * 2, sl, |
sl->name, sizeof(uintptr_t) * 2, CALLER); |
printf("cpu%u: looping on spinlock %" PRIp ":%s, caller=%" PRIp, |
CPU->id, sl, sl->name, CALLER); |
symbol = get_symtab_entry(CALLER); |
if (symbol) |
printf("(%s)", symbol); |
119,7 → 118,7 |
} |
if (deadlock_reported) |
printf("cpu%d: not deadlocked\n", CPU->id); |
printf("cpu%u: not deadlocked\n", CPU->id); |
/* |
* Prevent critical section code from bleeding out this way up. |
/branches/network/kernel/generic/src/synch/waitq.c |
---|
54,13 → 54,13 |
#include <context.h> |
#include <adt/list.h> |
static void waitq_timeouted_sleep(void *data); |
static void waitq_sleep_timed_out(void *data); |
/** Initialize wait queue |
* |
* Initialize wait queue. |
* |
* @param wq Pointer to wait queue to be initialized. |
* @param wq Pointer to wait queue to be initialized. |
*/ |
void waitq_initialize(waitq_t *wq) |
{ |
71,7 → 71,7 |
/** Handle timeout during waitq_sleep_timeout() call |
* |
* This routine is called when waitq_sleep_timeout() timeouts. |
* This routine is called when waitq_sleep_timeout() times out. |
* Interrupts are disabled. |
* |
* It is supposed to try to remove 'its' thread from the wait queue; |
79,9 → 79,9 |
* overlap. In that case it behaves just as though there was no |
* timeout at all. |
* |
* @param data Pointer to the thread that called waitq_sleep_timeout(). |
* @param data Pointer to the thread that called waitq_sleep_timeout(). |
*/ |
void waitq_timeouted_sleep(void *data) |
void waitq_sleep_timed_out(void *data) |
{ |
thread_t *t = (thread_t *) data; |
waitq_t *wq; |
123,7 → 123,7 |
* This routine attempts to interrupt a thread from its sleep in a waitqueue. |
* If the thread is not found sleeping, no action is taken. |
* |
* @param t Thread to be interrupted. |
* @param t Thread to be interrupted. |
*/ |
void waitq_interrupt_sleep(thread_t *t) |
{ |
183,9 → 183,9 |
* This function is really basic in that other functions as waitq_sleep() |
* and all the *_timeout() functions use it. |
* |
* @param wq Pointer to wait queue. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of the sleep. |
* @param wq Pointer to wait queue. |
* @param usec Timeout in microseconds. |
* @param flags Specify mode of the sleep. |
* |
* The sleep can be interrupted only if the |
* SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. |
200,22 → 200,23 |
* If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the |
* call will immediately return, reporting either success or failure. |
* |
* @return One of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED, |
* ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED. |
* @return Returns one of ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, |
* ESYNCH_INTERRUPTED, ESYNCH_OK_ATOMIC and |
* ESYNCH_OK_BLOCKED. |
* |
* @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of the |
* call there was no pending wakeup. |
* @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of |
* the call there was no pending wakeup. |
* |
* @li ESYNCH_TIMEOUT means that the sleep timed out. |
* @li ESYNCH_TIMEOUT means that the sleep timed out. |
* |
* @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread. |
* @li ESYNCH_INTERRUPTED means that somebody interrupted the sleeping thread. |
* |
* @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was |
* a pending wakeup at the time of the call. The caller was not put |
* asleep at all. |
* @li ESYNCH_OK_ATOMIC means that the sleep succeeded and that there was |
* a pending wakeup at the time of the call. The caller was not put |
* asleep at all. |
* |
* @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was |
* attempted. |
* @li ESYNCH_OK_BLOCKED means that the sleep succeeded; the full sleep was |
* attempted. |
*/ |
int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags) |
{ |
233,9 → 234,9 |
* This function will return holding the lock of the wait queue |
* and interrupts disabled. |
* |
* @param wq Wait queue. |
* @param wq Wait queue. |
* |
* @return Interrupt level as it existed on entry to this function. |
* @return Interrupt level as it existed on entry to this function. |
*/ |
ipl_t waitq_sleep_prepare(waitq_t *wq) |
{ |
271,9 → 272,9 |
* to the call to waitq_sleep_prepare(). If necessary, the wait queue |
* lock is released. |
* |
* @param wq Wait queue. |
* @param rc Return code of waitq_sleep_timeout_unsafe(). |
* @param ipl Interrupt level returned by waitq_sleep_prepare(). |
* @param wq Wait queue. |
* @param rc Return code of waitq_sleep_timeout_unsafe(). |
* @param ipl Interrupt level returned by waitq_sleep_prepare(). |
*/ |
void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) |
{ |
291,14 → 292,14 |
/** Internal implementation of waitq_sleep_timeout(). |
* |
* This function implements logic of sleeping in a wait queue. |
* This call must be preceeded by a call to waitq_sleep_prepare() |
* and followed by a call to waitq_slee_finish(). |
* This call must be preceded by a call to waitq_sleep_prepare() |
* and followed by a call to waitq_sleep_finish(). |
* |
* @param wq See waitq_sleep_timeout(). |
* @param usec See waitq_sleep_timeout(). |
* @param flags See waitq_sleep_timeout(). |
* @param wq See waitq_sleep_timeout(). |
* @param usec See waitq_sleep_timeout(). |
* @param flags See waitq_sleep_timeout(). |
* |
* @return See waitq_sleep_timeout(). |
* @return See waitq_sleep_timeout(). |
*/ |
int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags) |
{ |
355,7 → 356,7 |
} |
THREAD->timeout_pending = true; |
timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, |
waitq_timeouted_sleep, THREAD); |
waitq_sleep_timed_out, THREAD); |
} |
list_append(&THREAD->wq_link, &wq->head); |
383,8 → 384,8 |
* Besides its 'normal' wakeup operation, it attempts to unregister possible |
* timeout. |
* |
* @param wq Pointer to wait queue. |
* @param mode Wakeup mode. |
* @param wq Pointer to wait queue. |
* @param mode Wakeup mode. |
*/ |
void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode) |
{ |
404,12 → 405,12 |
* This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It |
* assumes wq->lock is already locked and interrupts are already disabled. |
* |
* @param wq Pointer to wait queue. |
* @param mode If mode is WAKEUP_FIRST, then the longest waiting thread, |
* if any, is woken up. If mode is WAKEUP_ALL, then all |
* waiting threads, if any, are woken up. If there are no |
* waiting threads to be woken up, the missed wakeup is |
* recorded in the wait queue. |
* @param wq Pointer to wait queue. |
* @param mode If mode is WAKEUP_FIRST, then the longest waiting |
* thread, if any, is woken up. If mode is WAKEUP_ALL, then |
* all waiting threads, if any, are woken up. If there are |
* no waiting threads to be woken up, the missed wakeup is |
* recorded in the wait queue. |
*/ |
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode) |
{ |
431,7 → 432,7 |
* Lock the thread prior to removing it from the wq. |
* This is not necessary because of mutual exclusion |
* (the link belongs to the wait queue), but because |
* of synchronization with waitq_timeouted_sleep() |
* of synchronization with waitq_sleep_timed_out() |
* and thread_interrupt_sleep(). |
* |
* In order for these two functions to work, the following |
/branches/network/kernel/generic/src/synch/futex.c |
---|
324,7 → 324,7 |
for (cur = TASK->futexes.leaf_head.next; |
cur != &TASK->futexes.leaf_head; cur = cur->next) { |
btree_node_t *node; |
int i; |
unsigned int i; |
node = list_get_instance(cur, btree_node_t, leaf_link); |
for (i = 0; i < node->keys; i++) { |
/branches/network/kernel/generic/src/syscall/syscall.c |
---|
38,6 → 38,7 |
#include <syscall/syscall.h> |
#include <proc/thread.h> |
#include <proc/task.h> |
#include <proc/program.h> |
#include <mm/as.h> |
#include <print.h> |
#include <putchar.h> |
46,19 → 47,19 |
#include <debug.h> |
#include <ipc/sysipc.h> |
#include <synch/futex.h> |
#include <synch/smc.h> |
#include <ddi/ddi.h> |
#include <security/cap.h> |
#include <syscall/copy.h> |
#include <sysinfo/sysinfo.h> |
#include <console/console.h> |
#include <console/klog.h> |
/** Print using kernel facility |
* |
* Some simulators can print only through kernel. Userspace can use |
* this syscall to facilitate it. |
* Print to kernel log. |
* |
*/ |
static unative_t sys_io(int fd, const void * buf, size_t count) |
static unative_t sys_klog(int fd, const void * buf, size_t count) |
{ |
size_t i; |
char *data; |
66,20 → 67,23 |
if (count > PAGE_SIZE) |
return ELIMIT; |
data = (char *) malloc(count, 0); |
if (!data) |
return ENOMEM; |
rc = copy_from_uspace(data, buf, count); |
if (rc) { |
if (count > 0) { |
data = (char *) malloc(count, 0); |
if (!data) |
return ENOMEM; |
rc = copy_from_uspace(data, buf, count); |
if (rc) { |
free(data); |
return rc; |
} |
for (i = 0; i < count; i++) |
putchar(data[i]); |
free(data); |
return rc; |
} |
for (i = 0; i < count; i++) |
putchar(data[i]); |
free(data); |
} else |
klog_update(); |
return count; |
} |
100,8 → 104,7 |
if (id < SYSCALL_END) |
rc = syscall_table[id](a1, a2, a3, a4, a5, a6); |
else { |
klog_printf("TASK %llu: Unknown syscall id %llx", TASK->taskid, |
id); |
printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id); |
task_kill(TASK->taskid); |
thread_exit(); |
} |
113,7 → 116,7 |
} |
syshandler_t syscall_table[SYSCALL_END] = { |
(syshandler_t) sys_io, |
(syshandler_t) sys_klog, |
(syshandler_t) sys_tls_set, |
/* Thread and task related syscalls. */ |
120,15 → 123,19 |
(syshandler_t) sys_thread_create, |
(syshandler_t) sys_thread_exit, |
(syshandler_t) sys_thread_get_id, |
(syshandler_t) sys_task_get_id, |
(syshandler_t) sys_program_spawn_loader, |
/* Synchronization related syscalls. */ |
(syshandler_t) sys_futex_sleep_timeout, |
(syshandler_t) sys_futex_wakeup, |
(syshandler_t) sys_smc_coherence, |
/* Address space related syscalls. */ |
(syshandler_t) sys_as_area_create, |
(syshandler_t) sys_as_area_resize, |
(syshandler_t) sys_as_area_change_flags, |
(syshandler_t) sys_as_area_destroy, |
/* IPC related syscalls. */ |
/branches/network/kernel/generic/src/console/klog.c |
---|
File deleted |
/branches/network/kernel/generic/src/console/kconsole.c |
---|
169,7 → 169,7 |
} |
/** Try to find a command beginning with prefix */ |
static const char * cmdtab_search_one(const char *name,link_t **startpos) |
static const char *cmdtab_search_one(const char *name,link_t **startpos) |
{ |
size_t namelen = strlen(name); |
const char *curname; |
203,7 → 203,7 |
*/ |
static int cmdtab_compl(char *name) |
{ |
static char output[MAX_SYMBOL_NAME+1]; |
static char output[MAX_SYMBOL_NAME + 1]; |
link_t *startpos = NULL; |
const char *foundtxt; |
int found = 0; |
213,7 → 213,7 |
while ((foundtxt = cmdtab_search_one(name, &startpos))) { |
startpos = startpos->next; |
if (!found) |
strncpy(output, foundtxt, strlen(foundtxt)+1); |
strncpy(output, foundtxt, strlen(foundtxt) + 1); |
else { |
for (i = 0; output[i] && foundtxt[i] && |
output[i] == foundtxt[i]; i++) |
240,11 → 240,11 |
} |
static char * clever_readline(const char *prompt, chardev_t *input) |
static char *clever_readline(const char *prompt, chardev_t *input) |
{ |
static int histposition = 0; |
static char tmp[MAX_CMDLINE+1]; |
static char tmp[MAX_CMDLINE + 1]; |
int curlen = 0, position = 0; |
char *current = history[histposition]; |
int i; |
257,7 → 257,8 |
if (c == '\n') { |
putchar(c); |
break; |
} if (c == '\b') { /* Backspace */ |
} |
if (c == '\b') { /* Backspace */ |
if (position == 0) |
continue; |
for (i = position; i < curlen; i++) |
543,7 → 544,8 |
buf = (char *) cmd->argv[i].buffer; |
strncpy(buf, (const char *) &cmdline[start], |
min((end - start) + 2, cmd->argv[i].len)); |
buf[min((end - start) + 1, cmd->argv[i].len - 1)] = '\0'; |
buf[min((end - start) + 1, cmd->argv[i].len - 1)] = |
'\0'; |
break; |
case ARG_TYPE_INT: |
if (parse_int_arg(cmdline + start, end - start + 1, |
560,8 → 562,8 |
'\0'; |
cmd->argv[i].intval = (unative_t) buf; |
cmd->argv[i].vartype = ARG_TYPE_STRING; |
} else if (!parse_int_arg(cmdline + start, end - start + 1, |
&cmd->argv[i].intval)) { |
} else if (!parse_int_arg(cmdline + start, |
end - start + 1, &cmd->argv[i].intval)) { |
cmd->argv[i].vartype = ARG_TYPE_INT; |
} else { |
printf("Unrecognized variable argument.\n"); |
/branches/network/kernel/generic/src/console/console.c |
---|
35,30 → 35,56 |
#include <console/console.h> |
#include <console/chardev.h> |
#include <sysinfo/sysinfo.h> |
#include <synch/waitq.h> |
#include <synch/spinlock.h> |
#include <arch/types.h> |
#include <ddi/device.h> |
#include <ddi/irq.h> |
#include <ddi/ddi.h> |
#include <ipc/irq.h> |
#include <arch.h> |
#include <func.h> |
#include <print.h> |
#include <atomic.h> |
#define BUFLEN 2048 |
static char debug_buffer[BUFLEN]; |
static size_t offset = 0; |
/** Initialize stdout to something that does not print, but does not fail |
* |
* Save data in some buffer so that it could be retrieved in the debugger |
#define KLOG_SIZE PAGE_SIZE |
#define KLOG_LATENCY 8 |
/** Kernel log cyclic buffer */
static char klog[KLOG_SIZE] __attribute__ ((aligned (PAGE_SIZE))); |
/** Kernel log initialized */
static bool klog_inited = false; |
/** First kernel log characters */
static index_t klog_start = 0; |
/** Number of valid kernel log characters */
static size_t klog_len = 0; |
/** Number of stored (not printed) kernel log characters */
static size_t klog_stored = 0; |
/** Number of stored kernel log characters for uspace */
static size_t klog_uspace = 0; |
/** Kernel log spinlock */
SPINLOCK_INITIALIZE(klog_lock); |
/** Physical memory area used for klog buffer */ |
static parea_t klog_parea; |
/* |
* For now, we use 0 as INR. |
* However, it is therefore desirable to have architecture specific |
* definition of KLOG_VIRT_INR in the future. |
*/ |
static void null_putchar(chardev_t *d, const char ch) |
{ |
if (offset >= BUFLEN) |
offset = 0; |
debug_buffer[offset++] = ch; |
} |
#define KLOG_VIRT_INR 0 |
static irq_t klog_irq; |
static chardev_operations_t null_stdout_ops = { |
.write = null_putchar |
.suspend = NULL, |
.resume = NULL, |
.write = NULL, |
.read = NULL |
}; |
chardev_t null_stdout = { |
66,10 → 92,58 |
.op = &null_stdout_ops |
}; |
/** Standard input character device. */ |
/** Always refuse IRQ ownership.
* |
* This is not a real IRQ, so we always decline. |
* |
* @return Always returns IRQ_DECLINE. |
*/ |
static irq_ownership_t klog_claim(void)
{
	/* The klog IRQ is virtual; no real device asserts it, so decline. */
	return IRQ_DECLINE;
}
/** Standard input character device */ |
chardev_t *stdin = NULL; |
chardev_t *stdout = &null_stdout; |
/** Initialize kernel logging facility |
* |
* The shared area contains kernel cyclic buffer. Userspace application may |
* be notified on new data with indication of position and size |
* of the data within the circular buffer. |
*/ |
void klog_init(void) |
{ |
void *faddr = (void *) KA2PA(klog); |
ASSERT((uintptr_t) faddr % FRAME_SIZE == 0); |
ASSERT(KLOG_SIZE % FRAME_SIZE == 0); |
devno_t devno = device_assign_devno(); |
klog_parea.pbase = (uintptr_t) faddr; |
klog_parea.vbase = (uintptr_t) klog; |
klog_parea.frames = SIZE2FRAMES(KLOG_SIZE); |
klog_parea.cacheable = true; |
ddi_parea_register(&klog_parea); |
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr); |
sysinfo_set_item_val("klog.pages", NULL, SIZE2FRAMES(KLOG_SIZE)); |
sysinfo_set_item_val("klog.devno", NULL, devno); |
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR); |
irq_initialize(&klog_irq); |
klog_irq.devno = devno; |
klog_irq.inr = KLOG_VIRT_INR; |
klog_irq.claim = klog_claim; |
irq_register(&klog_irq); |
spinlock_lock(&klog_lock); |
klog_inited = true; |
spinlock_unlock(&klog_lock); |
} |
/** Get character from character device. Do not echo character. |
* |
* @param chardev Character device. |
90,7 → 164,7 |
return chardev->op->read(chardev); |
/* no other way of interacting with user, halt */ |
if (CPU) |
printf("cpu%d: ", CPU->id); |
printf("cpu%u: ", CPU->id); |
else |
printf("cpu: "); |
printf("halted - no kconsole\n"); |
159,10 → 233,60 |
return ch; |
} |
void klog_update(void) |
{ |
spinlock_lock(&klog_lock); |
if ((klog_inited) && (klog_irq.notif_cfg.notify) && (klog_uspace > 0)) { |
ipc_irq_send_msg_3(&klog_irq, klog_start, klog_len, klog_uspace); |
klog_uspace = 0; |
} |
spinlock_unlock(&klog_lock); |
} |
void putchar(char c) |
{ |
spinlock_lock(&klog_lock); |
if ((klog_stored > 0) && (stdout->op->write)) { |
/* Print charaters stored in kernel log */ |
index_t i; |
for (i = klog_len - klog_stored; i < klog_len; i++) |
stdout->op->write(stdout, klog[(klog_start + i) % KLOG_SIZE]); |
klog_stored = 0; |
} |
/* Store character in the cyclic kernel log */ |
klog[(klog_start + klog_len) % KLOG_SIZE] = c; |
if (klog_len < KLOG_SIZE) |
klog_len++; |
else |
klog_start = (klog_start + 1) % KLOG_SIZE; |
if (stdout->op->write) |
stdout->op->write(stdout, c); |
else { |
/* The character is just in the kernel log */ |
if (klog_stored < klog_len) |
klog_stored++; |
} |
/* The character is stored for uspace */ |
if (klog_uspace < klog_len) |
klog_uspace++; |
/* Check notify uspace to update */ |
bool update; |
if ((klog_uspace > KLOG_LATENCY) || (c == '\n')) |
update = true; |
else |
update = false; |
spinlock_unlock(&klog_lock); |
if (update) |
klog_update(); |
} |
/** @} |
/branches/network/kernel/generic/src/console/cmd.c |
---|
563,7 → 563,7 |
/* This doesn't have to be very accurate */ |
unative_t sec = uptime->seconds1; |
printf("Up %u days, %u hours, %u minutes, %u seconds\n", |
printf("Up %" PRIun " days, %" PRIun " hours, %" PRIun " minutes, %" PRIun " seconds\n", |
sec / 86400, (sec % 86400) / 3600, (sec % 3600) / 60, sec % 60); |
return 1; |
632,7 → 632,7 |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling %s() (%.*p)\n", symbol, sizeof(uintptr_t) * 2, symaddr); |
printf("Calling %s() (%p)\n", symbol, symaddr); |
#ifdef ia64 |
fptr.f = symaddr; |
fptr.gp = ((unative_t *)cmd_call2)[1]; |
640,7 → 640,7 |
#else |
f = (unative_t (*)(void)) symaddr; |
#endif |
printf("Result: %#zx\n", f()); |
printf("Result: %#" PRIxn "\n", f()); |
} |
return 1; |
686,7 → 686,7 |
struct { |
unative_t f; |
unative_t gp; |
}fptr; |
} fptr; |
#endif |
symaddr = get_symbol_addr((char *) argv->buffer); |
698,7 → 698,7 |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(%#zx): %.*p: %s\n", arg1, sizeof(uintptr_t) * 2, symaddr, symbol); |
printf("Calling f(%#" PRIxn "): %p: %s\n", arg1, symaddr, symbol); |
#ifdef ia64 |
fptr.f = symaddr; |
fptr.gp = ((unative_t *)cmd_call2)[1]; |
706,7 → 706,7 |
#else |
f = (unative_t (*)(unative_t,...)) symaddr; |
#endif |
printf("Result: %#zx\n", f(arg1)); |
printf("Result: %#" PRIxn "\n", f(arg1)); |
} |
return 1; |
735,8 → 735,8 |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(0x%zx,0x%zx): %.*p: %s\n", |
arg1, arg2, sizeof(uintptr_t) * 2, symaddr, symbol); |
printf("Calling f(%#" PRIxn ", %#" PRIxn "): %p: %s\n", |
arg1, arg2, symaddr, symbol); |
#ifdef ia64 |
fptr.f = symaddr; |
fptr.gp = ((unative_t *)cmd_call2)[1]; |
744,7 → 744,7 |
#else |
f = (unative_t (*)(unative_t,unative_t,...)) symaddr; |
#endif |
printf("Result: %#zx\n", f(arg1, arg2)); |
printf("Result: %#" PRIxn "\n", f(arg1, arg2)); |
} |
return 1; |
774,8 → 774,8 |
printf("Duplicate symbol, be more specific.\n"); |
} else { |
symbol = get_symtab_entry(symaddr); |
printf("Calling f(0x%zx,0x%zx, 0x%zx): %.*p: %s\n", |
arg1, arg2, arg3, sizeof(uintptr_t) * 2, symaddr, symbol); |
printf("Calling f(%#" PRIxn ",%#" PRIxn ", %#" PRIxn "): %p: %s\n", |
arg1, arg2, arg3, symaddr, symbol); |
#ifdef ia64 |
fptr.f = symaddr; |
fptr.gp = ((unative_t *)cmd_call2)[1]; |
783,7 → 783,7 |
#else |
f = (unative_t (*)(unative_t,unative_t,unative_t,...)) symaddr; |
#endif |
printf("Result: %#zx\n", f(arg1, arg2, arg3)); |
printf("Result: %#" PRIxn "\n", f(arg1, arg2, arg3)); |
} |
return 1; |
856,7 → 856,7 |
} else { |
if (pointer) |
addr = (uint32_t *)(*(unative_t *)addr); |
printf("Writing 0x%x -> %.*p\n", arg1, sizeof(uintptr_t) * 2, addr); |
printf("Writing %#" PRIx64 " -> %p\n", arg1, addr); |
*addr = arg1; |
} |
1025,7 → 1025,7 |
char suffix; |
order(dt, &cycles, &suffix); |
printf("Time: %llu%c cycles\n", cycles, suffix); |
printf("Time: %" PRIu64 "%c cycles\n", cycles, suffix); |
if (ret == NULL) { |
printf("Test passed\n"); |
1053,7 → 1053,7 |
} |
for (i = 0; i < cnt; i++) { |
printf("%s (%d/%d) ... ", test->name, i + 1, cnt); |
printf("%s (%u/%u) ... ", test->name, i + 1, cnt); |
/* Update and read thread accounting |
for benchmarking */ |
1081,7 → 1081,7 |
data[i] = dt; |
order(dt, &cycles, &suffix); |
printf("OK (%llu%c cycles)\n", cycles, suffix); |
printf("OK (%" PRIu64 "%c cycles)\n", cycles, suffix); |
} |
if (ret) { |
1094,7 → 1094,7 |
} |
order(sum / (uint64_t) cnt, &cycles, &suffix); |
printf("Average\t\t%llu%c\n", cycles, suffix); |
printf("Average\t\t%" PRIu64 "%c\n", cycles, suffix); |
} |
free(data); |
/branches/network/kernel/generic/src/console/chardev.c |
---|
42,7 → 42,7 |
* @param chardev Character device. |
* @param op Implementation of character device operations. |
*/ |
void chardev_initialize(char *name,chardev_t *chardev, |
void chardev_initialize(char *name, chardev_t *chardev, |
chardev_operations_t *op) |
{ |
chardev->name = name; |
/branches/network/kernel/generic/src/cpu/cpu.c |
---|
67,7 → 67,7 |
panic("malloc/cpus"); |
/* initialize everything */ |
memsetb((uintptr_t) cpus, sizeof(cpu_t) * config.cpu_count, 0); |
memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0); |
for (i = 0; i < config.cpu_count; i++) { |
cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | FRAME_ATOMIC); |
104,7 → 104,7 |
if (cpus[i].active) |
cpu_print_report(&cpus[i]); |
else |
printf("cpu%d: not active\n", i); |
printf("cpu%u: not active\n", i); |
} |
} |
/branches/network/kernel/generic/src/adt/hash_table.c |
---|
63,7 → 63,7 |
if (!h->entry) { |
panic("cannot allocate memory for hash table\n"); |
} |
memsetb((uintptr_t) h->entry, m * sizeof(link_t), 0); |
memsetb(h->entry, m * sizeof(link_t), 0); |
for (i = 0; i < m; i++) |
list_initialize(&h->entry[i]); |
/branches/network/kernel/generic/src/adt/btree.c |
---|
124,7 → 124,7 |
lnode = leaf_node; |
if (!lnode) { |
if (btree_search(t, key, &lnode)) { |
panic("B-tree %p already contains key %d\n", t, key); |
panic("B-tree %p already contains key %" PRIu64 "\n", t, key); |
} |
} |
224,7 → 224,7 |
lnode = leaf_node; |
if (!lnode) { |
if (!btree_search(t, key, &lnode)) { |
panic("B-tree %p does not contain key %d\n", t, key); |
panic("B-tree %p does not contain key %" PRIu64 "\n", t, key); |
} |
} |
524,7 → 524,7 |
return; |
} |
} |
panic("node %p does not contain key %d\n", node, key); |
panic("node %p does not contain key %" PRIu64 "\n", node, key); |
} |
/** Remove key and its right subtree pointer from B-tree node. |
551,7 → 551,7 |
return; |
} |
} |
panic("node %p does not contain key %d\n", node, key); |
panic("node %p does not contain key %" PRIu64 "\n", node, key); |
} |
/** Split full B-tree node and insert new key-value-right-subtree triplet. |
970,7 → 970,7 |
printf("("); |
for (i = 0; i < node->keys; i++) { |
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : ""); |
if (node->depth && node->subtree[i]) { |
list_append(&node->subtree[i]->bfs_link, &head); |
} |
992,7 → 992,7 |
printf("("); |
for (i = 0; i < node->keys; i++) |
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : ""); |
printf(")"); |
} |
printf("\n"); |
/branches/network/kernel/generic/src/debug/symtab.c |
---|
36,9 → 36,11 |
*/ |
#include <symtab.h> |
#include <arch/byteorder.h> |
#include <byteorder.h> |
#include <func.h> |
#include <print.h> |
#include <arch/types.h> |
#include <typedefs.h> |
/** Return entry that seems most likely to correspond to argument. |
* |
53,12 → 55,12 |
{ |
count_t i; |
for (i=1;symbol_table[i].address_le;++i) { |
for (i = 1; symbol_table[i].address_le; ++i) { |
if (addr < uint64_t_le2host(symbol_table[i].address_le)) |
break; |
} |
if (addr >= uint64_t_le2host(symbol_table[i-1].address_le)) |
return symbol_table[i-1].symbol_name; |
if (addr >= uint64_t_le2host(symbol_table[i - 1].address_le)) |
return symbol_table[i - 1].symbol_name; |
return NULL; |
} |
70,21 → 72,21 |
*/ |
static char * symtab_search_one(const char *name, int *startpos) |
{ |
int namelen = strlen(name); |
unsigned int namelen = strlen(name); |
char *curname; |
int i,j; |
int i, j; |
int colonoffset = -1; |
for (i=0;name[i];i++) |
for (i = 0; name[i]; i++) |
if (name[i] == ':') { |
colonoffset = i; |
break; |
} |
for (i=*startpos;symbol_table[i].address_le;++i) { |
for (i = *startpos; symbol_table[i].address_le; ++i) { |
/* Find a ':' in name */ |
curname = symbol_table[i].symbol_name; |
for (j=0; curname[j] && curname[j] != ':'; j++) |
for (j = 0; curname[j] && curname[j] != ':'; j++) |
; |
if (!curname[j]) |
continue; |
94,7 → 96,7 |
continue; |
if (strncmp(curname, name, namelen) == 0) { |
*startpos = i; |
return curname+namelen; |
return curname + namelen; |
} |
} |
return NULL; |
115,7 → 117,7 |
int i; |
i = 0; |
while ((hint=symtab_search_one(name, &i))) { |
while ((hint = symtab_search_one(name, &i))) { |
if (!strlen(hint)) { |
addr = uint64_t_le2host(symbol_table[i].address_le); |
found++; |
139,7 → 141,7 |
while (symtab_search_one(name, &i)) { |
addr = uint64_t_le2host(symbol_table[i].address_le); |
realname = symbol_table[i].symbol_name; |
printf("%.*p: %s\n", sizeof(uintptr_t) * 2, addr, realname); |
printf("%p: %s\n", addr, realname); |
i++; |
} |
} |
151,7 → 153,7 |
*/ |
int symtab_compl(char *input) |
{ |
char output[MAX_SYMBOL_NAME+1]; |
char output[MAX_SYMBOL_NAME + 1]; |
int startpos = 0; |
char *foundtxt; |
int found = 0; |
172,9 → 174,10 |
while ((foundtxt = symtab_search_one(name, &startpos))) { |
startpos++; |
if (!found) |
strncpy(output, foundtxt, strlen(foundtxt)+1); |
strncpy(output, foundtxt, strlen(foundtxt) + 1); |
else { |
for (i=0; output[i] && foundtxt[i] && output[i]==foundtxt[i]; i++) |
for (i = 0; output[i] && foundtxt[i] && |
output[i] == foundtxt[i]; i++) |
; |
output[i] = '\0'; |
} |
/branches/network/kernel/generic/src/printf/vprintf.c |
---|
37,6 → 37,8 |
#include <putchar.h> |
#include <synch/spinlock.h> |
#include <arch/asm.h> |
#include <arch/types.h> |
#include <typedefs.h> |
SPINLOCK_INITIALIZE(printf_lock); /**< vprintf spinlock */ |
/branches/network/kernel/generic/src/printf/printf_core.c |
---|
75,7 → 75,6 |
PrintfQualifierInt, |
PrintfQualifierLong, |
PrintfQualifierLongLong, |
PrintfQualifierNative, |
PrintfQualifierPointer |
} qualifier_t; |
93,7 → 92,7 |
static int printf_putnchars(const char * buf, size_t count, |
struct printf_spec *ps) |
{ |
return ps->write((void *)buf, count, ps->data); |
return ps->write((void *) buf, count, ps->data); |
} |
/** Print a string without adding a newline. |
177,8 → 176,8 |
* |
* @return Number of characters printed, negative value on failure. |
*/ |
static int print_string(char *s, int width, int precision, uint64_t flags, |
struct printf_spec *ps) |
static int print_string(char *s, int width, unsigned int precision, |
uint64_t flags, struct printf_spec *ps) |
{ |
int counter = 0; |
size_t size; |
432,7 → 431,6 |
* - "" Signed or unsigned int (default value).@n |
* - "l" Signed or unsigned long int.@n |
* - "ll" Signed or unsigned long long int.@n |
* - "z" unative_t (non-standard extension).@n |
* |
* |
* CONVERSION:@n |
486,7 → 484,7 |
while ((c = fmt[i])) { |
/* control character */ |
if (c == '%' ) { |
if (c == '%') { |
/* print common characters if any processed */ |
if (i > j) { |
if ((retval = printf_putnchars(&fmt[j], |
536,7 → 534,7 |
} else if (fmt[i] == '*') { |
/* get width value from argument list */ |
i++; |
width = (int)va_arg(ap, int); |
width = (int) va_arg(ap, int); |
if (width < 0) { |
/* negative width sets '-' flag */ |
width *= -1; |
559,7 → 557,7 |
* list. |
*/ |
i++; |
precision = (int)va_arg(ap, int); |
precision = (int) va_arg(ap, int); |
if (precision < 0) { |
/* ignore negative precision */ |
precision = 0; |
585,9 → 583,6 |
qualifier = PrintfQualifierLongLong; |
} |
break; |
case 'z': /* unative_t */ |
qualifier = PrintfQualifierNative; |
break; |
default: |
/* default type */ |
qualifier = PrintfQualifierInt; |
627,7 → 622,7 |
* Integer values |
*/ |
case 'P': /* pointer */ |
flags |= __PRINTF_FLAG_BIGCHARS; |
flags |= __PRINTF_FLAG_BIGCHARS; |
case 'p': |
flags |= __PRINTF_FLAG_PREFIX; |
base = 16; |
670,34 → 665,28 |
switch (qualifier) { |
case PrintfQualifierByte: |
size = sizeof(unsigned char); |
number = (uint64_t)va_arg(ap, unsigned int); |
number = (uint64_t) va_arg(ap, unsigned int); |
break; |
case PrintfQualifierShort: |
size = sizeof(unsigned short); |
number = (uint64_t)va_arg(ap, unsigned int); |
number = (uint64_t) va_arg(ap, unsigned int); |
break; |
case PrintfQualifierInt: |
size = sizeof(unsigned int); |
number = (uint64_t)va_arg(ap, unsigned int); |
number = (uint64_t) va_arg(ap, unsigned int); |
break; |
case PrintfQualifierLong: |
size = sizeof(unsigned long); |
number = (uint64_t)va_arg(ap, unsigned long); |
number = (uint64_t) va_arg(ap, unsigned long); |
break; |
case PrintfQualifierLongLong: |
size = sizeof(unsigned long long); |
number = (uint64_t)va_arg(ap, |
unsigned long long); |
number = (uint64_t) va_arg(ap, unsigned long long); |
break; |
case PrintfQualifierPointer: |
size = sizeof(void *); |
number = (uint64_t)(unsigned long)va_arg(ap, |
void *); |
number = (uint64_t) (unsigned long) va_arg(ap, void *); |
break; |
case PrintfQualifierNative: |
size = sizeof(unative_t); |
number = (uint64_t)va_arg(ap, unative_t); |
break; |
default: /* Unknown qualifier */ |
counter = -counter; |
goto out; |
708,7 → 697,7 |
flags |= __PRINTF_FLAG_NEGATIVE; |
if (size == sizeof(uint64_t)) { |
number = -((int64_t)number); |
number = -((int64_t) number); |
} else { |
number = ~number; |
number &= |
734,7 → 723,7 |
} |
if (i > j) { |
if ((retval = printf_putnchars(&fmt[j], (unative_t)(i - j), |
if ((retval = printf_putnchars(&fmt[j], (unative_t) (i - j), |
ps)) < 0) { /* error */ |
counter = -counter; |
goto out; |
744,7 → 733,6 |
} |
out: |
return counter; |
} |
/branches/network/kernel/generic/src/interrupt/interrupt.c |
---|
103,31 → 103,37 |
/** kconsole cmd - print all exceptions */ |
static int exc_print_cmd(cmd_arg_t *argv) |
{ |
#if (IVT_ITEMS > 0) |
unsigned int i; |
char *symbol; |
spinlock_lock(&exctbl_lock); |
#ifdef __32_BITS__ |
printf("Exc Description Handler Symbol\n"); |
printf("--- -------------------- ---------- --------\n"); |
#endif |
#ifdef __64_BITS__ |
printf("Exc Description Handler Symbol\n"); |
printf("--- -------------------- ------------------ --------\n"); |
#endif |
if (sizeof(void *) == 4) { |
printf("Exc Description Handler Symbol\n"); |
printf("--- -------------------- ---------- --------\n"); |
} else { |
printf("Exc Description Handler Symbol\n"); |
printf("--- -------------------- ------------------ --------\n"); |
} |
for (i = 0; i < IVT_ITEMS; i++) { |
symbol = get_symtab_entry((unative_t) exc_table[i].f); |
if (!symbol) |
symbol = "not found"; |
#ifdef __32_BITS__ |
printf("%-3u %-20s %10p %s\n", i + IVT_FIRST, exc_table[i].name, |
exc_table[i].f, symbol); |
#endif |
#ifdef __64_BITS__ |
printf("%-3u %-20s %18p %s\n", i + IVT_FIRST, exc_table[i].name, |
exc_table[i].f, symbol); |
#endif |
if (sizeof(void *) == 4) |
printf("%-3u %-20s %#10zx %s\n", i + IVT_FIRST, exc_table[i].name, |
exc_table[i].f, symbol); |
else |
printf("%-3u %-20s %#18zx %s\n", i + IVT_FIRST, exc_table[i].name, |
exc_table[i].f, symbol); |
if (((i + 1) % 20) == 0) { |
printf(" -- Press any key to continue -- "); |
spinlock_unlock(&exctbl_lock); |
138,6 → 144,7 |
} |
spinlock_unlock(&exctbl_lock); |
#endif |
return 1; |
} |
156,7 → 163,7 |
{ |
int i; |
for (i=0;i < IVT_ITEMS; i++) |
for (i = 0; i < IVT_ITEMS; i++) |
exc_register(i, "undef", (iroutine) exc_undef); |
cmd_initialize(&exc_info); |
/branches/network/kernel/generic/src/time/timeout.c |
---|
32,7 → 32,7 |
/** |
* @file |
* @brief Timeout management functions. |
* @brief Timeout management functions. |
*/ |
#include <time/timeout.h> |
61,7 → 61,7 |
* |
* Initialize all members except the lock. |
* |
* @param t Timeout to be initialized. |
* @param t Timeout to be initialized. |
* |
*/ |
void timeout_reinitialize(timeout_t *t) |
78,7 → 78,7 |
* |
* Initialize all members including the lock. |
* |
* @param t Timeout to be initialized. |
* @param t Timeout to be initialized. |
* |
*/ |
void timeout_initialize(timeout_t *t) |
94,14 → 94,14 |
* to timeout list and make it execute in |
* time microseconds (or slightly more). |
* |
* @param t Timeout structure. |
* @param time Number of usec in the future to execute |
* the handler. |
* @param f Timeout handler function. |
* @param arg Timeout handler argument. |
* @param t Timeout structure. |
* @param time Number of usec in the future to execute the handler. |
* @param f Timeout handler function. |
* @param arg Timeout handler argument. |
* |
*/ |
void timeout_register(timeout_t *t, uint64_t time, timeout_handler_t f, void *arg) |
void |
timeout_register(timeout_t *t, uint64_t time, timeout_handler_t f, void *arg) |
{ |
timeout_t *hlp = NULL; |
link_t *l, *m; |
165,9 → 165,9 |
* |
* Remove timeout from timeout list. |
* |
* @param t Timeout to unregister. |
* @param t Timeout to unregister. |
* |
* @return true on success, false on failure. |
* @return True on success, false on failure. |
*/ |
bool timeout_unregister(timeout_t *t) |
{ |
/branches/network/kernel/generic/src/time/clock.c |
---|
137,7 → 137,7 |
timeout_handler_t f; |
void *arg; |
count_t missed_clock_ticks = CPU->missed_clock_ticks; |
int i; |
unsigned int i; |
/* |
* To avoid lock ordering problems, |
/branches/network/kernel/generic/include/ipc/ipc.h |
---|
205,6 → 205,8 |
#define IPC_MAX_PHONES 16 |
#include <synch/spinlock.h> |
#include <synch/mutex.h> |
#include <synch/waitq.h> |
struct answerbox; |
225,7 → 227,7 |
/** Structure identifying phone (in TASK structure) */ |
typedef struct { |
SPINLOCK_DECLARE(lock); |
mutex_t lock; |
link_t link; |
struct answerbox *callee; |
ipc_phone_state_t state; |
258,6 → 260,12 |
typedef struct { |
unative_t args[IPC_CALL_LEN]; |
phone_t *phone; |
/* |
* The forward operation can masquerade the caller phone. For those |
* cases, we must keep it aside so that the answer is processed |
* correctly. |
*/ |
phone_t *caller_phone; |
} ipc_data_t; |
typedef struct { |
282,23 → 290,22 |
} call_t; |
extern void ipc_init(void); |
extern call_t * ipc_wait_for_call(answerbox_t *box, uint32_t usec, int flags); |
extern void ipc_answer(answerbox_t *box, call_t *request); |
extern int ipc_call(phone_t *phone, call_t *call); |
extern void ipc_call_sync(phone_t *phone, call_t *request); |
extern void ipc_phone_init(phone_t *phone); |
extern void ipc_phone_connect(phone_t *phone, answerbox_t *box); |
extern void ipc_call_free(call_t *call); |
extern call_t * ipc_call_alloc(int flags); |
extern void ipc_answerbox_init(answerbox_t *box); |
extern void ipc_call_static_init(call_t *call); |
extern call_t * ipc_wait_for_call(answerbox_t *, uint32_t, int); |
extern void ipc_answer(answerbox_t *, call_t *); |
extern int ipc_call(phone_t *, call_t *); |
extern int ipc_call_sync(phone_t *, call_t *); |
extern void ipc_phone_init(phone_t *); |
extern void ipc_phone_connect(phone_t *, answerbox_t *); |
extern void ipc_call_free(call_t *); |
extern call_t * ipc_call_alloc(int); |
extern void ipc_answerbox_init(answerbox_t *, struct task *); |
extern void ipc_call_static_init(call_t *); |
extern void task_print_list(void); |
extern int ipc_forward(call_t *call, phone_t *newphone, answerbox_t *oldbox, |
int mode); |
extern int ipc_forward(call_t *, phone_t *, answerbox_t *, int); |
extern void ipc_cleanup(void); |
extern int ipc_phone_hangup(phone_t *phone); |
extern void ipc_backsend_err(phone_t *phone, call_t *call, unative_t err); |
extern void ipc_print_task(task_id_t taskid); |
extern int ipc_phone_hangup(phone_t *); |
extern void ipc_backsend_err(phone_t *, call_t *, unative_t); |
extern void ipc_print_task(task_id_t); |
extern answerbox_t *ipc_phone_0; |
/branches/network/kernel/generic/include/errno.h |
---|
56,6 → 56,7 |
#define EINVAL -13 /* Invalid value */ |
#define EBUSY -14 /* Resource is busy */ |
#define EOVERFLOW -15 /* The result does not fit its size. */ |
#define EINTR -16 /* Operation was interrupted. */ |
#endif |
/branches/network/kernel/generic/include/lib/objc_ext.h |
---|
File deleted |
/branches/network/kernel/generic/include/lib/objc.h |
---|
File deleted |
/branches/network/kernel/generic/include/lib/elf.h |
---|
114,7 → 114,8 |
#define EE_MEMORY 2 /* Cannot allocate address space */ |
#define EE_INCOMPATIBLE 3 /* ELF image is not compatible with current architecture */ |
#define EE_UNSUPPORTED 4 /* Non-supported ELF (e.g. dynamic ELFs) */ |
#define EE_IRRECOVERABLE 5 |
#define EE_LOADER 5 /* The image is actually a program loader */ |
#define EE_IRRECOVERABLE 6 |
/** |
* ELF section types |
336,8 → 337,12 |
typedef struct elf64_symbol elf_symbol_t; |
#endif |
extern char *elf_error(int rc); |
extern char *elf_error(unsigned int rc); |
/* Interpreter string used to recognize the program loader */ |
#define ELF_INTERP_ZSTR "kernel" |
#define ELF_INTERP_ZLEN sizeof(ELF_INTERP_ZSTR) |
#endif |
/** @} |
/branches/network/kernel/generic/include/lib/rd.h |
---|
66,16 → 66,18 |
#define RE_UNSUPPORTED 2 /* Non-supported image (e.g. wrong version) */ |
/** RAM disk header */ |
typedef struct { |
struct rd_header { |
uint8_t magic[RD_MAGIC_SIZE]; |
uint8_t version; |
uint8_t data_type; |
uint32_t header_size; |
uint64_t data_size; |
} rd_header; |
} __attribute__ ((packed)); |
extern int init_rd(rd_header * addr, size_t size); |
typedef struct rd_header rd_header_t; |
extern int init_rd(rd_header_t *addr, size_t size); |
#endif |
/** @} |
/branches/network/kernel/generic/include/mm/as.h |
---|
53,10 → 53,6 |
#include <adt/btree.h> |
#include <lib/elf.h> |
#ifdef __OBJC__ |
#include <lib/objc.h> |
#endif |
/** |
* Defined to be true if user address space and kernel address space shadow each |
* other. |
84,47 → 80,6 |
/** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */ |
#define AS_PF_DEFER 2 |
#ifdef __OBJC__ |
@interface as_t : base_t { |
@public |
/** Protected by asidlock. */ |
link_t inactive_as_with_asid_link; |
/** |
* Number of processors on wich is this address space active. |
* Protected by asidlock. |
*/ |
count_t cpu_refcount; |
/** |
* Address space identifier. |
* Constant on architectures that do not support ASIDs. |
* Protected by asidlock. |
*/ |
asid_t asid; |
/** Number of references (i.e tasks that reference this as). */ |
atomic_t refcount; |
mutex_t lock; |
/** B+tree of address space areas. */ |
btree_t as_area_btree; |
/** Non-generic content. */ |
as_genarch_t genarch; |
/** Architecture specific content. */ |
as_arch_t arch; |
} |
+ (pte_t *) page_table_create: (int) flags; |
+ (void) page_table_destroy: (pte_t *) page_table; |
- (void) page_table_lock: (bool) _lock; |
- (void) page_table_unlock: (bool) unlock; |
@end |
#else |
/** Address space structure. |
* |
* as_t contains the list of as_areas of userspace accessible |
168,7 → 123,6 |
void (* page_table_lock)(as_t *as, bool lock); |
void (* page_table_unlock)(as_t *as, bool unlock); |
} as_operations_t; |
#endif |
/** |
* This structure contains information associated with the shared address space |
249,10 → 203,7 |
extern as_t *AS_KERNEL; |
#ifndef __OBJC__ |
extern as_operations_t *as_operations; |
#endif |
extern link_t inactive_as_with_asid_head; |
extern void as_init(void); |
269,6 → 220,7 |
extern int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags); |
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask); |
extern int as_area_change_flags(as_t *as, int flags, uintptr_t address); |
extern int as_area_get_flags(as_area_t *area); |
extern bool as_area_check_access(as_area_t *area, pf_access_t access); |
299,11 → 251,19 |
extern mem_backend_t elf_backend; |
extern mem_backend_t phys_backend; |
extern int elf_load(elf_header_t *header, as_t *as); |
/** |
* This flags is passed when running the loader, otherwise elf_load() |
* would return with a EE_LOADER error code. |
*/ |
#define ELD_F_NONE 0 |
#define ELD_F_LOADER 1 |
extern unsigned int elf_load(elf_header_t *header, as_t *as, int flags); |
/* Address space area related syscalls. */ |
extern unative_t sys_as_area_create(uintptr_t address, size_t size, int flags); |
extern unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags); |
extern unative_t sys_as_area_change_flags(uintptr_t address, int flags); |
extern unative_t sys_as_area_destroy(uintptr_t address); |
/* Introspection functions. */ |
/branches/network/kernel/generic/include/mm/page.h |
---|
39,11 → 39,6 |
#include <mm/as.h> |
#include <memstr.h> |
/** |
* Macro for computing page color. |
*/ |
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1)) |
/** Operations to manipulate page mappings. */ |
typedef struct { |
void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, |
/branches/network/kernel/generic/include/mm/frame.h |
---|
57,15 → 57,14 |
/** Maximum number of zones in system. */ |
#define ZONES_MAX 16 |
/** If possible, merge with neighbouring zones. */ |
#define ZONE_JOIN 0x1 |
/** Convert the frame address to kernel va. */ |
#define FRAME_KA 0x1 |
/** Do not panic and do not sleep on failure. */ |
#define FRAME_ATOMIC 0x2 |
#define FRAME_ATOMIC 0x2 |
/** Do not start reclaiming when no free memory. */ |
#define FRAME_NO_RECLAIM 0x4 |
#define FRAME_NO_RECLAIM 0x4 |
/** Do not allocate above 4 GiB. */ |
#define FRAME_LOW_4_GiB 0x8 |
static inline uintptr_t PFN2ADDR(pfn_t frame) |
{ |
90,30 → 89,30 |
} |
#define IS_BUDDY_ORDER_OK(index, order) \ |
((~(((unative_t) -1) << (order)) & (index)) == 0) |
((~(((unative_t) -1) << (order)) & (index)) == 0) |
#define IS_BUDDY_LEFT_BLOCK(zone, frame) \ |
(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0) |
(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0) |
#define IS_BUDDY_RIGHT_BLOCK(zone, frame) \ |
(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1) |
(((frame_index((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1) |
#define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) \ |
(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0) |
(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0) |
#define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) \ |
(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1) |
(((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1) |
#define frame_alloc(order, flags) \ |
frame_alloc_generic(order, flags, NULL) |
frame_alloc_generic(order, flags, NULL) |
extern void frame_init(void); |
extern void *frame_alloc_generic(uint8_t order, int flags, unsigned int *pzone); |
extern void frame_free(uintptr_t frame); |
extern void frame_reference_add(pfn_t pfn); |
extern void *frame_alloc_generic(uint8_t, int, unsigned int *); |
extern void frame_free(uintptr_t); |
extern void frame_reference_add(pfn_t); |
extern int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags); |
extern void *frame_get_parent(pfn_t frame, unsigned int hint); |
extern void frame_set_parent(pfn_t frame, void *data, unsigned int hint); |
extern void frame_mark_unavailable(pfn_t start, count_t count); |
extern uintptr_t zone_conf_size(count_t count); |
extern void zone_merge(unsigned int z1, unsigned int z2); |
extern int zone_create(pfn_t, count_t, pfn_t, int); |
extern void *frame_get_parent(pfn_t, unsigned int); |
extern void frame_set_parent(pfn_t, void *, unsigned int); |
extern void frame_mark_unavailable(pfn_t, count_t); |
extern uintptr_t zone_conf_size(count_t); |
extern void zone_merge(unsigned int, unsigned int); |
extern void zone_merge_all(void); |
extern uint64_t zone_total_size(void); |
121,7 → 120,7 |
* Console functions |
*/ |
extern void zone_print_list(void); |
extern void zone_print_one(unsigned int znum); |
extern void zone_print_one(unsigned int); |
#endif |
/branches/network/kernel/generic/include/mm/buddy.h |
---|
66,7 → 66,6 |
void (*mark_available)(struct buddy_system *, link_t *); |
/** Find parent of block that has given order */ |
link_t *(* find_block)(struct buddy_system *, link_t *, uint8_t); |
void (* print_id)(struct buddy_system *, link_t *); |
} buddy_system_operations_t; |
typedef struct buddy_system { |
78,14 → 77,13 |
void *data; |
} buddy_system_t; |
extern void buddy_system_create(buddy_system_t *b, uint8_t max_order, |
buddy_system_operations_t *op, void *data); |
extern link_t *buddy_system_alloc(buddy_system_t *b, uint8_t i); |
extern bool buddy_system_can_alloc(buddy_system_t *b, uint8_t order); |
extern void buddy_system_free(buddy_system_t *b, link_t *block); |
extern void buddy_system_structure_print(buddy_system_t *b, size_t elem_size); |
extern size_t buddy_conf_size(int max_order); |
extern link_t *buddy_system_alloc_block(buddy_system_t *b, link_t *block); |
extern void buddy_system_create(buddy_system_t *, uint8_t, |
buddy_system_operations_t *, void *); |
extern link_t *buddy_system_alloc(buddy_system_t *, uint8_t); |
extern bool buddy_system_can_alloc(buddy_system_t *, uint8_t); |
extern void buddy_system_free(buddy_system_t *, link_t *); |
extern size_t buddy_conf_size(int); |
extern link_t *buddy_system_alloc_block(buddy_system_t *, link_t *); |
#endif |
/branches/network/kernel/generic/include/mm/slab.h |
---|
53,7 → 53,8 |
#define SLAB_INSIDE_SIZE (PAGE_SIZE >> 3) |
/** Maximum wasted space we allow for cache */ |
#define SLAB_MAX_BADNESS(cache) ((PAGE_SIZE << (cache)->order) >> 2) |
#define SLAB_MAX_BADNESS(cache) \ |
(((unsigned int) PAGE_SIZE << (cache)->order) >> 2) |
/* slab_reclaim constants */ |
99,8 → 100,8 |
int flags; |
/* Computed values */ |
uint8_t order; /**< Order of frames to be allocated */ |
int objects; /**< Number of objects that fit in */ |
uint8_t order; /**< Order of frames to be allocated */ |
unsigned int objects; /**< Number of objects that fit in */ |
/* Statistics */ |
atomic_t allocated_slabs; |
121,14 → 122,13 |
slab_mag_cache_t *mag_cache; |
} slab_cache_t; |
extern slab_cache_t * slab_cache_create(char *name, size_t size, size_t align, |
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj), |
int flags); |
extern void slab_cache_destroy(slab_cache_t *cache); |
extern slab_cache_t *slab_cache_create(char *, size_t, size_t, |
int (*)(void *, int), int (*)(void *), int); |
extern void slab_cache_destroy(slab_cache_t *); |
extern void * slab_alloc(slab_cache_t *cache, int flags); |
extern void slab_free(slab_cache_t *cache, void *obj); |
extern count_t slab_reclaim(int flags); |
extern void * slab_alloc(slab_cache_t *, int); |
extern void slab_free(slab_cache_t *, void *); |
extern count_t slab_reclaim(int); |
/* slab subsytem initialization */ |
extern void slab_cache_init(void); |
138,9 → 138,9 |
extern void slab_print_list(void); |
/* malloc support */ |
extern void * malloc(unsigned int size, int flags); |
extern void * realloc(void *ptr, unsigned int size, int flags); |
extern void free(void *ptr); |
extern void *malloc(unsigned int, int); |
extern void *realloc(void *, unsigned int, int); |
extern void free(void *); |
#endif |
/** @} |
/branches/network/kernel/generic/include/macros.h |
---|
40,20 → 40,20 |
#define isdigit(d) (((d) >= '0') && ((d) <= '9')) |
#define islower(c) (((c) >= 'a') && ((c) <= 'z')) |
#define isupper(c) (((c) >= 'A') && ((c) <= 'Z')) |
#define isalpha(c) (is_lower(c) || is_upper(c)) |
#define isalphanum(c) (is_alpha(c) || is_digit(c)) |
#define isalpha(c) (is_lower((c)) || is_upper((c))) |
#define isalphanum(c) (is_alpha((c)) || is_digit((c))) |
#define isspace(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || \ |
((c) == '\r')) |
((c) == '\r')) |
#define min(a,b) ((a) < (b) ? (a) : (b)) |
#define max(a,b) ((a) > (b) ? (a) : (b)) |
/** Return true if the interlvals overlap. |
/** Return true if the intervals overlap. |
* |
* @param s1 Start address of the first interval. |
* @param sz1 Size of the first interval. |
* @param s2 Start address of the second interval. |
* @param sz2 Size of the second interval. |
* @param s1 Start address of the first interval. |
* @param sz1 Size of the first interval. |
* @param s2 Start address of the second interval. |
* @param sz2 Size of the second interval. |
*/ |
static inline int overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2) |
{ |
64,11 → 64,15 |
} |
/* Compute overlapping of physical addresses */ |
#define PA_overlaps(x, szx, y, szy) overlaps(KA2PA(x), szx, KA2PA(y), szy) |
#define PA_overlaps(x, szx, y, szy) \ |
overlaps(KA2PA((x)), (szx), KA2PA((y)), (szy)) |
#define SIZE2KB(size) (size >> 10) |
#define SIZE2MB(size) (size >> 20) |
#define SIZE2KB(size) ((size) >> 10) |
#define SIZE2MB(size) ((size) >> 20) |
#define KB2SIZE(kb) ((kb) << 10) |
#define MB2SIZE(mb) ((mb) << 20) |
#define STRING(arg) STRING_ARG(arg) |
#define STRING_ARG(arg) #arg |
/branches/network/kernel/generic/include/config.h |
---|
40,8 → 40,6 |
#define STACK_SIZE PAGE_SIZE |
#define CONFIG_MEMORY_SIZE (8 * 1024 * 1024) |
#define CONFIG_INIT_TASKS 32 |
typedef struct { |
/branches/network/kernel/generic/include/proc/task.h |
---|
116,7 → 116,6 |
extern void task_done(void); |
extern task_t *task_create(as_t *as, char *name); |
extern void task_destroy(task_t *t); |
extern task_t *task_run_program(void *program_addr, char *name); |
extern task_t *task_find_by_id(task_id_t id); |
extern int task_kill(task_id_t id); |
extern uint64_t task_get_accounting(task_t *t); |
/branches/network/kernel/generic/include/proc/program.h |
---|
0,0 → 1,65 |
/* |
* Copyright (c) 2008 Jiri Svoboda |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** @file |
*/ |
#ifndef KERN_PROGRAM_H_ |
#define KERN_PROGRAM_H_ |
#include <arch/types.h> |
struct task; |
struct thread; |
/** Program info structure. |
* |
* A program is an abstraction of a freshly created (not yet running) |
* userspace task containing a main thread along with its userspace stack. |
*/ |
typedef struct program { |
struct task *task; /**< Program task */ |
struct thread *main_thread; /**< Program main thread */ |
} program_t; |
extern void *program_loader; |
extern void program_create(as_t *as, uintptr_t entry_addr, program_t *p); |
extern int program_create_from_image(void *image_addr, program_t *p); |
extern int program_create_loader(program_t *p); |
extern void program_ready(program_t *p); |
extern unative_t sys_program_spawn_loader(int *uspace_phone_id); |
#endif |
/** @} |
*/ |
/branches/network/kernel/generic/include/proc/tasklet.h |
---|
0,0 → 1,73 |
/* |
* Copyright (c) 2007 Jan Hudecek |
* Copyright (c) 2008 Martin Decky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericproc |
* @{ |
*/ |
/** @file tasklet.h |
* @brief Tasklets declarations |
*/ |
#ifndef KERN_TASKLET_H_ |
#define KERN_TASKLET_H_ |
#include <adt/list.h> |
/** Tasklet callback type */ |
typedef void (* tasklet_callback_t)(void *arg); |
/** Tasklet state */ |
typedef enum { |
NotActive, |
Scheduled, |
InProgress, |
Disabled |
} tasklet_state_t; |
/** Structure describing a tasklet */ |
typedef struct tasklet_descriptor { |
link_t link; |
/** Callback to call */ |
tasklet_callback_t callback; |
/** Argument passed to the callback */ |
void *arg; |
/** State of the tasklet */ |
tasklet_state_t state; |
} tasklet_descriptor_t; |
extern void tasklet_init(void); |
#endif |
/** @} |
*/ |
/branches/network/kernel/generic/include/synch/smc.h |
---|
0,0 → 1,43 |
/* |
* Copyright (c) 2008 Jiri Svoboda |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sync |
* @{ |
*/ |
/** @file |
*/ |
#ifndef KERN_SMC_H_ |
#define KERN_SMC_H_ |
extern unative_t sys_smc_coherence(uintptr_t va, size_t size); |
#endif |
/** @} |
*/ |
/branches/network/kernel/generic/include/synch/mutex.h |
---|
39,20 → 39,26 |
#include <synch/semaphore.h> |
#include <synch/synch.h> |
typedef enum { |
MUTEX_PASSIVE, |
MUTEX_ACTIVE |
} mutex_type_t; |
typedef struct { |
mutex_type_t type; |
semaphore_t sem; |
} mutex_t; |
#define mutex_lock(mtx) \ |
#define mutex_lock(mtx) \ |
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE) |
#define mutex_trylock(mtx) \ |
#define mutex_trylock(mtx) \ |
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING) |
#define mutex_lock_timeout(mtx, usec) \ |
#define mutex_lock_timeout(mtx, usec) \ |
_mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING) |
extern void mutex_initialize(mutex_t *mtx); |
extern int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags); |
extern void mutex_unlock(mutex_t *mtx); |
extern void mutex_initialize(mutex_t *, mutex_type_t); |
extern int _mutex_lock_timeout(mutex_t *, uint32_t, int); |
extern void mutex_unlock(mutex_t *); |
#endif |
/branches/network/kernel/generic/include/synch/spinlock.h |
---|
110,8 → 110,8 |
#define DEADLOCK_PROBE(pname, value) \ |
if ((pname)++ > (value)) { \ |
(pname) = 0; \ |
printf("Deadlock probe %s: exceeded threshold %d\n", \ |
"cpu%d: function=%s, line=%d\n", \ |
printf("Deadlock probe %s: exceeded threshold %u\n", \ |
"cpu%u: function=%s, line=%u\n", \ |
#pname, (value), CPU->id, __func__, __LINE__); \ |
} |
#else |
/branches/network/kernel/generic/include/syscall/syscall.h |
---|
36,17 → 36,25 |
#define KERN_SYSCALL_H_ |
typedef enum { |
SYS_IO = 0, |
SYS_KLOG = 0, |
SYS_TLS_SET = 1, /* Hardcoded in AMD64, IA32 uspace - fibril.S */ |
SYS_THREAD_CREATE, |
SYS_THREAD_EXIT, |
SYS_THREAD_GET_ID, |
SYS_TASK_GET_ID, |
SYS_PROGRAM_SPAWN_LOADER, |
SYS_FUTEX_SLEEP, |
SYS_FUTEX_WAKEUP, |
SYS_SMC_COHERENCE, |
SYS_AS_AREA_CREATE, |
SYS_AS_AREA_RESIZE, |
SYS_AS_AREA_CHANGE_FLAGS, |
SYS_AS_AREA_DESTROY, |
SYS_IPC_CALL_SYNC_FAST, |
SYS_IPC_CALL_SYNC_SLOW, |
SYS_IPC_CALL_ASYNC_FAST, |
58,13 → 66,17 |
SYS_IPC_HANGUP, |
SYS_IPC_REGISTER_IRQ, |
SYS_IPC_UNREGISTER_IRQ, |
SYS_CAP_GRANT, |
SYS_CAP_REVOKE, |
SYS_PHYSMEM_MAP, |
SYS_IOSPACE_ENABLE, |
SYS_PREEMPT_CONTROL, |
SYS_SYSINFO_VALID, |
SYS_SYSINFO_VALUE, |
SYS_DEBUG_ENABLE_CONSOLE, |
SYSCALL_END |
} syscall_t; |
/branches/network/kernel/generic/include/memstr.h |
---|
42,8 → 42,8 |
* Architecture independent variants. |
*/ |
extern void *_memcpy(void *dst, const void *src, size_t cnt); |
extern void _memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern void _memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void _memsetb(void *dst, size_t cnt, uint8_t x); |
extern void _memsetw(void *dst, size_t cnt, uint16_t x); |
extern char *strcpy(char *dest, const char *src); |
#endif |
/branches/network/kernel/generic/include/typedefs.h |
---|
35,8 → 35,22 |
#ifndef KERN_TYPEDEFS_H_ |
#define KERN_TYPEDEFS_H_ |
#include <arch/types.h> |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef void (* function)(); |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
#endif |
/** @} |
/branches/network/kernel/generic/include/console/klog.h |
---|
File deleted |
/branches/network/kernel/generic/include/console/console.h |
---|
41,6 → 41,9 |
extern chardev_t *stdin; |
extern chardev_t *stdout; |
extern void klog_init(void); |
extern void klog_update(void); |
extern uint8_t getc(chardev_t *chardev); |
uint8_t _getc(chardev_t *chardev); |
extern count_t gets(chardev_t *chardev, char *buf, size_t buflen); |
/branches/network/kernel/generic/include/stackarg.h |
---|
52,9 → 52,9 |
(ap).last = (uint8_t *) &(lst) |
#define va_arg(ap, type) \ |
(*((type *)((ap).last + ((ap).pos += sizeof(type) ) - sizeof(type)))) |
(*((type *)((ap).last + ((ap).pos += sizeof(type)) - sizeof(type)))) |
#define va_copy(dst,src) dst=src |
#define va_copy(dst, src) dst = src |
#define va_end(ap) |
/branches/network/kernel/generic/include/interrupt.h |
---|
40,7 → 40,6 |
#include <proc/task.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <console/klog.h> |
#include <ddi/irq.h> |
typedef void (* iroutine)(int n, istate_t *istate); |
49,8 → 48,8 |
{ \ |
if (istate_from_uspace(istate)) { \ |
task_t *task = TASK; \ |
klog_printf("Task %llu killed due to an exception at %p.", task->taskid, istate_get_pc(istate)); \ |
klog_printf(" " cmd, ##__VA_ARGS__); \ |
printf("Task %" PRIu64 " killed due to an exception at %p.", task->taskid, istate_get_pc(istate)); \ |
printf(" " cmd, ##__VA_ARGS__); \ |
task_kill(task->taskid); \ |
thread_exit(); \ |
} \ |
/branches/network/kernel/generic/include/ddi/device.h |
---|
35,6 → 35,9 |
#ifndef KERN_DEVICE_H_ |
#define KERN_DEVICE_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
extern devno_t device_assign_devno(void); |
#endif |
/branches/network/kernel/generic/include/adt/list.h |
---|
36,6 → 36,7 |
#define KERN_LIST_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
/** Doubly linked list head and link type. */ |
typedef struct link { |
/branches/network/kernel/generic/include/adt/avl.h |
---|
36,6 → 36,7 |
#define KERN_AVLTREE_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
/** |
* Macro for getting a pointer to the structure which contains the avltree |
/branches/network/kernel/generic/include/debug.h |
---|
38,11 → 38,11 |
#include <panic.h> |
#include <arch/debug.h> |
#define CALLER ((uintptr_t)__builtin_return_address(0)) |
#define CALLER ((uintptr_t) __builtin_return_address(0)) |
#ifndef HERE |
/** Current Instruction Pointer address */ |
# define HERE ((uintptr_t *) 0) |
# define HERE ((uintptr_t *) 0) |
#endif |
/** Debugging ASSERT macro |
55,12 → 55,51 |
* |
*/ |
#ifdef CONFIG_DEBUG |
# define ASSERT(expr) if (!(expr)) { panic("assertion failed (%s), caller=%.*p\n", #expr, sizeof(uintptr_t) * 2, CALLER); } |
# define ASSERT(expr) \ |
if (!(expr)) { \ |
panic("assertion failed (%s), caller=%p\n", #expr, CALLER); \ |
} |
#else |
# define ASSERT(expr) |
#endif |
/** Extensive debugging output macro |
* |
* If CONFIG_EDEBUG is set, the LOG() macro |
* will print whatever message is indicated plus |
* an information about the location. |
* |
*/ |
#ifdef CONFIG_EDEBUG |
# define LOG(format, ...) \ |
printf("%s() at %s:%u: " format "\n", __func__, __FILE__, \ |
__LINE__, ##__VA_ARGS__); |
#else |
# define LOG(format, ...) |
#endif |
/** Extensive debugging execute macro |
* |
* If CONFIG_EDEBUG is set, the LOG_EXEC() macro |
* will print an information about calling a given |
* function and call it. |
* |
*/ |
#ifdef CONFIG_EDEBUG |
# define LOG_EXEC(fnc) \ |
{ \ |
printf("%s() at %s:%u: " #fnc "\n", __func__, __FILE__, \ |
__LINE__); \ |
fnc; \ |
} |
#else |
# define LOG_EXEC(fnc) fnc |
#endif |
#endif |
/** @} |
*/ |
/branches/network/kernel/generic/include/panic.h |
---|
36,15 → 36,15 |
#define KERN_PANIC_H_ |
#ifdef CONFIG_DEBUG |
#define panic(format, ...) \ |
panic_printf("Kernel panic in %s() at %s on line %d: " format, __func__, \ |
__FILE__, __LINE__, ##__VA_ARGS__); |
# define panic(format, ...) \ |
panic_printf("Kernel panic in %s() at %s:%u: " format, __func__, \ |
__FILE__, __LINE__, ##__VA_ARGS__); |
#else |
#define panic(format, ...) \ |
panic_printf("Kernel panic: " format, ##__VA_ARGS__); |
# define panic(format, ...) \ |
panic_printf("Kernel panic: " format, ##__VA_ARGS__); |
#endif |
extern void panic_printf(char *fmt, ...) __attribute__((noreturn)) ; |
extern void panic_printf(char *fmt, ...) __attribute__((noreturn)); |
#endif |
/branches/network/kernel/generic/include/byteorder.h |
---|
35,26 → 35,60 |
#ifndef KERN_BYTEORDER_H_ |
#define KERN_BYTEORDER_H_ |
#include <arch/byteorder.h> |
#if !(defined(ARCH_IS_BIG_ENDIAN) ^ defined(ARCH_IS_LITTLE_ENDIAN)) |
#error The architecture must be either big-endian or little-endian. |
#endif |
#ifdef ARCH_IS_BIG_ENDIAN |
#define uint16_t_le2host(n) uint16_t_byteorder_swap(n) |
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n) |
#define uint16_t_be2host(n) (n) |
#define uint32_t_be2host(n) (n) |
#define uint64_t_be2host(n) (n) |
#else |
#define uint16_t_le2host(n) (n) |
#define uint32_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#define uint16_t_be2host(n) uint16_t_byteorder_swap(n) |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#endif |
static inline uint64_t uint64_t_byteorder_swap(uint64_t n) |
{ |
return ((n & 0xff) << 56) | |
((n & 0xff00) << 40) | |
((n & 0xff0000) << 24) | |
((n & 0xff000000LL) << 8) | |
((n & 0xff00000000LL) >> 8) | |
((n & 0xff0000000000LL) >> 24) | |
((n & 0xff000000000000LL) >> 40) | |
((n & 0xff00000000000000LL) >> 56); |
((n & 0xff00) << 40) | |
((n & 0xff0000) << 24) | |
((n & 0xff000000LL) << 8) | |
((n & 0xff00000000LL) >> 8) | |
((n & 0xff0000000000LL) >> 24) | |
((n & 0xff000000000000LL) >> 40) | |
((n & 0xff00000000000000LL) >> 56); |
} |
static inline uint32_t uint32_t_byteorder_swap(uint32_t n) |
{ |
return ((n & 0xff) << 24) | |
((n & 0xff00) << 8) | |
((n & 0xff0000) >> 8) | |
((n & 0xff000000) >> 24); |
((n & 0xff00) << 8) | |
((n & 0xff0000) >> 8) | |
((n & 0xff000000) >> 24); |
} |
static inline uint16_t uint16_t_byteorder_swap(uint16_t n) |
{ |
return ((n & 0xff) << 8) | |
((n & 0xff00) >> 8); |
} |
#endif |
/** @} |
/branches/network/kernel/arch/ia32/include/atomic.h |
---|
41,17 → 41,17 |
static inline void atomic_inc(atomic_t *val) { |
#ifdef CONFIG_SMP |
asm volatile ("lock incl %0\n" : "=m" (val->count)); |
asm volatile ("lock incl %0\n" : "+m" (val->count)); |
#else |
asm volatile ("incl %0\n" : "=m" (val->count)); |
asm volatile ("incl %0\n" : "+m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
static inline void atomic_dec(atomic_t *val) { |
#ifdef CONFIG_SMP |
asm volatile ("lock decl %0\n" : "=m" (val->count)); |
asm volatile ("lock decl %0\n" : "+m" (val->count)); |
#else |
asm volatile ("decl %0\n" : "=m" (val->count)); |
asm volatile ("decl %0\n" : "+m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
61,7 → 61,7 |
asm volatile ( |
"lock xaddl %1, %0\n" |
: "=m" (val->count), "+r" (r) |
: "+m" (val->count), "+r" (r) |
); |
return r; |
73,14 → 73,14 |
asm volatile ( |
"lock xaddl %1, %0\n" |
: "=m" (val->count), "+r"(r) |
: "+m" (val->count), "+r"(r) |
); |
return r; |
} |
#define atomic_preinc(val) (atomic_postinc(val)+1) |
#define atomic_predec(val) (atomic_postdec(val)-1) |
#define atomic_preinc(val) (atomic_postinc(val) + 1) |
#define atomic_predec(val) (atomic_postdec(val) - 1) |
static inline uint32_t test_and_set(atomic_t *val) { |
uint32_t v; |
88,7 → 88,7 |
asm volatile ( |
"movl $1, %0\n" |
"xchgl %0, %1\n" |
: "=r" (v),"=m" (val->count) |
: "=r" (v),"+m" (val->count) |
); |
return v; |
101,20 → 101,20 |
preemption_disable(); |
asm volatile ( |
"0:;" |
"0:\n" |
#ifdef CONFIG_HT |
"pause;" /* Pentium 4's HT love this instruction */ |
"pause\n" /* Pentium 4's HT love this instruction */ |
#endif |
"mov %0, %1;" |
"testl %1, %1;" |
"jnz 0b;" /* Lightweight looping on locked spinlock */ |
"mov %0, %1\n" |
"testl %1, %1\n" |
"jnz 0b\n" /* lightweight looping on locked spinlock */ |
"incl %1;" /* now use the atomic operation */ |
"xchgl %0, %1;" |
"testl %1, %1;" |
"jnz 0b;" |
: "=m"(val->count),"=r"(tmp) |
); |
"incl %1\n" /* now use the atomic operation */ |
"xchgl %0, %1\n" |
"testl %1, %1\n" |
"jnz 0b\n" |
: "+m" (val->count), "=&r"(tmp) |
); |
/* |
* Prevent critical section code from bleeding out this way up. |
*/ |
/branches/network/kernel/arch/ia32/include/mm/page.h |
---|
40,8 → 40,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifdef KERNEL |
#ifndef __ASM__ |
128,6 → 126,8 |
#include <mm/mm.h> |
#include <arch/interrupt.h> |
#include <arch/types.h> |
#include <typedefs.h> |
/* Page fault error codes. */ |
/branches/network/kernel/arch/ia32/include/barrier.h |
---|
84,6 → 84,15 |
# endif |
#endif |
/* |
* On ia32, the hardware takes care about instruction and data cache coherence, |
* even on SMP systems. We issue a write barrier to be sure that writes |
* queueing in the store buffer drain to the memory (even though it would be |
* sufficient for them to drain to the D-cache). |
*/ |
#define smc_coherence(a) write_barrier() |
#define smc_coherence_block(a, l) write_barrier() |
#endif |
/** @} |
/branches/network/kernel/arch/ia32/include/memstr.h |
---|
35,116 → 35,13 |
#ifndef KERN_ia32_MEMSTR_H_ |
#define KERN_ia32_MEMSTR_H_ |
/** Copy memory |
* |
* Copy a given number of bytes (3rd argument) |
* from the memory location defined by 2nd argument |
* to the memory location defined by 1st argument. |
* The memory areas cannot overlap. |
* |
* @param dst Destination |
* @param src Source |
* @param cnt Number of bytes |
* @return Destination |
*/ |
static inline void * memcpy(void * dst, const void * src, size_t cnt) |
{ |
unative_t d0, d1, d2; |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
asm volatile( |
/* copy all full dwords */ |
"rep movsl\n\t" |
/* load count again */ |
"movl %4, %%ecx\n\t" |
/* ecx = ecx mod 4 */ |
"andl $3, %%ecx\n\t" |
/* are there last <=3 bytes? */ |
"jz 1f\n\t" |
/* copy last <=3 bytes */ |
"rep movsb\n\t" |
/* exit from asm block */ |
"1:\n" |
: "=&c" (d0), "=&D" (d1), "=&S" (d2) |
: "0" ((unative_t) (cnt / 4)), "g" ((unative_t) cnt), "1" ((unative_t) dst), "2" ((unative_t) src) |
: "memory"); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
return dst; |
} |
extern int memcmp(const void *a, const void *b, size_t cnt); |
/** Compare memory regions for equality |
* |
* Compare a given number of bytes (3rd argument) |
* at memory locations defined by 1st and 2nd argument |
* for equality. If bytes are equal function returns 0. |
* |
* @param src Region 1 |
* @param dst Region 2 |
* @param cnt Number of bytes |
* @return Zero if bytes are equal, non-zero otherwise |
*/ |
static inline int memcmp(const void * src, const void * dst, size_t cnt) |
{ |
uint32_t d0, d1, d2; |
int ret; |
asm ( |
"repe cmpsb\n\t" |
"je 1f\n\t" |
"movl %3, %0\n\t" |
"addl $1, %0\n\t" |
"1:\n" |
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2) |
: "0" (0), "1" ((unative_t) src), "2" ((unative_t) dst), "3" ((unative_t) cnt) |
); |
return ret; |
} |
/** Fill memory with words |
* Fill a given number of words (2nd argument) |
* at memory defined by 1st argument with the |
* word value defined by 3rd argument. |
* |
* @param dst Destination |
* @param cnt Number of words |
* @param x Value to fill |
*/ |
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x) |
{ |
uint32_t d0, d1; |
asm volatile ( |
"rep stosw\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" (cnt), "2" (x) |
: "memory" |
); |
} |
/** Fill memory with bytes |
* Fill a given number of bytes (2nd argument) |
* at memory defined by 1st argument with the |
* word value defined by 3rd argument. |
* |
* @param dst Destination |
* @param cnt Number of bytes |
* @param x Value to fill |
*/ |
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x) |
{ |
uint32_t d0, d1; |
asm volatile ( |
"rep stosb\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" (cnt), "2" (x) |
: "memory" |
); |
} |
#endif |
/** @} |
/branches/network/kernel/arch/ia32/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_ia32_TYPES_H_ |
#define KERN_ia32_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed long int32_t; |
61,14 → 57,29 |
typedef uint32_t unative_t; |
typedef int32_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
#define PRIp "x" /**< Format for uintptr_t. */ |
#define PRIs "u" /**< Format for size_t. */ |
#define PRIc "u" /**< Format for count_t. */ |
#define PRIi "u" /**< Format for index_t. */ |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
#define PRId8 "d" /**< Format for int8_t. */ |
#define PRId16 "d" /**< Format for int16_t. */ |
#define PRId32 "d" /**< Format for int32_t. */ |
#define PRId64 "lld" /**< Format for int64_t. */ |
#define PRIdn "d" /**< Format for native_t. */ |
#define PRIu8 "u" /**< Format for uint8_t. */ |
#define PRIu16 "u" /**< Format for uint16_t. */ |
#define PRIu32 "u" /**< Format for uint32_t. */ |
#define PRIu64 "llu" /**< Format for uint64_t. */ |
#define PRIun "u" /**< Format for unative_t. */ |
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */ |
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */ |
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */ |
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */ |
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */ |
/** Page Table Entry. */ |
typedef struct { |
unsigned present : 1; |
/branches/network/kernel/arch/ia32/include/smp/apic.h |
---|
105,8 → 105,8 |
#define MODEL_CLUSTER 0x0 |
/** Interrupt Command Register. */ |
#define ICRlo (0x300/sizeof(uint32_t)) |
#define ICRhi (0x310/sizeof(uint32_t)) |
#define ICRlo (0x300 / sizeof(uint32_t)) |
#define ICRhi (0x310 / sizeof(uint32_t)) |
typedef struct { |
union { |
uint32_t lo; |
133,10 → 133,10 |
} __attribute__ ((packed)) icr_t; |
/* End Of Interrupt. */ |
#define EOI (0x0b0/sizeof(uint32_t)) |
#define EOI (0x0b0 / sizeof(uint32_t)) |
/** Error Status Register. */ |
#define ESR (0x280/sizeof(uint32_t)) |
#define ESR (0x280 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
uint8_t err_bitmap; |
154,7 → 154,7 |
} esr_t; |
/* Task Priority Register */ |
#define TPR (0x080/sizeof(uint32_t)) |
#define TPR (0x080 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
164,7 → 164,7 |
} tpr_t; |
/** Spurious-Interrupt Vector Register. */ |
#define SVR (0x0f0/sizeof(uint32_t)) |
#define SVR (0x0f0 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
176,7 → 176,7 |
} svr_t; |
/** Time Divide Configuration Register. */ |
#define TDCR (0x3e0/sizeof(uint32_t)) |
#define TDCR (0x3e0 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
186,13 → 186,13 |
} tdcr_t; |
/* Initial Count Register for Timer */ |
#define ICRT (0x380/sizeof(uint32_t)) |
#define ICRT (0x380 / sizeof(uint32_t)) |
/* Current Count Register for Timer */ |
#define CCRT (0x390/sizeof(uint32_t)) |
#define CCRT (0x390 / sizeof(uint32_t)) |
/** LVT Timer register. */ |
#define LVT_Tm (0x320/sizeof(uint32_t)) |
#define LVT_Tm (0x320 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
207,8 → 207,8 |
} lvt_tm_t; |
/** LVT LINT registers. */ |
#define LVT_LINT0 (0x350/sizeof(uint32_t)) |
#define LVT_LINT1 (0x360/sizeof(uint32_t)) |
#define LVT_LINT0 (0x350 / sizeof(uint32_t)) |
#define LVT_LINT1 (0x360 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
225,7 → 225,7 |
} lvt_lint_t; |
/** LVT Error register. */ |
#define LVT_Err (0x370/sizeof(uint32_t)) |
#define LVT_Err (0x370 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
239,7 → 239,7 |
} lvt_error_t; |
/** Local APIC ID Register. */ |
#define L_APIC_ID (0x020/sizeof(uint32_t)) |
#define L_APIC_ID (0x020 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
249,14 → 249,14 |
} l_apic_id_t; |
/** Local APIC Version Register */ |
#define LAVR (0x030/sizeof(uint32_t)) |
#define LAVR (0x030 / sizeof(uint32_t)) |
#define LAVR_Mask 0xff |
#define is_local_apic(x) (((x)&LAVR_Mask&0xf0)==0x1) |
#define is_82489DX_apic(x) ((((x)&LAVR_Mask&0xf0)==0x0)) |
#define is_local_xapic(x) (((x)&LAVR_Mask)==0x14) |
#define is_local_apic(x) (((x) & LAVR_Mask & 0xf0) == 0x1) |
#define is_82489DX_apic(x) ((((x) & LAVR_Mask & 0xf0) == 0x0)) |
#define is_local_xapic(x) (((x) & LAVR_Mask) == 0x14) |
/** Logical Destination Register. */ |
#define LDR (0x0d0/sizeof(uint32_t)) |
#define LDR (0x0d0 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
266,7 → 266,7 |
} ldr_t; |
/** Destination Format Register. */ |
#define DFR (0x0e0/sizeof(uint32_t)) |
#define DFR (0x0e0 / sizeof(uint32_t)) |
typedef union { |
uint32_t value; |
struct { |
276,8 → 276,8 |
} dfr_t; |
/* IO APIC */ |
#define IOREGSEL (0x00/sizeof(uint32_t)) |
#define IOWIN (0x10/sizeof(uint32_t)) |
#define IOREGSEL (0x00 / sizeof(uint32_t)) |
#define IOWIN (0x10 / sizeof(uint32_t)) |
#define IOAPICID 0x00 |
#define IOAPICVER 0x01 |
/branches/network/kernel/arch/ia32/include/byteorder.h |
---|
35,15 → 35,9 |
#ifndef KERN_ia32_BYTEORDER_H_ |
#define KERN_ia32_BYTEORDER_H_ |
#include <byteorder.h> |
/* IA-32 is little-endian */ |
#define uint32_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#define ARCH_IS_LITTLE_ENDIAN |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#endif |
/** @} |
/branches/network/kernel/arch/ia32/include/context_offset.h |
---|
0,0 → 1,83 |
/* |
* Copyright (c) 2008 Josef Cejka |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup ia32 |
* @{ |
*/ |
/** @file |
*/ |
#ifndef KERN_ia32_CONTEXT_OFFSET_H_ |
#define KERN_ia32_CONTEXT_OFFSET_H_ |
#define OFFSET_SP 0x0 |
#define OFFSET_PC 0x4 |
#define OFFSET_EBX 0x8 |
#define OFFSET_ESI 0xC |
#define OFFSET_EDI 0x10 |
#define OFFSET_EBP 0x14 |
#ifdef KERNEL |
# define OFFSET_IPL 0x18 |
#else |
# define OFFSET_TLS 0x18 |
#endif |
#ifdef __ASM__ |
# ctx: address of the structure with saved context |
# pc: return address |
.macro CONTEXT_SAVE_ARCH_CORE ctx:req pc:req |
movl %esp,OFFSET_SP(\ctx) # %esp -> ctx->sp |
movl \pc,OFFSET_PC(\ctx) # %eip -> ctx->pc |
movl %ebx,OFFSET_EBX(\ctx) # %ebx -> ctx->ebx |
movl %esi,OFFSET_ESI(\ctx) # %esi -> ctx->esi |
movl %edi,OFFSET_EDI(\ctx) # %edi -> ctx->edi |
movl %ebp,OFFSET_EBP(\ctx) # %ebp -> ctx->ebp |
.endm |
# ctx: address of the structure with saved context |
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req pc:req |
movl OFFSET_SP(\ctx),%esp # ctx->sp -> %esp |
movl OFFSET_PC(\ctx),\pc # ctx->pc -> \pc |
movl OFFSET_EBX(\ctx),%ebx # ctx->ebx -> %ebx |
movl OFFSET_ESI(\ctx),%esi # ctx->esi -> %esi |
movl OFFSET_EDI(\ctx),%edi # ctx->edi -> %edi |
movl OFFSET_EBP(\ctx),%ebp # ctx->ebp -> %ebp |
.endm |
#endif /* __ASM__ */ |
#endif |
/** @} |
*/ |
/branches/network/kernel/arch/ia32/include/context.h |
---|
35,6 → 35,7 |
#ifndef KERN_ia32_CONTEXT_H_ |
#define KERN_ia32_CONTEXT_H_ |
#ifdef KERNEL |
#include <arch/types.h> |
#define STACK_ITEM_SIZE 4 |
47,6 → 48,8 |
*/ |
#define SP_DELTA (8 + STACK_ITEM_SIZE) |
#endif /* KERNEL */ |
/* |
* Only save registers that must be preserved across |
* function calls. |
/branches/network/kernel/arch/ia32/Makefile.inc |
---|
29,11 → 29,15 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf32-i386 |
BFD_ARCH = i386 |
BFD = binary |
TARGET = i686-pc-linux-gnu |
TOOLCHAIN_DIR = /usr/local/i686 |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/i686 |
DEFS += -DMACHINE=$(MACHINE) -D__32_BITS__ |
46,7 → 50,8 |
# |
ifeq ($(MACHINE),athlon-xp) |
CMN2 = -march=athlon-xp -mmmx -msse -m3dnow |
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow |
CMN2 = -march=athlon-xp |
GCC_CFLAGS += $(CMN2) |
ICC_CFLAGS += $(CMN2) |
SUNCC_CFLAGS += -xarch=ssea |
55,7 → 60,8 |
CONFIG_HT = n |
endif |
ifeq ($(MACHINE),athlon-mp) |
CMN2 = -march=athlon-mp -mmmx -msse -m3dnow |
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow |
CMN2 = -march=athlon-mp |
GCC_CFLAGS += $(CMN2) |
ICC_CFLAGS += $(CMN2) |
SUNCC_CFLAGS += xarch=ssea |
63,7 → 69,8 |
CONFIG_HT = n |
endif |
ifeq ($(MACHINE),pentium3) |
CMN2 = -march=pentium3 -mmmx -msse |
FPU_NO_CFLAGS = -mno-mmx -mno-sse |
CMN2 = -march=pentium3 |
GCC_CFLAGS += $(CMN2) |
ICC_CFLAGS += $(CMN2) |
SUNCC_CFLAGS += -xarch=sse |
71,7 → 78,8 |
CONFIG_HT = n |
endif |
ifeq ($(MACHINE),core) |
CMN2 = -march=prescott -mfpmath=sse -mmmx -msse -msse2 -msse3 |
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2 -mno-sse3 |
CMN2 = -march=prescott |
GCC_CFLAGS += $(CMN2) |
ICC_CFLAGS += $(CMN2) |
SUNCC_CFLAGS += -xarch=sse3 |
78,7 → 86,8 |
DEFS += -DCONFIG_FENCES_P4 |
endif |
ifeq ($(MACHINE),pentium4) |
GCC_CFLAGS += -march=pentium4 -mfpmath=sse -mmmx -msse -msse2 |
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2 |
GCC_CFLAGS += -march=pentium4 |
ICC_CFLAGS += -march=pentium4 |
SUNCC_CFLAGS += -xarch=sse2 |
DEFS += -DCONFIG_FENCES_P4 |
120,7 → 129,7 |
CONFIG_SOFTINT = y |
ARCH_SOURCES = \ |
arch/$(ARCH)/src/context.s \ |
arch/$(ARCH)/src/context.S \ |
arch/$(ARCH)/src/debug/panic.s \ |
arch/$(ARCH)/src/delay.s \ |
arch/$(ARCH)/src/asm.S \ |
/branches/network/kernel/arch/ia32/src/context.s |
---|
File deleted |
/branches/network/kernel/arch/ia32/src/asm.S |
---|
37,6 → 37,8 |
.global paging_on |
.global enable_l_apic_in_msr |
.global interrupt_handlers |
.global memsetb |
.global memsetw |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_from_uspace_failover_address |
44,6 → 46,15 |
.global memcpy_to_uspace_failover_address |
# Wrapper for generic memsetb |
memsetb: |
jmp _memsetb |
# Wrapper for generic memsetw |
memsetw: |
jmp _memsetw |
#define MEMCPY_DST 4 |
#define MEMCPY_SRC 8 |
#define MEMCPY_SIZE 12 |
60,7 → 71,7 |
* @param MEMCPY_SRC(%esp) Source address. |
* @param MEMCPY_SIZE(%esp) Size. |
* |
* @return MEMCPY_SRC(%esp) on success and 0 on failure. |
* @return MEMCPY_DST(%esp) on success and 0 on failure. |
*/ |
memcpy: |
memcpy_from_uspace: |
85,7 → 96,7 |
0: |
movl %edx, %edi |
movl %eax, %esi |
movl MEMCPY_SRC(%esp), %eax /* MEMCPY_SRC(%esp), success */ |
movl MEMCPY_DST(%esp), %eax /* MEMCPY_DST(%esp), success */ |
ret |
/* |
173,6 → 184,7 |
movw %ax, %ds |
movw %ax, %es |
cld |
sti |
# syscall_handler(edx, ecx, ebx, esi, edi, ebp, eax) |
call syscall_handler |
233,6 → 245,8 |
movw %ax, %ds |
movw %ax, %es |
cld |
pushl %esp # *istate |
pushl $(\i) # intnum |
call exc_dispatch # excdispatch(intnum, *istate) |
/branches/network/kernel/arch/ia32/src/mm/as.c |
---|
39,9 → 39,7 |
/** Architecture dependent address space init. */ |
void as_arch_init(void) |
{ |
#ifndef __OBJC__ |
as_operations = &as_pt_operations; |
#endif |
} |
/** @} |
/branches/network/kernel/arch/ia32/src/drivers/ega.c |
---|
93,12 → 93,6 |
sysinfo_set_item_val("fb.width", NULL, ROW); |
sysinfo_set_item_val("fb.height", NULL, ROWS); |
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM); |
sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t) |
videoram)); |
#ifndef CONFIG_FB |
putchar('\n'); |
#endif |
} |
static void ega_display_char(char ch) |
115,7 → 109,7 |
return; |
memcpy((void *) videoram, (void *) (videoram + ROW * 2), (SCREEN - ROW) * 2); |
memsetw((uintptr_t) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720); |
memsetw(videoram + (SCREEN - ROW) * 2, ROW, 0x0720); |
ega_cursor = ega_cursor - ROW; |
} |
/branches/network/kernel/arch/ia32/src/userspace.c |
---|
70,6 → 70,10 |
"pushl %3\n" |
"pushl %4\n" |
"movl %5, %%eax\n" |
/* %ebx is defined to hold pcb_ptr - set it to 0 */ |
"xorl %%ebx, %%ebx\n" |
"iret\n" |
: |
: "i" (selector(UDATA_DES) | PL_USER), |
/branches/network/kernel/arch/ia32/src/smp/smp.c |
---|
155,13 → 155,12 |
* Prepare new GDT for CPU in question. |
*/ |
gdt_new = (struct descriptor *) malloc(GDT_ITEMS * |
sizeof(struct descriptor), FRAME_ATOMIC); |
sizeof(struct descriptor), FRAME_ATOMIC | FRAME_LOW_4_GiB); |
if (!gdt_new) |
panic("couldn't allocate memory for GDT\n"); |
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor)); |
memsetb((uintptr_t)(&gdt_new[TSS_DES]), |
sizeof(struct descriptor), 0); |
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0); |
protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor); |
protected_ap_gdtr.base = KA2PA((uintptr_t) gdt_new); |
gdtr.base = (uintptr_t) gdt_new; |
/branches/network/kernel/arch/ia32/src/smp/ap.S |
---|
45,7 → 45,7 |
KTEXT=8 |
KDATA=16 |
# This piece of code is real-mode and is meant to be alligned at 4K boundary. |
# This piece of code is real-mode and is meant to be aligned at 4K boundary. |
# The requirement for such an alignment comes from MP Specification's STARTUP IPI |
# requirements. |
/branches/network/kernel/arch/ia32/src/pm.c |
---|
112,7 → 112,7 |
void tss_initialize(tss_t *t) |
{ |
memsetb((uintptr_t) t, sizeof(struct tss), 0); |
memsetb(t, sizeof(struct tss), 0); |
} |
/* |
240,7 → 240,7 |
preemption_disable(); |
ipl_t ipl = interrupts_disable(); |
memsetb((uintptr_t) idt, sizeof(idt), 0); |
memsetb(idt, sizeof(idt), 0); |
ptr_16_32_t idtr; |
idtr.limit = sizeof(idt); |
/branches/network/kernel/arch/ia32/src/debug/panic.s |
---|
30,5 → 30,5 |
.global panic_printf |
panic_printf: |
movl $halt,(%esp) # fake stack to make printf return to halt |
movl $halt, (%esp) # fake stack to make printf return to halt |
jmp printf |
/branches/network/kernel/arch/ia32/src/boot/boot.S |
---|
50,6 → 50,7 |
.long multiboot_image_start |
multiboot_image_start: |
cld |
movl $START_STACK, %esp # initialize stack pointer |
lgdt KA2PA(bootstrap_gdtr) # initialize Global Descriptor Table register |
85,7 → 86,6 |
mov $vesa_init, %esi |
mov $VESA_INIT_SEGMENT << 4, %edi |
mov $e_vesa_init - vesa_init, %ecx |
cld |
rep movsb |
mov $VESA_INIT_SEGMENT << 4, %edi |
206,7 → 206,6 |
movl $BOOT_OFFSET, %esi |
movl $AP_BOOT_OFFSET, %edi |
movl $_hardcoded_unmapped_size, %ecx |
cld |
rep movsb |
#endif |
279,7 → 278,6 |
addl %eax, %edi |
movw $0x0c00, %ax # black background, light red foreground |
cld |
ploop: |
lodsb |
/branches/network/kernel/arch/ia32/src/context.S |
---|
0,0 → 1,67 |
# |
# Copyright (c) 2001-2004 Jakub Jermar |
# All rights reserved. |
# |
# Redistribution and use in source and binary forms, with or without |
# modification, are permitted provided that the following conditions |
# are met: |
# |
# - Redistributions of source code must retain the above copyright |
# notice, this list of conditions and the following disclaimer. |
# - Redistributions in binary form must reproduce the above copyright |
# notice, this list of conditions and the following disclaimer in the |
# documentation and/or other materials provided with the distribution. |
# - The name of the author may not be used to endorse or promote products |
# derived from this software without specific prior written permission. |
# |
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
# |
#include <arch/context_offset.h> |
.text |
.global context_save_arch |
.global context_restore_arch |
## Save current CPU context |
# |
# Save CPU context to the context_t variable |
# pointed by the 1st argument. Returns 1 in EAX. |
# |
context_save_arch: |
movl 0(%esp),%eax # save pc value into eax |
movl 4(%esp),%edx # address of the context variable to save context to |
# save registers to given structure |
CONTEXT_SAVE_ARCH_CORE %edx %eax |
xorl %eax,%eax # context_save returns 1 |
incl %eax |
ret |
## Restore saved CPU context |
# |
# Restore CPU context from context_t variable |
# pointed by the 1st argument. Returns 0 in EAX. |
# |
context_restore_arch: |
movl 4(%esp),%eax # address of the context variable to restore context from |
# restore registers from given structure |
CONTEXT_RESTORE_ARCH_CORE %eax %edx |
movl %edx,0(%esp) # put saved pc on stack |
xorl %eax,%eax # context_restore returns 0 |
ret |
/branches/network/kernel/arch/amd64/include/atomic.h |
---|
41,17 → 41,17 |
static inline void atomic_inc(atomic_t *val) { |
#ifdef CONFIG_SMP |
asm volatile ("lock incq %0\n" : "=m" (val->count)); |
asm volatile ("lock incq %0\n" : "+m" (val->count)); |
#else |
asm volatile ("incq %0\n" : "=m" (val->count)); |
asm volatile ("incq %0\n" : "+m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
static inline void atomic_dec(atomic_t *val) { |
#ifdef CONFIG_SMP |
asm volatile ("lock decq %0\n" : "=m" (val->count)); |
asm volatile ("lock decq %0\n" : "+m" (val->count)); |
#else |
asm volatile ("decq %0\n" : "=m" (val->count)); |
asm volatile ("decq %0\n" : "+m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
61,7 → 61,7 |
asm volatile ( |
"lock xaddq %1, %0\n" |
: "=m" (val->count), "+r" (r) |
: "+m" (val->count), "+r" (r) |
); |
return r; |
73,14 → 73,14 |
asm volatile ( |
"lock xaddq %1, %0\n" |
: "=m" (val->count), "+r" (r) |
: "+m" (val->count), "+r" (r) |
); |
return r; |
} |
#define atomic_preinc(val) (atomic_postinc(val)+1) |
#define atomic_predec(val) (atomic_postdec(val)-1) |
#define atomic_preinc(val) (atomic_postinc(val) + 1) |
#define atomic_predec(val) (atomic_postdec(val) - 1) |
static inline uint64_t test_and_set(atomic_t *val) { |
uint64_t v; |
88,7 → 88,7 |
asm volatile ( |
"movq $1, %0\n" |
"xchgq %0, %1\n" |
: "=r" (v),"=m" (val->count) |
: "=r" (v), "+m" (val->count) |
); |
return v; |
102,20 → 102,20 |
preemption_disable(); |
asm volatile ( |
"0:;" |
"0:\n" |
#ifdef CONFIG_HT |
"pause;" |
"pause\n" |
#endif |
"mov %0, %1;" |
"testq %1, %1;" |
"jnz 0b;" /* Lightweight looping on locked spinlock */ |
"mov %0, %1\n" |
"testq %1, %1\n" |
"jnz 0b\n" /* lightweight looping on locked spinlock */ |
"incq %1;" /* now use the atomic operation */ |
"xchgq %0, %1;" |
"testq %1, %1;" |
"jnz 0b;" |
: "=m"(val->count),"=r"(tmp) |
); |
"incq %1\n" /* now use the atomic operation */ |
"xchgq %0, %1\n" |
"testq %1, %1\n" |
"jnz 0b\n" |
: "+m" (val->count), "=&r" (tmp) |
); |
/* |
* Prevent critical section code from bleeding out this way up. |
*/ |
/branches/network/kernel/arch/amd64/include/mm/page.h |
---|
52,8 → 52,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifdef KERNEL |
#ifndef __ASM__ |
/branches/network/kernel/arch/amd64/include/memstr.h |
---|
35,110 → 35,13 |
#ifndef KERN_amd64_MEMSTR_H_ |
#define KERN_amd64_MEMSTR_H_ |
/** Copy memory |
* |
* Copy a given number of bytes (3rd argument) |
* from the memory location defined by 2nd argument |
* to the memory location defined by 1st argument. |
* The memory areas cannot overlap. |
* |
* @param dst Destination |
* @param src Source |
* @param cnt Number of bytes |
* @return Destination |
*/ |
static inline void * memcpy(void * dst, const void * src, size_t cnt) |
{ |
unative_t d0, d1, d2; |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
asm volatile( |
"rep movsq\n\t" |
"movq %4, %%rcx\n\t" |
"andq $7, %%rcx\n\t" |
"jz 1f\n\t" |
"rep movsb\n\t" |
"1:\n" |
: "=&c" (d0), "=&D" (d1), "=&S" (d2) |
: "0" ((unative_t)(cnt / 8)), "g" ((unative_t)cnt), "1" ((unative_t) dst), "2" ((unative_t) src) |
: "memory"); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
return dst; |
} |
extern int memcmp(const void *a, const void *b, size_t cnt); |
/** Compare memory regions for equality |
* |
* Compare a given number of bytes (3rd argument) |
* at memory locations defined by 1st and 2nd argument |
* for equality. If bytes are equal function returns 0. |
* |
* @param src Region 1 |
* @param dst Region 2 |
* @param cnt Number of bytes |
* @return Zero if bytes are equal, non-zero otherwise |
*/ |
static inline int memcmp(const void * src, const void * dst, size_t cnt) |
{ |
unative_t d0, d1, d2; |
unative_t ret; |
asm ( |
"repe cmpsb\n\t" |
"je 1f\n\t" |
"movq %3, %0\n\t" |
"addq $1, %0\n\t" |
"1:\n" |
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2) |
: "0" (0), "1" (src), "2" (dst), "3" ((unative_t)cnt) |
); |
return ret; |
} |
/** Fill memory with words |
* Fill a given number of words (2nd argument) |
* at memory defined by 1st argument with the |
* word value defined by 3rd argument. |
* |
* @param dst Destination |
* @param cnt Number of words |
* @param x Value to fill |
*/ |
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x) |
{ |
unative_t d0, d1; |
asm volatile ( |
"rep stosw\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
: "memory" |
); |
} |
/** Fill memory with bytes |
* Fill a given number of bytes (2nd argument) |
* at memory defined by 1st argument with the |
* word value defined by 3rd argument. |
* |
* @param dst Destination |
* @param cnt Number of bytes |
* @param x Value to fill |
*/ |
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x) |
{ |
unative_t d0, d1; |
asm volatile ( |
"rep stosb\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
: "memory" |
); |
} |
#endif |
/** @} |
/branches/network/kernel/arch/amd64/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_amd64_TYPES_H_ |
#define KERN_amd64_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
61,14 → 57,31 |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
/**< Formats for uintptr_t, size_t, count_t and index_t */ |
#define PRIp "llx" |
#define PRIs "llu" |
#define PRIc "llu" |
#define PRIi "llu" |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */ |
#define PRId8 "d" |
#define PRId16 "d" |
#define PRId32 "d" |
#define PRId64 "lld" |
#define PRIdn "lld" |
#define PRIu8 "u" |
#define PRIu16 "u" |
#define PRIu32 "u" |
#define PRIu64 "llu" |
#define PRIun "llu" |
#define PRIx8 "x" |
#define PRIx16 "x" |
#define PRIx32 "x" |
#define PRIx64 "llx" |
#define PRIxn "llx" |
/** Page Table Entry. */ |
typedef struct { |
unsigned present : 1; |
/branches/network/kernel/arch/amd64/include/byteorder.h |
---|
35,15 → 35,9 |
#ifndef KERN_amd64_BYTEORDER_H_ |
#define KERN_amd64_BYTEORDER_H_ |
#include <byteorder.h> |
/* AMD64 is little-endian */ |
#define uint32_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#define ARCH_IS_LITTLE_ENDIAN |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#endif |
/** @} |
/branches/network/kernel/arch/amd64/include/cpu.h |
---|
35,8 → 35,9 |
#ifndef KERN_amd64_CPU_H_ |
#define KERN_amd64_CPU_H_ |
#define RFLAGS_IF (1 << 9) |
#define RFLAGS_RF (1 << 16) |
#define RFLAGS_IF (1 << 9) |
#define RFLAGS_DF (1 << 10) |
#define RFLAGS_RF (1 << 16) |
#define EFER_MSR_NUM 0xc0000080 |
#define AMD_SCE_FLAG 0 |
/branches/network/kernel/arch/amd64/include/context_offset.h |
---|
37,6 → 37,43 |
#define OFFSET_R13 0x28 |
#define OFFSET_R14 0x30 |
#define OFFSET_R15 0x38 |
#define OFFSET_IPL 0x40 |
#ifdef KERNEL |
# define OFFSET_IPL 0x40 |
#else |
# define OFFSET_TLS 0x40 |
#endif |
#ifdef __ASM__ |
# ctx: address of the structure with saved context |
# pc: return address |
.macro CONTEXT_SAVE_ARCH_CORE ctx:req pc:req |
movq \pc, OFFSET_PC(\ctx) |
movq %rsp, OFFSET_SP(\ctx) |
movq %rbx, OFFSET_RBX(\ctx) |
movq %rbp, OFFSET_RBP(\ctx) |
movq %r12, OFFSET_R12(\ctx) |
movq %r13, OFFSET_R13(\ctx) |
movq %r14, OFFSET_R14(\ctx) |
movq %r15, OFFSET_R15(\ctx) |
.endm |
# ctx: address of the structure with saved context |
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req pc:req |
movq OFFSET_R15(\ctx), %r15 |
movq OFFSET_R14(\ctx), %r14 |
movq OFFSET_R13(\ctx), %r13 |
movq OFFSET_R12(\ctx), %r12 |
movq OFFSET_RBP(\ctx), %rbp |
movq OFFSET_RBX(\ctx), %rbx |
movq OFFSET_SP(\ctx), %rsp # ctx->sp -> %rsp |
movq OFFSET_PC(\ctx), \pc |
.endm |
#endif |
#endif |
/branches/network/kernel/arch/amd64/include/context.h |
---|
35,6 → 35,8 |
#ifndef KERN_amd64_CONTEXT_H_ |
#define KERN_amd64_CONTEXT_H_ |
#ifdef KERNEL |
#include <arch/types.h> |
/* According to ABI the stack MUST be aligned on |
43,6 → 45,8 |
*/ |
#define SP_DELTA 16 |
#endif /* KERNEL */ |
/* We include only registers that must be preserved |
* during function call |
*/ |
/branches/network/kernel/arch/amd64/Makefile.inc |
---|
29,12 → 29,17 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf64-x86-64 |
BFD_ARCH = i386:x86-64 |
BFD = binary |
TARGET = amd64-linux-gnu |
TOOLCHAIN_DIR = /usr/local/amd64 |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/amd64 |
FPU_NO_CFLAGS = -mno-sse -mno-sse2 |
CMN1 = -m64 -mcmodel=kernel -mno-red-zone -fno-unwind-tables |
GCC_CFLAGS += $(CMN1) |
ICC_CFLAGS += $(CMN1) |
/branches/network/kernel/arch/amd64/src/asm_utils.S |
---|
65,6 → 65,8 |
.global get_cycle |
.global read_efer_flag |
.global set_efer_flag |
.global memsetb |
.global memsetw |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_to_uspace |
71,6 → 73,14 |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
# Wrapper for generic memsetb |
memsetb: |
jmp _memsetb |
# Wrapper for generic memsetw |
memsetw: |
jmp _memsetw |
#define MEMCPY_DST %rdi |
#define MEMCPY_SRC %rsi |
#define MEMCPY_SIZE %rdx |
88,12 → 98,12 |
* @param MEMCPY_SRC Source address. |
* @param MEMCPY_SIZE Number of bytes to copy. |
* |
* @retrun MEMCPY_SRC on success, 0 on failure. |
 * @return MEMCPY_DST on success, 0 on failure. |
*/ |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
movq MEMCPY_SRC, %rax |
movq MEMCPY_DST, %rax |
movq MEMCPY_SIZE, %rcx |
shrq $3, %rcx /* size / 8 */ |
248,6 → 258,7 |
.endif |
save_all_gpr |
cld |
movq $(\i), %rdi # %rdi - first parameter |
movq %rsp, %rsi # %rsi - pointer to istate |
/branches/network/kernel/arch/amd64/src/userspace.c |
---|
61,6 → 61,8 |
"pushq %3\n" |
"pushq %4\n" |
"movq %5, %%rax\n" |
/* %rdi is defined to hold pcb_ptr - set it to 0 */ |
"xorq %%rdi, %%rdi\n" |
"iretq\n" |
: : |
"i" (gdtselector(UDATA_DES) | PL_USER), |
/branches/network/kernel/arch/amd64/src/debugger.c |
---|
106,25 → 106,33 |
{ |
unsigned int i; |
char *symbol; |
#ifdef __32_BITS__ |
printf("# Count Address In symbol\n"); |
printf("-- ----- ---------- ---------\n"); |
#endif |
#ifdef __64_BITS__ |
printf("# Count Address In symbol\n"); |
printf("-- ----- ------------------ ---------\n"); |
#endif |
if (sizeof(void *) == 4) { |
printf("# Count Address In symbol\n"); |
printf("-- ----- ---------- ---------\n"); |
} else { |
printf("# Count Address In symbol\n"); |
printf("-- ----- ------------------ ---------\n"); |
} |
for (i = 0; i < BKPOINTS_MAX; i++) |
if (breakpoints[i].address) { |
symbol = get_symtab_entry(breakpoints[i].address); |
if (sizeof(void *) == 4) |
printf("%-2u %-5d %#10zx %s\n", i, breakpoints[i].counter, |
breakpoints[i].address, symbol); |
else |
printf("%-2u %-5d %#18zx %s\n", i, breakpoints[i].counter, |
breakpoints[i].address, symbol); |
#ifdef __32_BITS__ |
printf("%-2u %-5d %#10zx %s\n", i, |
breakpoints[i].counter, breakpoints[i].address, |
symbol); |
#endif |
#ifdef __64_BITS__ |
printf("%-2u %-5d %#18zx %s\n", i, |
breakpoints[i].counter, breakpoints[i].address, |
symbol); |
#endif |
} |
return 1; |
} |
162,19 → 170,23 |
if ((flags & BKPOINT_INSTR)) { |
; |
} else { |
if (sizeof(int) == 4) |
dr7 |= ((unative_t) 0x3) << (18 + 4*curidx); |
else /* 8 */ |
dr7 |= ((unative_t) 0x2) << (18 + 4*curidx); |
#ifdef __32_BITS__ |
dr7 |= ((unative_t) 0x3) << (18 + 4 * curidx); |
#endif |
#ifdef __64_BITS__ |
dr7 |= ((unative_t) 0x2) << (18 + 4 * curidx); |
#endif |
if ((flags & BKPOINT_WRITE)) |
dr7 |= ((unative_t) 0x1) << (16 + 4*curidx); |
dr7 |= ((unative_t) 0x1) << (16 + 4 * curidx); |
else if ((flags & BKPOINT_READ_WRITE)) |
dr7 |= ((unative_t) 0x3) << (16 + 4*curidx); |
dr7 |= ((unative_t) 0x3) << (16 + 4 * curidx); |
} |
/* Enable global breakpoint */ |
dr7 |= 0x2 << (curidx*2); |
dr7 |= 0x2 << (curidx * 2); |
write_dr7(dr7); |
246,15 → 258,15 |
if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) { |
if (*((unative_t *) breakpoints[slot].address) != 0) |
return; |
printf("**** Found ZERO on address %lx (slot %d) ****\n", |
breakpoints[slot].address, slot); |
printf("*** Found ZERO on address %lx (slot %d) ***\n", |
breakpoints[slot].address, slot); |
} else { |
printf("Data watchpoint - new data: %lx\n", |
*((unative_t *) breakpoints[slot].address)); |
*((unative_t *) breakpoints[slot].address)); |
} |
} |
printf("Reached breakpoint %d:%lx(%s)\n", slot, getip(istate), |
get_symtab_entry(getip(istate))); |
get_symtab_entry(getip(istate))); |
printf("***Type 'exit' to exit kconsole.\n"); |
atomic_set(&haltstate,1); |
kconsole((void *) "debug"); |
292,7 → 304,8 |
/** Remove breakpoint from table */ |
int cmd_del_breakpoint(cmd_arg_t *argv) |
{ |
if (argv->intval < 0 || argv->intval > BKPOINTS_MAX) { |
unative_t bpno = argv->intval; |
if (bpno > BKPOINTS_MAX) { |
printf("Invalid breakpoint number.\n"); |
return 0; |
} |
346,7 → 359,9 |
} |
#ifdef CONFIG_SMP |
static void debug_ipi(int n __attribute__((unused)), istate_t *istate __attribute__((unused))) |
static void |
debug_ipi(int n __attribute__((unused)), |
istate_t *istate __attribute__((unused))) |
{ |
int i; |
362,7 → 377,7 |
{ |
int i; |
for (i=0; i<BKPOINTS_MAX; i++) |
for (i = 0; i < BKPOINTS_MAX; i++) |
breakpoints[i].address = NULL; |
cmd_initialize(&bkpts_info); |
383,11 → 398,9 |
panic("could not register command %s\n", addwatchp_info.name); |
#endif |
exc_register(VECTOR_DEBUG, "debugger", |
debug_exception); |
exc_register(VECTOR_DEBUG, "debugger", debug_exception); |
#ifdef CONFIG_SMP |
exc_register(VECTOR_DEBUG_IPI, "debugger_smp", |
debug_ipi); |
exc_register(VECTOR_DEBUG_IPI, "debugger_smp", debug_ipi); |
#endif |
} |
/branches/network/kernel/arch/amd64/src/pm.c |
---|
155,7 → 155,7 |
void tss_initialize(tss_t *t) |
{ |
memsetb((uintptr_t) t, sizeof(tss_t), 0); |
memsetb(t, sizeof(tss_t), 0); |
} |
/* |
239,7 → 239,7 |
preemption_disable(); |
ipl_t ipl = interrupts_disable(); |
memsetb((uintptr_t) idt, sizeof(idt), 0); |
memsetb(idt, sizeof(idt), 0); |
idtr_load(&idtr); |
interrupts_restore(ipl); |
/branches/network/kernel/arch/amd64/src/proc/thread.c |
---|
46,7 → 46,7 |
* Kernel RSP can be precalculated at thread creation time. |
*/ |
t->arch.syscall_rsp[SYSCALL_KSTACK_RSP] = |
(uintptr_t)&t->kstack[PAGE_SIZE - sizeof(uint64_t)]; |
(uintptr_t) &t->kstack[PAGE_SIZE - sizeof(uint64_t)]; |
} |
/** @} |
/branches/network/kernel/arch/amd64/src/syscall.c |
---|
62,9 → 62,11 |
write_msr(AMD_MSR_LSTAR, (uint64_t)syscall_entry); |
/* Mask RFLAGS on syscall |
* - disable interrupts, until we exchange the stack register |
* (mask the IE bit) |
* (mask the IF bit) |
* - clear DF so that the string instructions operate in |
* the right direction |
*/ |
write_msr(AMD_MSR_SFMASK, 0x200); |
write_msr(AMD_MSR_SFMASK, RFLAGS_IF | RFLAGS_DF); |
} |
/** @} |
/branches/network/kernel/arch/amd64/src/boot/boot.S |
---|
54,6 → 54,7 |
.long multiboot_image_start |
multiboot_image_start: |
cld |
movl $START_STACK, %esp # initialize stack pointer |
lgdtl bootstrap_gdtr # initialize Global Descriptor Table register |
126,7 → 127,6 |
mov $vesa_init, %esi |
mov $VESA_INIT_SEGMENT << 4, %edi |
mov $e_vesa_init - vesa_init, %ecx |
cld |
rep movsb |
mov $VESA_INIT_SEGMENT << 4, %edi |
282,7 → 282,6 |
movq $BOOT_OFFSET, %rsi |
movq $AP_BOOT_OFFSET, %rdi |
movq $_hardcoded_unmapped_size, %rcx |
cld |
rep movsb |
#endif |
556,7 → 555,6 |
addl %eax, %edi |
movw $0x0c00, %ax # black background, light red foreground |
cld |
ploop: |
lodsb |
/branches/network/kernel/arch/amd64/src/cpu/cpu.c |
---|
124,7 → 124,8 |
void cpu_arch_init(void) |
{ |
CPU->arch.tss = tss_p; |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss); |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - |
((uint8_t *) CPU->arch.tss); |
CPU->fpu_owner = NULL; |
} |
139,7 → 140,9 |
/* |
* Check for AMD processor. |
*/ |
if (info.cpuid_ebx==AMD_CPUID_EBX && info.cpuid_ecx==AMD_CPUID_ECX && info.cpuid_edx==AMD_CPUID_EDX) { |
if (info.cpuid_ebx == AMD_CPUID_EBX && |
info.cpuid_ecx == AMD_CPUID_ECX && |
info.cpuid_edx == AMD_CPUID_EDX) { |
CPU->arch.vendor = VendorAMD; |
} |
146,14 → 149,16 |
/* |
* Check for Intel processor. |
*/ |
if (info.cpuid_ebx==INTEL_CPUID_EBX && info.cpuid_ecx==INTEL_CPUID_ECX && info.cpuid_edx==INTEL_CPUID_EDX) { |
if (info.cpuid_ebx == INTEL_CPUID_EBX && |
info.cpuid_ecx == INTEL_CPUID_ECX && |
info.cpuid_edx == INTEL_CPUID_EDX) { |
CPU->arch.vendor = VendorIntel; |
} |
cpuid(1, &info); |
CPU->arch.family = (info.cpuid_eax>>8)&0xf; |
CPU->arch.model = (info.cpuid_eax>>4)&0xf; |
CPU->arch.stepping = (info.cpuid_eax>>0)&0xf; |
CPU->arch.family = (info.cpuid_eax >> 8) & 0xf; |
CPU->arch.model = (info.cpuid_eax >> 4) & 0xf; |
CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; |
} |
} |
160,8 → 165,8 |
void cpu_print_report(cpu_t* m) |
{ |
printf("cpu%d: (%s family=%d model=%d stepping=%d) %dMHz\n", |
m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model, m->arch.stepping, |
m->frequency_mhz); |
m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model, |
m->arch.stepping, m->frequency_mhz); |
} |
/** @} |
/branches/network/kernel/arch/amd64/src/context.S |
---|
40,17 → 40,10 |
# |
context_save_arch: |
movq (%rsp), %rdx # the caller's return %eip |
# In %edi is passed 1st argument |
movq %rdx, OFFSET_PC(%rdi) |
movq %rsp, OFFSET_SP(%rdi) |
CONTEXT_SAVE_ARCH_CORE %rdi %rdx |
movq %rbx, OFFSET_RBX(%rdi) |
movq %rbp, OFFSET_RBP(%rdi) |
movq %r12, OFFSET_R12(%rdi) |
movq %r13, OFFSET_R13(%rdi) |
movq %r14, OFFSET_R14(%rdi) |
movq %r15, OFFSET_R15(%rdi) |
xorq %rax,%rax # context_save returns 1 |
incq %rax |
ret |
62,16 → 55,9 |
# pointed by the 1st argument. Returns 0 in EAX. |
# |
context_restore_arch: |
movq OFFSET_R15(%rdi), %r15 |
movq OFFSET_R14(%rdi), %r14 |
movq OFFSET_R13(%rdi), %r13 |
movq OFFSET_R12(%rdi), %r12 |
movq OFFSET_RBP(%rdi), %rbp |
movq OFFSET_RBX(%rdi), %rbx |
movq OFFSET_SP(%rdi), %rsp # ctx->sp -> %rsp |
movq OFFSET_PC(%rdi), %rdx |
CONTEXT_RESTORE_ARCH_CORE %rdi %rdx |
movq %rdx,(%rsp) |
xorq %rax,%rax # context_restore returns 0 |
/branches/network/kernel/arch/amd64/src/mm/page.c |
---|
82,7 → 82,7 |
void page_arch_init(void) |
{ |
uintptr_t cur; |
int i; |
unsigned int i; |
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE; |
if (config.cpu_active == 1) { |
/branches/network/kernel/arch/sparc64/include/atomic.h |
---|
37,6 → 37,7 |
#include <arch/barrier.h> |
#include <arch/types.h> |
#include <preemption.h> |
/** Atomic add operation. |
* |
56,7 → 57,8 |
a = *((uint64_t *) x); |
b = a + i; |
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a)); |
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), |
"+r" (b) : "r" (a)); |
} while (a != b); |
return a; |
97,7 → 99,8 |
uint64_t v = 1; |
volatile uintptr_t x = (uint64_t) &val->count; |
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0)); |
asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), |
"+r" (v) : "r" (0)); |
return v; |
} |
109,6 → 112,8 |
volatile uintptr_t x = (uint64_t) &val->count; |
preemption_disable(); |
asm volatile ( |
"0:\n" |
"casx %0, %3, %1\n" |
/branches/network/kernel/arch/sparc64/include/mm/page.h |
---|
53,11 → 53,6 |
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH)) |
/* |
* With 16K pages, there is only one page color. |
*/ |
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */ |
#ifdef KERNEL |
#ifndef __ASM__ |
/branches/network/kernel/arch/sparc64/include/mm/tlb.h |
---|
160,7 → 160,7 |
static inline void mmu_primary_context_write(uint64_t v) |
{ |
asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v); |
flush(); |
flush_pipeline(); |
} |
/** Read MMU Secondary Context Register. |
179,7 → 179,7 |
static inline void mmu_secondary_context_write(uint64_t v) |
{ |
asi_u64_write(ASI_DMMU, VA_SECONDARY_CONTEXT_REG, v); |
flush(); |
flush_pipeline(); |
} |
/** Read IMMU TLB Data Access Register. |
209,7 → 209,7 |
reg.value = 0; |
reg.tlb_entry = entry; |
asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value); |
flush(); |
flush_pipeline(); |
} |
/** Read DMMU TLB Data Access Register. |
279,7 → 279,7 |
static inline void itlb_tag_access_write(uint64_t v) |
{ |
asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v); |
flush(); |
flush_pipeline(); |
} |
/** Read IMMU TLB Tag Access Register. |
318,7 → 318,7 |
static inline void itlb_data_in_write(uint64_t v) |
{ |
asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v); |
flush(); |
flush_pipeline(); |
} |
/** Write DMMU TLB Data in Register. |
347,7 → 347,7 |
static inline void itlb_sfsr_write(uint64_t v) |
{ |
asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v); |
flush(); |
flush_pipeline(); |
} |
/** Read DTLB Synchronous Fault Status Register. |
400,7 → 400,7 |
asi_u64_write(ASI_IMMU_DEMAP, da.value, 0); /* da.value is the |
* address within the |
* ASI */ |
flush(); |
flush_pipeline(); |
} |
/** Perform DMMU TLB Demap Operation. |
/branches/network/kernel/arch/sparc64/include/mm/cache_spec.h |
---|
0,0 → 1,57 |
/* |
* Copyright (c) 2008 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64mm |
* @{ |
*/ |
/** @file |
*/ |
#ifndef KERN_sparc64_CACHE_SPEC_H_ |
#define KERN_sparc64_CACHE_SPEC_H_ |
/* |
* The following macros are valid for the following processors: |
* |
* UltraSPARC, UltraSPARC II, UltraSPARC IIi |
* |
* Should we support other UltraSPARC processors, we need to make sure that |
* the macros are defined correctly for them. |
*/ |
#define DCACHE_SIZE (16 * 1024) |
#define DCACHE_LINE_SIZE 32 |
#define ICACHE_SIZE (16 * 1024) |
#define ICACHE_WAYS 2 |
#define ICACHE_LINE_SIZE 32 |
#endif |
/** @} |
*/ |
/branches/network/kernel/arch/sparc64/include/barrier.h |
---|
57,8 → 57,11 |
#define write_barrier() \ |
asm volatile ("membar #StoreStore\n" ::: "memory") |
/** Flush Instruction Memory instruction. */ |
static inline void flush(void) |
#define flush(a) \ |
asm volatile ("flush %0\n" :: "r" ((a)) : "memory") |
/** Flush Instruction pipeline. */ |
static inline void flush_pipeline(void) |
{ |
/* |
* The FLUSH instruction takes address parameter. |
79,6 → 82,21 |
asm volatile ("membar #Sync\n"); |
} |
/** Enforce instruction memory coherence after self-modifying code.
 *
 * Orders the preceding store(s) with a write barrier and then issues
 * the FLUSH instruction for the modified word at address a.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and stays safe inside unbraced if/else bodies.
 */
#define smc_coherence(a) \
	do { \
		write_barrier(); \
		flush((a)); \
	} while (0)

/** Stride (in bytes) guaranteed to be covered by one FLUSH instruction. */
#define FLUSH_INVAL_MIN 4

/** Enforce instruction memory coherence for a block of code.
 *
 * Same as smc_coherence(), but flushes every FLUSH_INVAL_MIN-sized
 * chunk of the l bytes starting at address a.
 */
#define smc_coherence_block(a, l) \
	do { \
		unsigned long i; \
		write_barrier(); \
		for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
			flush((void *) (a) + i); \
	} while (0)
#endif |
/** @} |
/branches/network/kernel/arch/sparc64/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
extern int memcmp(const void *a, const void *b, size_t cnt); |
#endif |
/branches/network/kernel/arch/sparc64/include/asm.h |
---|
37,6 → 37,7 |
#include <arch/arch.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <align.h> |
#include <arch/register.h> |
#include <config.h> |
/branches/network/kernel/arch/sparc64/include/cpu.h |
---|
36,6 → 36,7 |
#define KERN_sparc64_CPU_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
#include <arch/register.h> |
#include <arch/asm.h> |
/branches/network/kernel/arch/sparc64/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_sparc64_TYPES_H_ |
#define KERN_sparc64_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
61,13 → 57,31 |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
/**< Formats for uintptr_t, size_t, count_t and index_t */ |
#define PRIp "llx" |
#define PRIs "llu" |
#define PRIc "llu" |
#define PRIi "llu" |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */ |
#define PRId8 "d" |
#define PRId16 "d" |
#define PRId32 "d" |
#define PRId64 "lld" |
#define PRIdn "lld" |
#define PRIu8 "u" |
#define PRIu16 "u" |
#define PRIu32 "u" |
#define PRIu64 "llu" |
#define PRIun "llu" |
#define PRIx8 "x" |
#define PRIx16 "x" |
#define PRIx32 "x" |
#define PRIx64 "llx" |
#define PRIxn "llx" |
typedef uint8_t asi_t; |
#endif |
/branches/network/kernel/arch/sparc64/include/context_offset.h |
---|
48,4 → 48,60 |
#define OFFSET_L6 0x80 |
#define OFFSET_L7 0x88 |
#ifndef KERNEL |
# define OFFSET_TP 0x90 |
#endif |
#ifdef __ASM__ |
/*
 * Store the core of the CPU context into the structure whose address
 * is in \ctx: stack pointer, return address (%o7) and the %i and %l
 * register windows. Offsets must match the context_t layout.
 */
.macro CONTEXT_SAVE_ARCH_CORE ctx:req
	stx %sp, [\ctx + OFFSET_SP]
	stx %o7, [\ctx + OFFSET_PC]
	stx %i0, [\ctx + OFFSET_I0]
	stx %i1, [\ctx + OFFSET_I1]
	stx %i2, [\ctx + OFFSET_I2]
	stx %i3, [\ctx + OFFSET_I3]
	stx %i4, [\ctx + OFFSET_I4]
	stx %i5, [\ctx + OFFSET_I5]
	stx %fp, [\ctx + OFFSET_FP]
	stx %i7, [\ctx + OFFSET_I7]
	stx %l0, [\ctx + OFFSET_L0]
	stx %l1, [\ctx + OFFSET_L1]
	stx %l2, [\ctx + OFFSET_L2]
	stx %l3, [\ctx + OFFSET_L3]
	stx %l4, [\ctx + OFFSET_L4]
	stx %l5, [\ctx + OFFSET_L5]
	stx %l6, [\ctx + OFFSET_L6]
	stx %l7, [\ctx + OFFSET_L7]
#ifndef KERNEL
	/* Userspace additionally preserves the thread pointer kept in %g7. */
	stx %g7, [\ctx + OFFSET_TP]
#endif
.endm

/*
 * Counterpart of CONTEXT_SAVE_ARCH_CORE: reload the same registers
 * from the structure whose address is in \ctx.
 */
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req
	ldx [\ctx + OFFSET_SP], %sp
	ldx [\ctx + OFFSET_PC], %o7
	ldx [\ctx + OFFSET_I0], %i0
	ldx [\ctx + OFFSET_I1], %i1
	ldx [\ctx + OFFSET_I2], %i2
	ldx [\ctx + OFFSET_I3], %i3
	ldx [\ctx + OFFSET_I4], %i4
	ldx [\ctx + OFFSET_I5], %i5
	ldx [\ctx + OFFSET_FP], %fp
	ldx [\ctx + OFFSET_I7], %i7
	ldx [\ctx + OFFSET_L0], %l0
	ldx [\ctx + OFFSET_L1], %l1
	ldx [\ctx + OFFSET_L2], %l2
	ldx [\ctx + OFFSET_L3], %l3
	ldx [\ctx + OFFSET_L4], %l4
	ldx [\ctx + OFFSET_L5], %l5
	ldx [\ctx + OFFSET_L6], %l6
	ldx [\ctx + OFFSET_L7], %l7
#ifndef KERNEL
	ldx [\ctx + OFFSET_TP], %g7
#endif
.endm
#endif /* __ASM__ */ |
#endif |
/branches/network/kernel/arch/sparc64/include/byteorder.h |
---|
35,14 → 35,8 |
#ifndef KERN_sparc64_BYTEORDER_H_ |
#define KERN_sparc64_BYTEORDER_H_ |
#include <byteorder.h> |
#define ARCH_IS_BIG_ENDIAN |
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n) |
#define uint32_t_be2host(n) (n) |
#define uint64_t_be2host(n) (n) |
#endif |
/** @} |
/branches/network/kernel/arch/sparc64/Makefile.inc |
---|
29,11 → 29,15 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf64-sparc |
BFD_ARCH = sparc |
BFD = binary |
TARGET = sparc64-linux-gnu |
TOOLCHAIN_DIR = /usr/local/sparc64 |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/sparc64 |
GCC_CFLAGS += -m64 -mcpu=ultrasparc |
SUNCC_CFLAGS += -m64 -xarch=sparc -xregs=appl,no%float |
/branches/network/kernel/arch/sparc64/src/asm.S |
---|
41,6 → 41,7 |
*/ |
.global memcpy |
memcpy: |
mov %o0, %o3 ! save dst |
add %o1, 7, %g1 |
and %g1, -8, %g1 |
cmp %o1, %g1 |
59,7 → 60,7 |
mov %g2, %g3 |
2: |
jmp %o7 + 8 ! exit point |
mov %o1, %o0 |
mov %o3, %o0 |
3: |
and %g1, -8, %g1 |
cmp %o0, %g1 |
93,7 → 94,7 |
mov %g2, %g3 |
jmp %o7 + 8 ! exit point |
mov %o1, %o0 |
mov %o3, %o0 |
/* |
* Almost the same as memcpy() except the loads are from userspace. |
100,6 → 101,7 |
*/ |
.global memcpy_from_uspace |
memcpy_from_uspace: |
mov %o0, %o3 ! save dst |
add %o1, 7, %g1 |
and %g1, -8, %g1 |
cmp %o1, %g1 |
118,7 → 120,7 |
mov %g2, %g3 |
2: |
jmp %o7 + 8 ! exit point |
mov %o1, %o0 |
mov %o3, %o0 |
3: |
and %g1, -8, %g1 |
cmp %o0, %g1 |
152,7 → 154,7 |
mov %g2, %g3 |
jmp %o7 + 8 ! exit point |
mov %o1, %o0 |
mov %o3, %o0 |
/* |
* Almost the same as memcpy() except the stores are to userspace. |
159,6 → 161,7 |
*/ |
.global memcpy_to_uspace |
memcpy_to_uspace: |
mov %o0, %o3 ! save dst |
add %o1, 7, %g1 |
and %g1, -8, %g1 |
cmp %o1, %g1 |
177,7 → 180,7 |
mov %g2, %g3 |
2: |
jmp %o7 + 8 ! exit point |
mov %o1, %o0 |
mov %o3, %o0 |
3: |
and %g1, -8, %g1 |
cmp %o0, %g1 |
211,7 → 214,7 |
mov %g2, %g3 |
jmp %o7 + 8 ! exit point |
mov %o1, %o0 |
mov %o3, %o0 |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
274,6 → 277,8 |
wrpr %g0, 0, %cleanwin ! avoid information leak |
mov %i2, %o0 ! uarg |
xor %o1, %o1, %o1 ! %o1 is defined to hold pcb_ptr |
! set it to 0 |
clr %i2 |
clr %i3 |
/branches/network/kernel/arch/sparc64/src/mm/cache.S |
---|
27,10 → 27,8 |
*/ |
#include <arch/arch.h> |
#include <arch/mm/cache_spec.h> |
#define DCACHE_SIZE (16 * 1024) |
#define DCACHE_LINE_SIZE 32 |
#define DCACHE_TAG_SHIFT 2 |
.register %g2, #scratch |
/branches/network/kernel/arch/sparc64/src/mm/as.c |
---|
76,7 → 76,7 |
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * |
sizeof(tsb_entry_t)); |
memsetb((uintptr_t) as->arch.itsb, |
memsetb(as->arch.itsb, |
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0); |
#endif |
return 0; |
/branches/network/kernel/arch/sparc64/src/mm/tlb.c |
---|
490,7 → 490,7 |
*/ |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
int i; |
unsigned int i; |
tlb_context_reg_t pc_save, ctx; |
/* switch to nucleus because we are mapped by the primary context */ |
/branches/network/kernel/arch/sparc64/src/mm/frame.c |
---|
48,7 → 48,7 |
*/ |
void frame_arch_init(void) |
{ |
int i; |
unsigned int i; |
pfn_t confdata; |
if (config.cpu_active == 1) { |
/branches/network/kernel/arch/sparc64/src/mm/page.c |
---|
66,7 → 66,7 |
} else { |
#ifdef CONFIG_SMP |
int i; |
unsigned int i; |
/* |
* Copy locked DTLB entries from the BSP. |
98,7 → 98,7 |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
unsigned int order; |
int i; |
unsigned int i; |
ASSERT(config.cpu_active == 1); |
/branches/network/kernel/arch/sparc64/src/smp/smp.c |
---|
99,7 → 99,7 |
waking_up_mid = mid; |
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) |
printf("%s: waiting for processor (mid = %d) timed out\n", |
printf("%s: waiting for processor (mid = %" PRIu32 ") timed out\n", |
__func__, mid); |
} |
} |
/branches/network/kernel/arch/sparc64/src/smp/ipi.c |
---|
116,7 → 116,7 |
*/ |
void ipi_broadcast_arch(int ipi) |
{ |
int i; |
unsigned int i; |
void (* func)(void); |
/branches/network/kernel/arch/sparc64/src/trap/exception.c |
---|
45,9 → 45,9 |
void dump_istate(istate_t *istate) |
{ |
printf("TSTATE=%#llx\n", istate->tstate); |
printf("TPC=%#llx (%s)\n", istate->tpc, get_symtab_entry(istate->tpc)); |
printf("TNPC=%#llx (%s)\n", istate->tnpc, get_symtab_entry(istate->tnpc)); |
printf("TSTATE=%#" PRIx64 "\n", istate->tstate); |
printf("TPC=%#" PRIx64 " (%s)\n", istate->tpc, get_symtab_entry(istate->tpc)); |
printf("TNPC=%#" PRIx64 " (%s)\n", istate->tnpc, get_symtab_entry(istate->tnpc)); |
} |
/** Handle instruction_access_exception. (0x8) */ |
/branches/network/kernel/arch/sparc64/src/trap/interrupt.c |
---|
97,8 → 97,8 |
* Spurious interrupt. |
*/ |
#ifdef CONFIG_DEBUG |
printf("cpu%d: spurious interrupt (intrcv=%#llx, " |
"data0=%#llx)\n", CPU->id, intrcv, data0); |
printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64 |
", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0); |
#endif |
} |
/branches/network/kernel/arch/sparc64/src/cpu/cpu.c |
---|
135,7 → 135,7 |
break; |
} |
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n", m->id, manuf, |
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%d MHz)\n", m->id, manuf, |
impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000); |
} |
/branches/network/kernel/arch/sparc64/src/context.S |
---|
36,55 → 36,15 |
* functions. |
*/ |
#include <arch/context_offset.h> |
.text |
.global context_save_arch |
.global context_restore_arch |
.macro CONTEXT_STORE r |
stx %sp, [\r + OFFSET_SP] |
stx %o7, [\r + OFFSET_PC] |
stx %i0, [\r + OFFSET_I0] |
stx %i1, [\r + OFFSET_I1] |
stx %i2, [\r + OFFSET_I2] |
stx %i3, [\r + OFFSET_I3] |
stx %i4, [\r + OFFSET_I4] |
stx %i5, [\r + OFFSET_I5] |
stx %fp, [\r + OFFSET_FP] |
stx %i7, [\r + OFFSET_I7] |
stx %l0, [\r + OFFSET_L0] |
stx %l1, [\r + OFFSET_L1] |
stx %l2, [\r + OFFSET_L2] |
stx %l3, [\r + OFFSET_L3] |
stx %l4, [\r + OFFSET_L4] |
stx %l5, [\r + OFFSET_L5] |
stx %l6, [\r + OFFSET_L6] |
stx %l7, [\r + OFFSET_L7] |
.endm |
.macro CONTEXT_LOAD r |
ldx [\r + OFFSET_SP], %sp |
ldx [\r + OFFSET_PC], %o7 |
ldx [\r + OFFSET_I0], %i0 |
ldx [\r + OFFSET_I1], %i1 |
ldx [\r + OFFSET_I2], %i2 |
ldx [\r + OFFSET_I3], %i3 |
ldx [\r + OFFSET_I4], %i4 |
ldx [\r + OFFSET_I5], %i5 |
ldx [\r + OFFSET_FP], %fp |
ldx [\r + OFFSET_I7], %i7 |
ldx [\r + OFFSET_L0], %l0 |
ldx [\r + OFFSET_L1], %l1 |
ldx [\r + OFFSET_L2], %l2 |
ldx [\r + OFFSET_L3], %l3 |
ldx [\r + OFFSET_L4], %l4 |
ldx [\r + OFFSET_L5], %l5 |
ldx [\r + OFFSET_L6], %l6 |
ldx [\r + OFFSET_L7], %l7 |
.endm |
context_save_arch: |
CONTEXT_STORE %o0 |
CONTEXT_SAVE_ARCH_CORE %o0 |
retl |
mov 1, %o0 ! context_save_arch returns 1 |
98,6 → 58,6 |
# |
flushw |
CONTEXT_LOAD %o0 |
CONTEXT_RESTORE_ARCH_CORE %o0 |
retl |
xor %o0, %o0, %o0 ! context_restore_arch returns 0 |
/branches/network/kernel/arch/ia64/Makefile.inc |
---|
29,10 → 29,14 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf64-little |
BFD_ARCH = ia64-elf64 |
TARGET = ia64-pc-linux-gnu |
TOOLCHAIN_DIR = /usr/local/ia64 |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/ia64 |
INIT0_ADDRESS = 0xe000000004404000 |
INIT0_SIZE = 0x100000 |
/branches/network/kernel/arch/ia64/src/asm.S |
---|
51,7 → 51,7 |
adds r14 = 7, in1 |
mov r2 = ar.lc |
mov r8 = in1 ;; |
mov r8 = in0 |
and r14 = -8, r14 ;; |
cmp.ne p6, p7 = r14, in1 |
(p7) br.cond.dpnt 3f ;; |
63,7 → 63,7 |
(p6) mov r17 = r0 ;; |
(p6) mov ar.lc = r14 |
1: |
add r14 = r16, r8 |
add r14 = r16, in1 |
add r15 = r16, in0 |
adds r17 = 1, r17 ;; |
ld1 r14 = [r14] |
72,7 → 72,6 |
br.cloop.sptk.few 1b ;; |
2: |
mov ar.lc = r2 |
mov ar.pfs = loc0 |
br.ret.sptk.many rp |
3: |
90,7 → 89,7 |
4: |
shladd r14 = r16, 3, r0 |
adds r16 = 1, r17 ;; |
add r15 = r8, r14 |
add r15 = in1, r14 |
add r14 = in0, r14 |
mov r17 = r16 ;; |
ld8 r15 = [r15] ;; |
104,7 → 103,7 |
cmp.eq p6, p7 = 0, r15 |
add in0 = r14, in0 |
adds r15 = -1, r15 |
add r17 = r14, r8 |
add r17 = r14, in1 |
(p6) br.cond.dpnt 2b ;; |
mov ar.lc = r15 |
6: |
116,7 → 115,6 |
st1 [r15] = r14 |
br.cloop.sptk.few 6b ;; |
mov ar.lc = r2 |
mov ar.pfs = loc0 |
br.ret.sptk.many rp |
163,6 → 161,9 |
xor r1 = r1, r1 |
/* r2 is defined to hold pcb_ptr - set it to 0 */ |
xor r2 = r2, r2 |
mov loc1 = cr.ifs |
movl loc2 = PFM_MASK ;; |
and loc1 = loc2, loc1 ;; |
/branches/network/kernel/arch/ia64/src/mm/vhpt.c |
---|
81,7 → 81,7 |
void vhpt_invalidate_all() |
{ |
memsetb((uintptr_t) vhpt_base, 1 << VHPT_WIDTH, 0); |
memsetb(vhpt_base, 1 << VHPT_WIDTH, 0); |
} |
void vhpt_invalidate_asid(asid_t asid) |
/branches/network/kernel/arch/ia64/src/mm/tlb.c |
---|
59,7 → 59,7 |
uintptr_t adr; |
uint32_t count1, count2, stride1, stride2; |
int i, j; |
unsigned int i, j; |
adr = PAL_PTCE_INFO_BASE(); |
count1 = PAL_PTCE_INFO_COUNT1(); |
69,8 → 69,8 |
ipl = interrupts_disable(); |
for(i = 0; i < count1; i++) { |
for(j = 0; j < count2; j++) { |
for (i = 0; i < count1; i++) { |
for (j = 0; j < count2; j++) { |
asm volatile ( |
"ptc.e %0 ;;" |
: |
/branches/network/kernel/arch/ia64/src/drivers/ega.c |
---|
71,7 → 71,7 |
/* |
* Clear the screen. |
*/ |
_memsetw((uintptr_t) videoram, SCREEN, 0x0720); |
_memsetw(videoram, SCREEN, 0x0720); |
chardev_initialize("ega_out", &ega_console, &ega_ops); |
stdout = &ega_console; |
102,7 → 102,7 |
return; |
memcpy((void *) videoram, (void *) (videoram + ROW * 2), (SCREEN - ROW) * 2); |
_memsetw((uintptr_t) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720); |
_memsetw(videoram + (SCREEN - ROW) * 2, ROW, 0x0720); |
ega_cursor = ega_cursor - ROW; |
} |
/branches/network/kernel/arch/ia64/src/ia64.c |
---|
62,12 → 62,13 |
/* Setup usermode init tasks. */ |
//#ifdef I460GX |
int i; |
unsigned int i; |
init.cnt = bootinfo->taskmap.count; |
for(i=0;i<init.cnt;i++) |
{ |
init.tasks[i].addr = ((unsigned long)bootinfo->taskmap.tasks[i].addr)|VRN_MASK; |
init.tasks[i].size = bootinfo->taskmap.tasks[i].size; |
for (i = 0; i < init.cnt; i++) { |
init.tasks[i].addr = ((unsigned long) bootinfo->taskmap.tasks[i].addr) | VRN_MASK; |
init.tasks[i].size = bootinfo->taskmap.tasks[i].size; |
} |
/* |
#else |
/branches/network/kernel/arch/ia64/include/mm/page.h |
---|
41,8 → 41,6 |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifdef KERNEL |
/** Bit width of the TLB-locked portion of kernel address space. */ |
/branches/network/kernel/arch/ia64/include/barrier.h |
---|
45,9 → 45,33 |
#define read_barrier() memory_barrier() |
#define write_barrier() memory_barrier() |
#define srlz_i() asm volatile (";; srlz.i ;;\n" ::: "memory") |
#define srlz_d() asm volatile (";; srlz.d\n" ::: "memory") |
#define srlz_i() \ |
asm volatile (";; srlz.i ;;\n" ::: "memory") |
#define srlz_d() \ |
asm volatile (";; srlz.d\n" ::: "memory") |
#define fc_i(a) \ |
asm volatile ("fc.i %0\n" :: "r" ((a)) : "memory") |
#define sync_i() \ |
asm volatile (";; sync.i\n" ::: "memory") |
#define smc_coherence(a) \ |
{ \ |
fc_i((a)); \ |
sync_i(); \ |
srlz_i(); \ |
} |
#define FC_INVAL_MIN 32 |
#define smc_coherence_block(a, l) \ |
{ \ |
unsigned long i; \ |
for (i = 0; i < (l); i += FC_INVAL_MIN) \ |
fc_i((void *)(a) + i); \ |
sync_i(); \ |
srlz_i(); \ |
} |
#endif |
/** @} |
/branches/network/kernel/arch/ia64/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
extern int memcmp(const void *a, const void *b, size_t cnt); |
#endif |
/branches/network/kernel/arch/ia64/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_ia64_TYPES_H_ |
#define KERN_ia64_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
69,14 → 65,29 |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
#define PRIp "lx" /**< Format for uintptr_t. */ |
#define PRIs "lu" /**< Format for size_t. */ |
#define PRIc "lu" /**< Format for count_t. */ |
#define PRIi "lu" /**< Format for index_t. */ |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
#define PRId8 "d" /**< Format for int8_t. */ |
#define PRId16 "d" /**< Format for int16_t. */ |
#define PRId32 "d" /**< Format for int32_t. */ |
#define PRId64 "ld" /**< Format for int64_t. */ |
#define PRIdn "d" /**< Format for native_t. */ |
#define PRIu8 "u" /**< Format for uint8_t. */ |
#define PRIu16 "u" /**< Format for uint16_t. */ |
#define PRIu32 "u" /**< Format for uint32_t. */ |
#define PRIu64 "lu" /**< Format for uint64_t. */ |
#define PRIun "u" /**< Format for unative_t. */ |
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */ |
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */ |
#define PRIx32 "x" /**< Format for hexadecimal (u)int32_t. */
#define PRIx64 "lx" /**< Format for hexadecimal (u)int64_t. */ |
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */ |
#endif |
/** @} |
/branches/network/kernel/arch/ia64/include/byteorder.h |
---|
35,15 → 35,9 |
#ifndef KERN_ia64_BYTEORDER_H_ |
#define KERN_ia64_BYTEORDER_H_ |
#include <byteorder.h> |
/* IA-64 is little-endian */ |
#define uint32_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#define ARCH_IS_LITTLE_ENDIAN |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#endif |
/** @} |
/branches/network/kernel/arch/arm32/Makefile.inc |
---|
29,17 → 29,21 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf32-littlearm |
BFD_ARCH = arm |
BFD = binary |
TARGET = arm-linux-gnu |
TOOLCHAIN_DIR = /usr/local/arm |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/arm |
KERNEL_LOAD_ADDRESS = 0x80200000 |
ifeq ($(MACHINE), gxemul_testarm) |
# ifeq ($(MACHINE), gxemul_testarm) |
DMACHINE = MACHINE_GXEMUL_TESTARM |
endif |
# endif |
ATSIGN = % |
90,7 → 94,7 |
arch/$(ARCH)/src/mm/tlb.c \ |
arch/$(ARCH)/src/mm/page_fault.c |
ifeq ($(MACHINE), gxemul_testarm) |
# ifeq ($(MACHINE), gxemul_testarm) |
ARCH_SOURCES += arch/$(ARCH)/src/drivers/gxemul.c |
endif |
# endif |
/branches/network/kernel/arch/arm32/src/asm.S |
---|
45,7 → 45,8 |
add r3, r1, #3 |
bic r3, r3, #3 |
cmp r1, r3 |
stmdb sp!, {r4, lr} |
stmdb sp!, {r4, r5, lr} |
mov r5, r0 /* save dst */ |
beq 4f |
1: |
cmp r2, #0 |
58,8 → 59,8 |
cmp ip, r2 |
bne 2b |
3: |
mov r0, r1 |
ldmia sp!, {r4, pc} |
mov r0, r5 |
ldmia sp!, {r4, r5, pc} |
4: |
add r3, r0, #3 |
bic r3, r3, #3 |
94,5 → 95,5 |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
mov r0, #0 |
ldmia sp!, {r4, pc} |
mov r0, #0 |
ldmia sp!, {r4, r5, pc} |
/branches/network/kernel/arch/arm32/src/userspace.c |
---|
70,8 → 70,11 |
/* set first parameter */ |
ustate.r0 = (uintptr_t) kernel_uarg->uspace_uarg; |
/* %r1 is defined to hold pcb_ptr - set it to 0 */ |
ustate.r1 = 0; |
/* clear other registers */ |
ustate.r1 = ustate.r2 = ustate.r3 = ustate.r4 = ustate.r5 = |
ustate.r2 = ustate.r3 = ustate.r4 = ustate.r5 = |
ustate.r6 = ustate.r7 = ustate.r8 = ustate.r9 = ustate.r10 = |
ustate.r11 = ustate.r12 = ustate.lr = 0; |
/branches/network/kernel/arch/arm32/src/exception.c |
---|
40,6 → 40,7 |
#include <interrupt.h> |
#include <arch/machine.h> |
#include <arch/mm/page_fault.h> |
#include <arch/barrier.h> |
#include <print.h> |
#include <syscall/syscall.h> |
209,7 → 210,7 |
* |
* Addresses of handlers are stored in memory following exception vectors. |
*/ |
static void install_handler (unsigned handler_addr, unsigned* vector) |
static void install_handler(unsigned handler_addr, unsigned *vector) |
{ |
/* relative address (related to exc. vector) of the word |
* where handler's address is stored |
219,6 → 220,7 |
/* make it LDR instruction and store at exception vector */ |
*vector = handler_address_ptr | LDR_OPCODE; |
smc_coherence(*vector); |
/* store handler's address */ |
*(vector + EXC_VECTORS) = handler_addr; |
226,31 → 228,31 |
} |
/** Low-level Reset Exception handler. */ |
static void reset_exception_entry() |
static void reset_exception_entry(void) |
{ |
PROCESS_EXCEPTION(EXC_RESET); |
} |
/** Low-level Software Interrupt Exception handler. */ |
static void swi_exception_entry() |
static void swi_exception_entry(void) |
{ |
PROCESS_EXCEPTION(EXC_SWI); |
} |
/** Low-level Undefined Instruction Exception handler. */ |
static void undef_instr_exception_entry() |
static void undef_instr_exception_entry(void) |
{ |
PROCESS_EXCEPTION(EXC_UNDEF_INSTR); |
} |
/** Low-level Fast Interrupt Exception handler. */ |
static void fiq_exception_entry() |
static void fiq_exception_entry(void) |
{ |
PROCESS_EXCEPTION(EXC_FIQ); |
} |
/** Low-level Prefetch Abort Exception handler. */ |
static void prefetch_abort_exception_entry() |
static void prefetch_abort_exception_entry(void) |
{ |
asm("sub lr, lr, #4"); |
PROCESS_EXCEPTION(EXC_PREFETCH_ABORT); |
257,7 → 259,7 |
} |
/** Low-level Data Abort Exception handler. */ |
static void data_abort_exception_entry() |
static void data_abort_exception_entry(void) |
{ |
asm("sub lr, lr, #8"); |
PROCESS_EXCEPTION(EXC_DATA_ABORT); |
269,7 → 271,7 |
* because of possible occurrence of nested interrupt exception, which
* would overwrite (and thus spoil) stack pointer. |
*/ |
static void irq_exception_entry() |
static void irq_exception_entry(void) |
{ |
asm("sub lr, lr, #4"); |
setup_stack_and_save_regs(); |
/branches/network/kernel/arch/arm32/src/mm/page_fault.c |
---|
40,6 → 40,7 |
#include <genarch/mm/page_pt.h> |
#include <arch.h> |
#include <interrupt.h> |
#include <print.h> |
/** Returns value stored in fault status register. |
* |
173,7 → 174,8 |
*/ |
void data_abort(int exc_no, istate_t *istate) |
{ |
fault_status_t fsr = read_fault_status_register(); |
fault_status_t fsr __attribute__ ((unused)) = |
read_fault_status_register(); |
uintptr_t badvaddr = read_fault_address_register(); |
pf_access_t access = get_memory_access_type(istate->pc, badvaddr); |
/branches/network/kernel/arch/arm32/src/arm32.c |
---|
55,7 → 55,7 |
/** Performs arm32 specific initialization before main_bsp() is called. */ |
void arch_pre_main(void) |
{ |
int i; |
unsigned int i; |
init.cnt = bootinfo.cnt; |
/branches/network/kernel/arch/arm32/src/debug/print.c |
---|
56,10 → 56,10 |
*/ |
static int debug_write(const char *str, size_t count, void *unused) |
{ |
int i; |
for (i = 0; i < count; ++i) { |
unsigned int i; |
for (i = 0; i < count; ++i) |
putc(str[i]); |
} |
return i; |
} |
/branches/network/kernel/arch/arm32/src/cpu/cpu.c |
---|
56,7 → 56,7 |
}; |
/** Length of the #imp_data array */ |
static int imp_data_length = sizeof(imp_data) / sizeof(char *); |
static unsigned int imp_data_length = sizeof(imp_data) / sizeof(char *); |
/** Architecture names */ |
static char *arch_data[] = { |
71,7 → 71,7 |
}; |
/** Length of the #arch_data array */ |
static int arch_data_length = sizeof(arch_data) / sizeof(char *); |
static unsigned int arch_data_length = sizeof(arch_data) / sizeof(char *); |
/** Retrieves processor identification from CP15 register 0. |
/branches/network/kernel/arch/arm32/include/mm/page.h |
---|
43,8 → 43,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifndef __ASM__ |
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) |
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) |
/branches/network/kernel/arch/arm32/include/barrier.h |
---|
46,6 → 46,9 |
#define read_barrier() asm volatile ("" ::: "memory") |
#define write_barrier() asm volatile ("" ::: "memory") |
#define smc_coherence(a) |
#define smc_coherence_block(a, l) |
#endif |
/** @} |
/branches/network/kernel/arch/arm32/include/memstr.h |
---|
38,10 → 38,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
extern int memcmp(const void *a, const void *b, size_t cnt); |
#endif |
/branches/network/kernel/arch/arm32/include/types.h |
---|
42,10 → 42,6 |
# define ATTRIBUTE_PACKED |
#endif |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed long int32_t; |
68,15 → 64,29 |
typedef uint32_t unative_t; |
typedef int32_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
#define PRIp "x" /**< Format for uintptr_t. */ |
#define PRIs "u" /**< Format for size_t. */ |
#define PRIc "u" /**< Format for count_t. */ |
#define PRIi "u" /**< Format for index_t. */ |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
#define PRId8 "d" /**< Format for int8_t. */ |
#define PRId16 "d" /**< Format for int16_t. */ |
#define PRId32 "d" /**< Format for int32_t. */ |
#define PRId64 "lld" /**< Format for int64_t. */ |
#define PRIdn "d" /**< Format for native_t. */ |
#define PRIu8 "u" /**< Format for uint8_t. */ |
#define PRIu16 "u" /**< Format for uint16_t. */ |
#define PRIu32 "u" /**< Format for uint32_t. */ |
#define PRIu64 "llu" /**< Format for uint64_t. */ |
#define PRIun "u" /**< Format for unative_t. */ |
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */ |
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */ |
#define PRIx32 "x" /**< Format for hexadecimal (u)int32_t. */
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */ |
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */ |
/** Page table entry. |
* |
* We have different structs for level 0 and level 1 page table entries. |
/branches/network/kernel/arch/arm32/include/byteorder.h |
---|
36,24 → 36,10 |
#ifndef KERN_arm32_BYTEORDER_H_ |
#define KERN_arm32_BYTEORDER_H_ |
#include <byteorder.h> |
#ifdef BIG_ENDIAN |
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n) |
#define uint32_t_be2host(n) (n) |
#define uint64_t_be2host(n) (n) |
#define ARCH_IS_BIG_ENDIAN |
#else |
#define uint32_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#define ARCH_IS_LITTLE_ENDIAN |
#endif |
#endif |
/branches/network/kernel/arch/ppc32/Makefile.inc |
---|
29,11 → 29,15 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf32-powerpc |
BFD_ARCH = powerpc:common |
BFD = binary |
TARGET = ppc-linux-gnu |
TOOLCHAIN_DIR = /usr/local/ppc |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/ppc |
GCC_CFLAGS += -mcpu=powerpc -msoft-float -m32 |
AFLAGS += -a32 |
/branches/network/kernel/arch/ppc32/include/mm/page.h |
---|
40,8 → 40,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifdef KERNEL |
#ifndef __ASM__ |
/branches/network/kernel/arch/ppc32/include/mm/tlb.h |
---|
36,6 → 36,8 |
#define KERN_ppc32_TLB_H_ |
#include <arch/interrupt.h> |
#include <arch/types.h> |
#include <typedefs.h> |
typedef struct { |
unsigned v : 1; /**< Valid */ |
/branches/network/kernel/arch/ppc32/include/barrier.h |
---|
42,6 → 42,43 |
#define read_barrier() asm volatile ("sync" ::: "memory") |
#define write_barrier() asm volatile ("eieio" ::: "memory") |
/* |
* The IMB sequence used here is valid for all possible cache models |
* on uniprocessor. SMP might require a different sequence. |
* See PowerPC Programming Environment for 32-Bit Microprocessors, |
* chapter 5.1.5.2 |
*/ |
/** Enforce instruction memory coherence after self-modifying code.
 *
 * Flushes the modified data-cache block at @a addr to memory (dcbst),
 * waits for the store to complete (sync), invalidates the stale copy
 * in the instruction cache (icbi) and discards prefetched
 * instructions (isync).
 *
 * @param addr Address of the modified instruction word.
 */
static inline void smc_coherence(void *addr)
{
	asm volatile (
		"dcbst 0, %0\n"
		"sync\n"
		"icbi 0, %0\n"
		"isync\n"
		:: "r" (addr)
	);
}
/* Step used when walking the block; one instruction word (4 bytes).
 * Conservative: dcbst/icbi actually act on whole cache blocks. */
#define COHERENCE_INVAL_MIN 4

/** Enforce instruction memory coherence for a block of code.
 *
 * Same sequence as smc_coherence(), applied to a region: first flush
 * every data-cache block of the region to memory, then invalidate the
 * corresponding instruction-cache blocks.
 *
 * @param addr Start of the modified code region.
 * @param len  Length of the region in bytes.
 */
static inline void smc_coherence_block(void *addr, unsigned long len)
{
	unsigned long i;
	for (i = 0; i < len; i += COHERENCE_INVAL_MIN) {
		asm volatile ("dcbst 0, %0\n" :: "r" (addr + i));
	}
	asm volatile ("sync");
	for (i = 0; i < len; i += COHERENCE_INVAL_MIN) {
		asm volatile ("icbi 0, %0\n" :: "r" (addr + i));
	}
	asm volatile ("isync");
}
#endif |
/** @} |
/branches/network/kernel/arch/ppc32/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
extern int memcmp(const void *a, const void *b, size_t cnt); |
#endif |
/branches/network/kernel/arch/ppc32/include/exception.h |
---|
82,6 → 82,7 |
{ |
istate->pc = retaddr; |
} |
/** Return true if exception happened while in userspace */ |
#include <panic.h> |
static inline int istate_from_uspace(istate_t *istate) |
89,6 → 90,7 |
panic("istate_from_uspace not yet implemented"); |
return 0; |
} |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->pc; |
/branches/network/kernel/arch/ppc32/include/boot/boot.h |
---|
38,7 → 38,7 |
#define BOOT_OFFSET 0x8000 |
/* Temporary stack size for boot process */ |
#define TEMP_STACK_SIZE 0x100 |
#define TEMP_STACK_SIZE 0x1000 |
#define TASKMAP_MAX_RECORDS 32 |
#define MEMMAP_MAX_RECORDS 32 |
/branches/network/kernel/arch/ppc32/include/drivers/cuda.h |
---|
36,6 → 36,7 |
#define KERN_ppc32_CUDA_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
extern void cuda_init(devno_t devno, uintptr_t base, size_t size); |
extern int cuda_get_scancode(void); |
/branches/network/kernel/arch/ppc32/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_ppc32_TYPES_H_ |
#define KERN_ppc32_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
61,14 → 57,31 |
typedef uint32_t unative_t; |
typedef int32_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
/**< Formats for uintptr_t, size_t, count_t and index_t */ |
#define PRIp "x" |
#define PRIs "u" |
#define PRIc "u" |
#define PRIi "u" |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */ |
#define PRId8 "d" |
#define PRId16 "d" |
#define PRId32 "d" |
#define PRId64 "lld" |
#define PRIdn "d" |
#define PRIu8 "u" |
#define PRIu16 "u" |
#define PRIu32 "u" |
#define PRIu64 "llu" |
#define PRIun "u" |
#define PRIx8 "x" |
#define PRIx16 "x" |
#define PRIx32 "x" |
#define PRIx64 "llx" |
#define PRIxn "x" |
/** Page Table Entry. */ |
typedef struct { |
unsigned p : 1; /**< Present bit. */ |
/branches/network/kernel/arch/ppc32/include/context_offset.h |
---|
73,4 → 73,59 |
#define OFFSET_FR31 0x88 |
#define OFFSET_FPSCR 0x90 |
#ifdef __ASM__ |
# include <arch/asm/regname.h> |
# ctx: address of the structure with saved context |
.macro CONTEXT_SAVE_ARCH_CORE ctx:req |
stw sp, OFFSET_SP(\ctx) |
stw r2, OFFSET_R2(\ctx) |
stw r13, OFFSET_R13(\ctx) |
stw r14, OFFSET_R14(\ctx) |
stw r15, OFFSET_R15(\ctx) |
stw r16, OFFSET_R16(\ctx) |
stw r17, OFFSET_R17(\ctx) |
stw r18, OFFSET_R18(\ctx) |
stw r19, OFFSET_R19(\ctx) |
stw r20, OFFSET_R20(\ctx) |
stw r21, OFFSET_R21(\ctx) |
stw r22, OFFSET_R22(\ctx) |
stw r23, OFFSET_R23(\ctx) |
stw r24, OFFSET_R24(\ctx) |
stw r25, OFFSET_R25(\ctx) |
stw r26, OFFSET_R26(\ctx) |
stw r27, OFFSET_R27(\ctx) |
stw r28, OFFSET_R28(\ctx) |
stw r29, OFFSET_R29(\ctx) |
stw r30, OFFSET_R30(\ctx) |
stw r31, OFFSET_R31(\ctx) |
.endm |
# ctx: address of the structure with saved context |
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req |
lwz sp, OFFSET_SP(\ctx) |
lwz r2, OFFSET_R2(\ctx) |
lwz r13, OFFSET_R13(\ctx) |
lwz r14, OFFSET_R14(\ctx) |
lwz r15, OFFSET_R15(\ctx) |
lwz r16, OFFSET_R16(\ctx) |
lwz r17, OFFSET_R17(\ctx) |
lwz r18, OFFSET_R18(\ctx) |
lwz r19, OFFSET_R19(\ctx) |
lwz r20, OFFSET_R20(\ctx) |
lwz r21, OFFSET_R21(\ctx) |
lwz r22, OFFSET_R22(\ctx) |
lwz r23, OFFSET_R23(\ctx) |
lwz r24, OFFSET_R24(\ctx) |
lwz r25, OFFSET_R25(\ctx) |
lwz r26, OFFSET_R26(\ctx) |
lwz r27, OFFSET_R27(\ctx) |
lwz r28, OFFSET_R28(\ctx) |
lwz r29, OFFSET_R29(\ctx) |
lwz r30, OFFSET_R30(\ctx) |
lwz r31, OFFSET_R31(\ctx) |
.endm |
#endif /* __ASM__ */ |
#endif |
/branches/network/kernel/arch/ppc32/include/byteorder.h |
---|
35,14 → 35,8 |
#ifndef KERN_ppc32_BYTEORDER_H_ |
#define KERN_ppc32_BYTEORDER_H_ |
#include <byteorder.h> |
#define ARCH_IS_BIG_ENDIAN |
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n) |
#define uint32_t_be2host(n) (n) |
#define uint64_t_be2host(n) (n) |
#endif |
/** @} |
/branches/network/kernel/arch/ppc32/src/asm.S |
---|
65,6 → 65,10 |
# set stack |
mr sp, r4 |
# %r3 is defined to hold pcb_ptr - set it to 0 |
xor r3, r3, r3 |
# jump to userspace |
199,47 → 203,7 |
rfi |
memsetb: |
rlwimi r5, r5, 8, 16, 23 |
rlwimi r5, r5, 16, 0, 15 |
addi r14, r3, -4 |
cmplwi 0, r4, 4 |
blt 7f |
stwu r5, 4(r14) |
beqlr |
andi. r15, r14, 3 |
add r4, r15, r4 |
subf r14, r15, r14 |
srwi r15, r4, 2 |
mtctr r15 |
bdz 6f |
1: |
stwu r5, 4(r14) |
bdnz 1b |
6: |
andi. r4, r4, 3 |
7: |
cmpwi 0, r4, 0 |
beqlr |
mtctr r4 |
addi r6, r6, 3 |
8: |
stbu r5, 1(r14) |
bdnz 8b |
blr |
b _memsetb |
memcpy: |
memcpy_from_uspace: |
308,4 → 272,6 |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
b memcpy_from_uspace_failover_address |
# return zero, failure |
xor r3, r3, r3 |
blr |
/branches/network/kernel/arch/ppc32/src/mm/tlb.c |
---|
47,16 → 47,19 |
* The as->lock must be held on entry to this function |
* if lock is true. |
* |
* @param as Address space. |
* @param lock Lock/unlock the address space. |
* @param badvaddr Faulting virtual address. |
* @param access Access mode that caused the fault. |
* @param istate Pointer to interrupted state. |
* @param pfrc Pointer to variable where as_page_fault() return code will be stored. |
* @return PTE on success, NULL otherwise. |
* @param as Address space. |
* @param lock Lock/unlock the address space. |
* @param badvaddr Faulting virtual address. |
* @param access Access mode that caused the fault. |
* @param istate Pointer to interrupted state. |
* @param pfrc Pointer to variable where as_page_fault() return code |
* will be stored. |
* @return PTE on success, NULL otherwise. |
* |
*/ |
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access, istate_t *istate, int *pfrc) |
static pte_t * |
find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access, |
istate_t *istate, int *pfrc) |
{ |
/* |
* Check if the mapping exists in page tables. |
77,27 → 80,27 |
*/ |
page_table_unlock(as, lock); |
switch (rc = as_page_fault(badvaddr, access, istate)) { |
case AS_PF_OK: |
/* |
* The higher-level page fault handler succeeded, |
* The mapping ought to be in place. |
*/ |
page_table_lock(as, lock); |
pte = page_mapping_find(as, badvaddr); |
ASSERT((pte) && (pte->p)); |
*pfrc = 0; |
return pte; |
case AS_PF_DEFER: |
page_table_lock(as, lock); |
*pfrc = rc; |
return NULL; |
case AS_PF_FAULT: |
page_table_lock(as, lock); |
printf("Page fault.\n"); |
*pfrc = rc; |
return NULL; |
default: |
panic("unexpected rc (%d)\n", rc); |
case AS_PF_OK: |
/* |
* The higher-level page fault handler succeeded, |
* The mapping ought to be in place. |
*/ |
page_table_lock(as, lock); |
pte = page_mapping_find(as, badvaddr); |
ASSERT((pte) && (pte->p)); |
*pfrc = 0; |
return pte; |
case AS_PF_DEFER: |
page_table_lock(as, lock); |
*pfrc = rc; |
return NULL; |
case AS_PF_FAULT: |
page_table_lock(as, lock); |
printf("Page fault.\n"); |
*pfrc = rc; |
return NULL; |
default: |
panic("unexpected rc (%d)\n", rc); |
} |
} |
} |
114,7 → 117,8 |
s = get_symtab_entry(istate->lr); |
if (s) |
sym2 = s; |
panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, istate->pc, symbol, sym2); |
panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, |
istate->pc, symbol, sym2); |
} |
147,7 → 151,8 |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) { |
if ((!phte[base + i].v) || ((phte[base + i].vsid == vsid) && |
(phte[base + i].api == api))) { |
found = true; |
break; |
} |
160,7 → 165,9 |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte[base2 + i].v) || ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) { |
if ((!phte[base2 + i].v) || |
((phte[base2 + i].vsid == vsid) && |
(phte[base2 + i].api == api))) { |
found = true; |
base = base2; |
h = 1; |
214,7 → 221,9 |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte_physical[base + i].v) || ((phte_physical[base + i].vsid == vsid) && (phte_physical[base + i].api == api))) { |
if ((!phte_physical[base + i].v) || |
((phte_physical[base + i].vsid == vsid) && |
(phte_physical[base + i].api == api))) { |
found = true; |
break; |
} |
227,7 → 236,9 |
/* Find unused or colliding |
PTE in PTEG */ |
for (i = 0; i < 8; i++) { |
if ((!phte_physical[base2 + i].v) || ((phte_physical[base2 + i].vsid == vsid) && (phte_physical[base2 + i].api == api))) { |
if ((!phte_physical[base2 + i].v) || |
((phte_physical[base2 + i].vsid == vsid) && |
(phte_physical[base2 + i].api == api))) { |
found = true; |
base = base2; |
h = 1; |
254,8 → 265,8 |
/** Process Instruction/Data Storage Interrupt |
* |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* |
*/ |
void pht_refill(int n, istate_t *istate) |
284,21 → 295,22 |
page_table_lock(as, lock); |
pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfrc); |
pte = find_mapping_and_check(as, lock, badvaddr, |
PF_ACCESS_READ /* FIXME */, istate, &pfrc); |
if (!pte) { |
switch (pfrc) { |
case AS_PF_FAULT: |
goto fail; |
break; |
case AS_PF_DEFER: |
/* |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(as, lock); |
return; |
default: |
panic("Unexpected pfrc (%d)\n", pfrc); |
case AS_PF_FAULT: |
goto fail; |
break; |
case AS_PF_DEFER: |
/* |
* The page fault came during copy_from_uspace() |
* or copy_to_uspace(). |
*/ |
page_table_unlock(as, lock); |
return; |
default: |
panic("Unexpected pfrc (%d)\n", pfrc); |
} |
} |
316,8 → 328,8 |
/** Process Instruction/Data Storage Interrupt in Real Mode |
* |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* @param n Interrupt vector number. |
* @param istate Interrupted register context. |
* |
*/ |
bool pht_real_refill(int n, istate_t *istate) |
373,7 → 385,8 |
uint32_t i; |
for (i = 0; i < 8192; i++) { |
if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) && (phte[i].vsid < ((asid << 4) + 16))) |
if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) && |
(phte[i].vsid < ((asid << 4) + 16))) |
phte[i].v = 0; |
} |
tlb_invalidate_all(); |
407,7 → 420,11 |
} \ |
} else \ |
length = 0; \ |
printf(name ": page=%.*p frame=%.*p length=%d KB (mask=%#x)%s%s\n", sizeof(upper) * 2, upper & 0xffff0000, sizeof(lower) * 2, lower & 0xffff0000, length, mask, ((upper >> 1) & 1) ? " supervisor" : "", (upper & 1) ? " user" : ""); |
printf(name ": page=%.*p frame=%.*p length=%d KB (mask=%#x)%s%s\n", \ |
sizeof(upper) * 2, upper & 0xffff0000, sizeof(lower) * 2, \ |
lower & 0xffff0000, length, mask, \ |
((upper >> 1) & 1) ? " supervisor" : "", \ |
(upper & 1) ? " user" : ""); |
void tlb_print(void) |
421,7 → 438,10 |
: "=r" (vsid) |
: "r" (sr << 28) |
); |
printf("vsid[%d]: VSID=%.*p (ASID=%d)%s%s\n", sr, sizeof(vsid) * 2, vsid & 0xffffff, (vsid & 0xffffff) >> 4, ((vsid >> 30) & 1) ? " supervisor" : "", ((vsid >> 29) & 1) ? " user" : ""); |
printf("vsid[%d]: VSID=%.*p (ASID=%d)%s%s\n", sr, |
sizeof(vsid) * 2, vsid & 0xffffff, (vsid & 0xffffff) >> 4, |
((vsid >> 30) & 1) ? " supervisor" : "", |
((vsid >> 29) & 1) ? " user" : ""); |
} |
uint32_t upper; |
/branches/network/kernel/arch/ppc32/src/mm/page.c |
---|
47,13 → 47,16 |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) |
panic("Unable to map physical memory %p (%d bytes)", physaddr, size) |
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > |
KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) |
panic("Unable to map physical memory %p (%" PRIs " bytes)", |
physaddr, size) |
uintptr_t virtaddr = PA2KA(last_frame); |
pfn_t i; |
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) |
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE); |
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), |
physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE); |
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE); |
/branches/network/kernel/arch/ppc32/src/interrupt.c |
---|
80,7 → 80,7 |
* Spurious interrupt. |
*/ |
#ifdef CONFIG_DEBUG |
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum); |
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, inum); |
#endif |
} |
/branches/network/kernel/arch/ppc32/src/context.S |
---|
26,7 → 26,6 |
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
# |
#include <arch/asm/regname.h> |
#include <arch/context_offset.h> |
.text |
34,56 → 33,8 |
.global context_save_arch |
.global context_restore_arch |
.macro CONTEXT_STORE r |
stw sp, OFFSET_SP(\r) |
stw r2, OFFSET_R2(\r) |
stw r13, OFFSET_R13(\r) |
stw r14, OFFSET_R14(\r) |
stw r15, OFFSET_R15(\r) |
stw r16, OFFSET_R16(\r) |
stw r17, OFFSET_R17(\r) |
stw r18, OFFSET_R18(\r) |
stw r19, OFFSET_R19(\r) |
stw r20, OFFSET_R20(\r) |
stw r21, OFFSET_R21(\r) |
stw r22, OFFSET_R22(\r) |
stw r23, OFFSET_R23(\r) |
stw r24, OFFSET_R24(\r) |
stw r25, OFFSET_R25(\r) |
stw r26, OFFSET_R26(\r) |
stw r27, OFFSET_R27(\r) |
stw r28, OFFSET_R28(\r) |
stw r29, OFFSET_R29(\r) |
stw r30, OFFSET_R30(\r) |
stw r31, OFFSET_R31(\r) |
.endm |
.macro CONTEXT_LOAD r |
lwz sp, OFFSET_SP(\r) |
lwz r2, OFFSET_R2(\r) |
lwz r13, OFFSET_R13(\r) |
lwz r14, OFFSET_R14(\r) |
lwz r15, OFFSET_R15(\r) |
lwz r16, OFFSET_R16(\r) |
lwz r17, OFFSET_R17(\r) |
lwz r18, OFFSET_R18(\r) |
lwz r19, OFFSET_R19(\r) |
lwz r20, OFFSET_R20(\r) |
lwz r21, OFFSET_R21(\r) |
lwz r22, OFFSET_R22(\r) |
lwz r23, OFFSET_R23(\r) |
lwz r24, OFFSET_R24(\r) |
lwz r25, OFFSET_R25(\r) |
lwz r26, OFFSET_R26(\r) |
lwz r27, OFFSET_R27(\r) |
lwz r28, OFFSET_R28(\r) |
lwz r29, OFFSET_R29(\r) |
lwz r30, OFFSET_R30(\r) |
lwz r31, OFFSET_R31(\r) |
.endm |
context_save_arch: |
CONTEXT_STORE r3 |
CONTEXT_SAVE_ARCH_CORE r3 |
mflr r4 |
stw r4, OFFSET_PC(r3) |
96,7 → 47,7 |
blr |
context_restore_arch: |
CONTEXT_LOAD r3 |
CONTEXT_RESTORE_ARCH_CORE r3 |
lwz r4, OFFSET_CR(r3) |
mtcr r4 |
/branches/network/kernel/arch/ia32xen/Makefile.inc |
---|
29,11 → 29,15 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf32-i386 |
BFD_ARCH = i386 |
BFD = elf32-i386 |
TARGET = i686-pc-linux-gnu |
TOOLCHAIN_DIR = /usr/local/i686 |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/i686 |
DEFS += -DMACHINE=$(MACHINE) -D__32_BITS__ |
111,7 → 115,7 |
CONFIG_SOFTINT = y |
ARCH_SOURCES = \ |
arch/$(ARCH)/src/context.s \ |
arch/$(ARCH)/src/context.S \ |
arch/$(ARCH)/src/debug/panic.s \ |
arch/$(ARCH)/src/delay.s \ |
arch/$(ARCH)/src/asm.S \ |
/branches/network/kernel/arch/ia32xen/src/context.s |
---|
File deleted |
\ No newline at end of file |
Property changes: |
Deleted: svn:special |
-* |
\ No newline at end of property |
/branches/network/kernel/arch/ia32xen/src/asm.S |
---|
67,7 → 67,7 |
* @param MEMCPY_SRC(%esp) Source address. |
* @param MEMCPY_SIZE(%esp) Size. |
* |
* @return MEMCPY_SRC(%esp) on success and 0 on failure. |
* @return MEMCPY_DST(%esp) on success and 0 on failure. |
*/ |
memcpy: |
memcpy_from_uspace: |
92,7 → 92,7 |
0: |
movl %edx, %edi |
movl %eax, %esi |
movl MEMCPY_SRC(%esp), %eax /* MEMCPY_SRC(%esp), success */ |
movl MEMCPY_DST(%esp), %eax /* MEMCPY_DST(%esp), success */ |
ret |
/* |
/branches/network/kernel/arch/ia32xen/src/userspace.c |
---|
68,6 → 68,10 |
"pushl %3\n" |
"pushl %4\n" |
"movl %5, %%eax\n" |
/* %ebx is defined to hold pcb_ptr - set it to 0 */ |
"xorl %%ebx, %%ebx\n" |
"iret\n" |
: |
: "i" (selector(UDATA_DES) | PL_USER), |
/branches/network/kernel/arch/ia32xen/src/ia32xen.c |
---|
69,7 → 69,7 |
void arch_pre_main(void) |
{ |
pte_t pte; |
memsetb((uintptr_t) &pte, sizeof(pte), 0); |
memsetb(&pte, sizeof(pte), 0); |
pte.present = 1; |
pte.writeable = 1; |
103,7 → 103,7 |
uintptr_t tpa = PFN2ADDR(meminfo.start + meminfo.reserved); |
uintptr_t tva = PA2KA(tpa); |
memsetb(tva, PAGE_SIZE, 0); |
memsetb((void *) tva, PAGE_SIZE, 0); |
pte_t *tptl3 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(start_info.ptl0, PTL0_INDEX(tva))); |
SET_FRAME_ADDRESS(tptl3, PTL3_INDEX(tva), 0); |
/branches/network/kernel/arch/ia32xen/src/pm.c |
---|
98,7 → 98,7 |
void tss_initialize(tss_t *t) |
{ |
memsetb((uintptr_t) t, sizeof(struct tss), 0); |
memsetb(t, sizeof(struct tss), 0); |
} |
static void trap(void) |
/branches/network/kernel/arch/ia32xen/src/smp/smp.c |
---|
98,7 → 98,7 |
*/ |
void kmp(void *arg) |
{ |
int i; |
unsigned int i; |
ASSERT(ops != NULL); |
142,11 → 142,11 |
/* |
* Prepare new GDT for CPU in question. |
*/ |
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor), FRAME_ATOMIC))) |
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS * sizeof(struct descriptor), FRAME_ATOMIC))) |
panic("couldn't allocate memory for GDT\n"); |
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor)); |
memsetb((uintptr_t)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0); |
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0); |
gdtr.base = (uintptr_t) gdt_new; |
if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) { |
/branches/network/kernel/arch/ia32xen/src/smp/mps.c |
---|
77,11 → 77,11 |
struct __io_intr_entry *io_intr_entries = NULL; |
struct __l_intr_entry *l_intr_entries = NULL; |
int processor_entry_cnt = 0; |
int bus_entry_cnt = 0; |
int io_apic_entry_cnt = 0; |
int io_intr_entry_cnt = 0; |
int l_intr_entry_cnt = 0; |
unsigned int processor_entry_cnt = 0; |
unsigned int bus_entry_cnt = 0; |
unsigned int io_apic_entry_cnt = 0; |
unsigned int io_intr_entry_cnt = 0; |
unsigned int l_intr_entry_cnt = 0; |
waitq_t ap_completion_wq; |
417,7 → 417,7 |
int mps_irq_to_pin(unsigned int irq) |
{ |
int i; |
unsigned int i; |
for (i = 0; i < io_intr_entry_cnt; i++) { |
if (io_intr_entries[i].src_bus_irq == irq && io_intr_entries[i].intr_type == 0) |
/branches/network/kernel/arch/ia32xen/src/context.S |
---|
0,0 → 1,0 |
link ../../ia32/src/context.S |
Property changes: |
Added: svn:special |
+* |
\ No newline at end of property |
/branches/network/kernel/arch/ia32xen/src/mm/tlb.c |
---|
65,7 → 65,7 |
*/ |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
int i; |
unsigned int i; |
for (i = 0; i < cnt; i++) |
invlpg(page + i * PAGE_SIZE); |
/branches/network/kernel/arch/ia32xen/include/mm/page.h |
---|
40,8 → 40,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifdef KERNEL |
#ifndef __ASM__ |
/branches/network/kernel/arch/ia32xen/include/types.h |
---|
61,14 → 61,6 |
typedef uint32_t unative_t; |
typedef int32_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
/** Page Table Entry. */ |
typedef struct { |
unsigned present : 1; |
/branches/network/kernel/arch/ia32xen/include/context_offset.h |
---|
0,0 → 1,0 |
link ../../ia32/include/context_offset.h |
Property changes: |
Added: svn:special |
+* |
\ No newline at end of property |
/branches/network/kernel/arch/ppc64/Makefile.inc |
---|
29,11 → 29,15 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf64-powerpc |
BFD_ARCH = powerpc:common64 |
BFD = binary |
TARGET = ppc64-linux-gnu |
TOOLCHAIN_DIR = /usr/local/ppc64 |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/ppc64 |
GCC_CFLAGS += -mcpu=powerpc64 -msoft-float -m64 |
AFLAGS += -a64 |
/branches/network/kernel/arch/ppc64/include/mm/page.h |
---|
40,8 → 40,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifdef KERNEL |
#ifndef __ASM__ |
/branches/network/kernel/arch/ppc64/include/barrier.h |
---|
38,10 → 38,13 |
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory") |
#define memory_barrier() asm volatile ("sync" ::: "memory") |
#define read_barrier() asm volatile ("sync" ::: "memory") |
#define write_barrier() asm volatile ("eieio" ::: "memory") |
#define memory_barrier() asm volatile ("sync" ::: "memory") |
#define read_barrier() asm volatile ("sync" ::: "memory") |
#define write_barrier() asm volatile ("eieio" ::: "memory") |
#define smc_coherence(a) |
#define smc_coherence_block(a, l) |
#endif |
/** @} |
/branches/network/kernel/arch/ppc64/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
extern int memcmp(const void *a, const void *b, size_t cnt); |
#endif |
/branches/network/kernel/arch/ppc64/include/exception.h |
---|
82,6 → 82,7 |
{ |
istate->pc = retaddr; |
} |
/** Return true if exception happened while in userspace */ |
#include <panic.h> |
static inline int istate_from_uspace(istate_t *istate) |
89,6 → 90,7 |
panic("istate_from_uspace not yet implemented"); |
return 0; |
} |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->pc; |
/branches/network/kernel/arch/ppc64/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_ppc64_TYPES_H_ |
#define KERN_ppc64_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
61,14 → 57,6 |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
/** Page Table Entry. */ |
typedef struct { |
unsigned p : 1; /**< Present bit. */ |
/branches/network/kernel/arch/ppc64/include/context_offset.h |
---|
73,4 → 73,60 |
#define OFFSET_FR31 0x88 |
#define OFFSET_FPSCR 0x90 |
#ifdef __ASM__ |
# include <arch/asm/regname.h> |
# ctx: address of the structure with saved context |
.macro CONTEXT_SAVE_ARCH_CORE ctx:req |
stw sp, OFFSET_SP(\ctx) |
stw r2, OFFSET_R2(\ctx) |
stw r13, OFFSET_R13(\ctx) |
stw r14, OFFSET_R14(\ctx) |
stw r15, OFFSET_R15(\ctx) |
stw r16, OFFSET_R16(\ctx) |
stw r17, OFFSET_R17(\ctx) |
stw r18, OFFSET_R18(\ctx) |
stw r19, OFFSET_R19(\ctx) |
stw r20, OFFSET_R20(\ctx) |
stw r21, OFFSET_R21(\ctx) |
stw r22, OFFSET_R22(\ctx) |
stw r23, OFFSET_R23(\ctx) |
stw r24, OFFSET_R24(\ctx) |
stw r25, OFFSET_R25(\ctx) |
stw r26, OFFSET_R26(\ctx) |
stw r27, OFFSET_R27(\ctx) |
stw r28, OFFSET_R28(\ctx) |
stw r29, OFFSET_R29(\ctx) |
stw r30, OFFSET_R30(\ctx) |
stw r31, OFFSET_R31(\ctx) |
.endm |
# ctx: address of the structure with saved context |
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req |
lwz sp, OFFSET_SP(\ctx) |
lwz r2, OFFSET_R2(\ctx) |
lwz r13, OFFSET_R13(\ctx) |
lwz r14, OFFSET_R14(\ctx) |
lwz r15, OFFSET_R15(\ctx) |
lwz r16, OFFSET_R16(\ctx) |
lwz r17, OFFSET_R17(\ctx) |
lwz r18, OFFSET_R18(\ctx) |
lwz r19, OFFSET_R19(\ctx) |
lwz r20, OFFSET_R20(\ctx) |
lwz r21, OFFSET_R21(\ctx) |
lwz r22, OFFSET_R22(\ctx) |
lwz r23, OFFSET_R23(\ctx) |
lwz r24, OFFSET_R24(\ctx) |
lwz r25, OFFSET_R25(\ctx) |
lwz r26, OFFSET_R26(\ctx) |
lwz r27, OFFSET_R27(\ctx) |
lwz r28, OFFSET_R28(\ctx) |
lwz r29, OFFSET_R29(\ctx) |
lwz r30, OFFSET_R30(\ctx) |
lwz r31, OFFSET_R31(\ctx) |
.endm |
#endif /* __ASM__ */ |
#endif |
/branches/network/kernel/arch/ppc64/include/byteorder.h |
---|
35,14 → 35,8 |
#ifndef KERN_ppc64_BYTEORDER_H_ |
#define KERN_ppc64_BYTEORDER_H_ |
#include <byteorder.h> |
#define ARCH_IS_BIG_ENDIAN |
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n) |
#define uint32_t_be2host(n) (n) |
#define uint64_t_be2host(n) (n) |
#endif |
/** @} |
/branches/network/kernel/arch/ppc64/src/asm.S |
---|
66,6 → 66,10 |
mr sp, r4 |
# %r3 is defined to hold pcb_ptr - set it to 0 |
xor r3, r3, r3 |
# jump to userspace |
rfi |
/branches/network/kernel/arch/ppc64/src/mm/page.c |
---|
252,7 → 252,7 |
void pht_init(void) |
{ |
memsetb((uintptr_t) phte, 1 << PHT_BITS, 0); |
memsetb(phte, 1 << PHT_BITS, 0); |
} |
289,7 → 289,7 |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) |
panic("Unable to map physical memory %p (%d bytes)", physaddr, size) |
panic("Unable to map physical memory %p (%" PRIs " bytes)", physaddr, size) |
uintptr_t virtaddr = PA2KA(last_frame); |
pfn_t i; |
/branches/network/kernel/arch/ppc64/src/cpu/cpu.c |
---|
53,7 → 53,7 |
void cpu_print_report(cpu_t *m) |
{ |
printf("cpu%d: version=%d, revision=%d\n", m->id, m->arch.version, m->arch.revision); |
printf("cpu%u: version=%d, revision=%d\n", m->id, m->arch.version, m->arch.revision); |
} |
/** @} |
/branches/network/kernel/arch/ppc64/src/interrupt.c |
---|
80,7 → 80,7 |
* Spurious interrupt. |
*/ |
#ifdef CONFIG_DEBUG |
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum); |
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, inum); |
#endif |
} |
/branches/network/kernel/arch/ppc64/src/context.S |
---|
34,56 → 34,8 |
.global context_save_arch |
.global context_restore_arch |
.macro CONTEXT_STORE r |
stw sp, OFFSET_SP(\r) |
stw r2, OFFSET_R2(\r) |
stw r13, OFFSET_R13(\r) |
stw r14, OFFSET_R14(\r) |
stw r15, OFFSET_R15(\r) |
stw r16, OFFSET_R16(\r) |
stw r17, OFFSET_R17(\r) |
stw r18, OFFSET_R18(\r) |
stw r19, OFFSET_R19(\r) |
stw r20, OFFSET_R20(\r) |
stw r21, OFFSET_R21(\r) |
stw r22, OFFSET_R22(\r) |
stw r23, OFFSET_R23(\r) |
stw r24, OFFSET_R24(\r) |
stw r25, OFFSET_R25(\r) |
stw r26, OFFSET_R26(\r) |
stw r27, OFFSET_R27(\r) |
stw r28, OFFSET_R28(\r) |
stw r29, OFFSET_R29(\r) |
stw r30, OFFSET_R30(\r) |
stw r31, OFFSET_R31(\r) |
.endm |
.macro CONTEXT_LOAD r |
lwz sp, OFFSET_SP(\r) |
lwz r2, OFFSET_R2(\r) |
lwz r13, OFFSET_R13(\r) |
lwz r14, OFFSET_R14(\r) |
lwz r15, OFFSET_R15(\r) |
lwz r16, OFFSET_R16(\r) |
lwz r17, OFFSET_R17(\r) |
lwz r18, OFFSET_R18(\r) |
lwz r19, OFFSET_R19(\r) |
lwz r20, OFFSET_R20(\r) |
lwz r21, OFFSET_R21(\r) |
lwz r22, OFFSET_R22(\r) |
lwz r23, OFFSET_R23(\r) |
lwz r24, OFFSET_R24(\r) |
lwz r25, OFFSET_R25(\r) |
lwz r26, OFFSET_R26(\r) |
lwz r27, OFFSET_R27(\r) |
lwz r28, OFFSET_R28(\r) |
lwz r29, OFFSET_R29(\r) |
lwz r30, OFFSET_R30(\r) |
lwz r31, OFFSET_R31(\r) |
.endm |
context_save_arch: |
CONTEXT_STORE r3 |
CONTEXT_SAVE_ARCH_CORE r3 |
mflr r4 |
stw r4, OFFSET_PC(r3) |
96,7 → 48,7 |
blr |
context_restore_arch: |
CONTEXT_LOAD r3 |
CONTEXT_RESTORE_ARCH_CORE r3 |
lwz r4, OFFSET_CR(r3) |
mtcr r4 |
/branches/network/kernel/arch/mips32/Makefile.inc |
---|
29,9 → 29,13 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_ARCH = mips |
TARGET = mipsel-linux-gnu |
TOOLCHAIN_DIR = /usr/local/mipsel |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/mipsel |
KERNEL_LOAD_ADDRESS = 0x80100000 |
INIT_ADDRESS = 0x81000000 |
56,20 → 60,6 |
## Accepted MACHINEs |
# |
ifeq ($(MACHINE),indy) |
# GCC 4.0.1 compiled for mipsEL has problems compiling in |
# BigEndian mode with the swl/swr/lwl/lwr instructions. |
# We have to compile it with mips-sgi-irix5 to get it right. |
BFD_NAME = elf32-bigmips |
BFD = ecoff-bigmips --impure |
TARGET = mips-sgi-irix5 |
TOOLCHAIN_DIR = /usr/local/mips/bin |
KERNEL_LOAD_ADDRESS = 0x88002000 |
GCC_CFLAGS += -EB -DBIG_ENDIAN -DARCH_HAS_FPU -march=r4600 |
INIT_ADDRESS = 0 |
INIT_SIZE = 0 |
endif |
ifeq ($(MACHINE),lgxemul) |
BFD_NAME = elf32-tradlittlemips |
BFD = binary |
79,7 → 69,7 |
BFD_NAME = elf32-bigmips |
BFD = ecoff-bigmips |
TARGET = mips-sgi-irix5 |
TOOLCHAIN_DIR = /usr/local/mips/bin |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/mips/bin |
GCC_CFLAGS += -EB -DBIG_ENDIAN -DARCH_HAS_FPU -mips3 |
INIT_ADDRESS = 0x81800000 |
endif |
123,7 → 113,6 |
arch/$(ARCH)/src/mm/as.c \ |
arch/$(ARCH)/src/fpu_context.c \ |
arch/$(ARCH)/src/ddi/ddi.c \ |
arch/$(ARCH)/src/drivers/arc.c \ |
arch/$(ARCH)/src/drivers/msim.c \ |
arch/$(ARCH)/src/drivers/serial.c \ |
arch/$(ARCH)/src/smp/order.c |
/branches/network/kernel/arch/mips32/src/asm.S |
---|
71,6 → 71,7 |
and $v0,$v0,$v1 |
beq $a1,$v0,3f |
move $t0,$a0 |
move $t2,$a0 # save dst |
0: |
beq $a2,$zero,2f |
86,7 → 87,7 |
2: |
jr $ra |
move $v0,$a1 |
move $v0,$t2 |
3: |
addiu $v0,$a0,3 |
126,7 → 127,7 |
sb $a0,0($v1) |
jr $ra |
move $v0,$a1 |
move $v0,$t2 |
memcpy_from_uspace_failover_address: |
memcpy_to_uspace_failover_address: |
/branches/network/kernel/arch/mips32/src/mm/frame.c |
---|
32,27 → 32,238 |
/** @file |
*/ |
#include <macros.h> |
#include <arch/mm/frame.h> |
#include <arch/mm/tlb.h> |
#include <interrupt.h> |
#include <mm/frame.h> |
#include <mm/asid.h> |
#include <config.h> |
#include <arch/drivers/arc.h> |
#include <arch/drivers/msim.h> |
#include <arch/drivers/serial.h> |
#include <print.h> |
#define ZERO_PAGE_MASK TLB_PAGE_MASK_256K |
#define ZERO_FRAMES 2048 |
#define ZERO_PAGE_WIDTH 18 /* 256K */ |
#define ZERO_PAGE_SIZE (1 << ZERO_PAGE_WIDTH) |
#define ZERO_PAGE_ASID ASID_INVALID |
#define ZERO_PAGE_TLBI 0 |
#define ZERO_PAGE_ADDR 0 |
#define ZERO_PAGE_OFFSET (ZERO_PAGE_SIZE / sizeof(uint32_t) - 1) |
#define ZERO_PAGE_VALUE (((volatile uint32_t *) ZERO_PAGE_ADDR)[ZERO_PAGE_OFFSET]) |
#define ZERO_PAGE_VALUE_KSEG1(frame) (((volatile uint32_t *) (0xa0000000 + (frame << ZERO_PAGE_WIDTH)))[ZERO_PAGE_OFFSET]) |
#define MAX_REGIONS 32 |
typedef struct { |
pfn_t start; |
pfn_t count; |
} phys_region_t; |
static count_t phys_regions_count = 0; |
static phys_region_t phys_regions[MAX_REGIONS]; |
/** Check whether frame is available |
* |
* Returns true if given frame is generally available for use. |
* Returns false if given frame is used for physical memory |
* mapped devices and cannot be used. |
* |
*/ |
static bool frame_available(pfn_t frame) |
{ |
#if MACHINE == msim |
/* MSIM device (dprinter) */ |
if (frame == (KA2PA(MSIM_VIDEORAM) >> ZERO_PAGE_WIDTH)) |
return false; |
/* MSIM device (dkeyboard) */ |
if (frame == (KA2PA(MSIM_KBD_ADDRESS) >> ZERO_PAGE_WIDTH)) |
return false; |
#endif |
#if MACHINE == simics |
/* Simics device (serial line) */ |
if (frame == (KA2PA(SERIAL_ADDRESS) >> ZERO_PAGE_WIDTH)) |
return false; |
#endif |
#if (MACHINE == lgxemul) || (MACHINE == bgxemul) |
/* gxemul devices */ |
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE, |
0x10000000, MB2SIZE(256))) |
return false; |
#endif |
return true; |
} |
/** Check whether frame is safe to write |
* |
* Returns true if given frame is safe for read/write test. |
* Returns false if given frame should not be touched. |
* |
*/ |
static bool frame_safe(pfn_t frame) |
{ |
/* Kernel structures */ |
if ((frame << ZERO_PAGE_WIDTH) < KA2PA(config.base)) |
return false; |
/* Kernel */ |
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE, |
KA2PA(config.base), config.kernel_size)) |
return false; |
/* Kernel stack */ |
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE, |
KA2PA(config.stack_base), config.stack_size)) |
return false; |
/* Init tasks */ |
bool safe = true; |
count_t i; |
for (i = 0; i < init.cnt; i++) |
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE, |
KA2PA(init.tasks[i].addr), init.tasks[i].size)) { |
safe = false; |
break; |
} |
return safe; |
} |
static void frame_add_region(pfn_t start_frame, pfn_t end_frame) |
{ |
if (end_frame > start_frame) { |
/* Convert 1M frames to 16K frames */ |
pfn_t first = ADDR2PFN(start_frame << ZERO_PAGE_WIDTH); |
pfn_t count = ADDR2PFN((end_frame - start_frame) << ZERO_PAGE_WIDTH); |
/* Interrupt vector frame is blacklisted */ |
pfn_t conf_frame; |
if (first == 0) |
conf_frame = 1; |
else |
conf_frame = first; |
zone_create(first, count, conf_frame, 0); |
if (phys_regions_count < MAX_REGIONS) { |
phys_regions[phys_regions_count].start = first; |
phys_regions[phys_regions_count].count = count; |
phys_regions_count++; |
} |
} |
} |
/** Create memory zones |
* |
* If ARC is known, read information from ARC, otherwise |
* assume some defaults. |
* - blacklist first FRAME because there is an exception vector |
* Walk through available 256 KB chunks of physical |
* memory and create zones. |
* |
* Note: It is assumed that the TLB is not yet being |
* used in any way, thus there is no interference. |
* |
*/ |
void frame_arch_init(void) |
{ |
if (!arc_frame_init()) { |
zone_create(0, ADDR2PFN(CONFIG_MEMORY_SIZE), 1, 0); |
/* |
* Blacklist interrupt vector |
*/ |
frame_mark_unavailable(0, 1); |
ipl_t ipl = interrupts_disable(); |
/* Clear and initialize TLB */ |
cp0_pagemask_write(ZERO_PAGE_MASK); |
cp0_entry_lo0_write(0); |
cp0_entry_lo1_write(0); |
cp0_entry_hi_write(0); |
count_t i; |
for (i = 0; i < TLB_ENTRY_COUNT; i++) { |
cp0_index_write(i); |
tlbwi(); |
} |
pfn_t start_frame = 0; |
pfn_t frame; |
bool avail = true; |
/* Walk through all 1 MB frames */ |
for (frame = 0; frame < ZERO_FRAMES; frame++) { |
if (!frame_available(frame)) |
avail = false; |
else { |
if (frame_safe(frame)) { |
entry_lo_t lo0; |
entry_lo_t lo1; |
entry_hi_t hi; |
tlb_prepare_entry_lo(&lo0, false, true, true, false, frame << (ZERO_PAGE_WIDTH - 12)); |
tlb_prepare_entry_lo(&lo1, false, false, false, false, 0); |
tlb_prepare_entry_hi(&hi, ZERO_PAGE_ASID, ZERO_PAGE_ADDR); |
cp0_pagemask_write(ZERO_PAGE_MASK); |
cp0_entry_lo0_write(lo0.value); |
cp0_entry_lo1_write(lo1.value); |
cp0_entry_hi_write(hi.value); |
cp0_index_write(ZERO_PAGE_TLBI); |
tlbwi(); |
ZERO_PAGE_VALUE = 0; |
if (ZERO_PAGE_VALUE != 0) |
avail = false; |
else { |
ZERO_PAGE_VALUE = 0xdeadbeef; |
if (ZERO_PAGE_VALUE != 0xdeadbeef) |
avail = false; |
#if (MACHINE == lgxemul) || (MACHINE == bgxemul) |
else { |
ZERO_PAGE_VALUE_KSEG1(frame) = 0xaabbccdd; |
if (ZERO_PAGE_VALUE_KSEG1(frame) != 0xaabbccdd) |
avail = false; |
} |
#endif |
} |
} |
} |
if (!avail) { |
frame_add_region(start_frame, frame); |
start_frame = frame + 1; |
avail = true; |
} |
} |
frame_add_region(start_frame, frame); |
/* Blacklist interrupt vector frame */ |
frame_mark_unavailable(0, 1); |
/* Cleanup */ |
cp0_pagemask_write(ZERO_PAGE_MASK); |
cp0_entry_lo0_write(0); |
cp0_entry_lo1_write(0); |
cp0_entry_hi_write(0); |
cp0_index_write(ZERO_PAGE_TLBI); |
tlbwi(); |
interrupts_restore(ipl); |
} |
void physmem_print(void) |
{ |
printf("Base Size\n"); |
printf("---------- ----------\n"); |
count_t i; |
for (i = 0; i < phys_regions_count; i++) { |
printf("%#010x %10u\n", |
PFN2ADDR(phys_regions[i].start), PFN2ADDR(phys_regions[i].count)); |
} |
} |
/** @} |
*/ |
/branches/network/kernel/arch/mips32/src/mm/tlb.c |
---|
53,9 → 53,6 |
static pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate, int *pfrc); |
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn); |
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr); |
/** Initialize TLB |
* |
* Initialize TLB. |
76,7 → 73,6 |
cp0_index_write(i); |
tlbwi(); |
} |
/* |
* The kernel is going to make use of some wired |
131,8 → 127,8 |
*/ |
pte->a = 1; |
prepare_entry_hi(&hi, asid, badvaddr); |
prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn); |
tlb_prepare_entry_hi(&hi, asid, badvaddr); |
tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn); |
/* |
* New entry is to be inserted into TLB |
178,7 → 174,7 |
* Locate the faulting entry in TLB. |
*/ |
hi.value = cp0_entry_hi_read(); |
prepare_entry_hi(&hi, hi.asid, badvaddr); |
tlb_prepare_entry_hi(&hi, hi.asid, badvaddr); |
cp0_entry_hi_write(hi.value); |
tlbp(); |
index.value = cp0_index_read(); |
221,7 → 217,7 |
*/ |
pte->a = 1; |
prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn); |
tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn); |
/* |
* The entry is to be updated in TLB. |
262,7 → 258,7 |
* Locate the faulting entry in TLB. |
*/ |
hi.value = cp0_entry_hi_read(); |
prepare_entry_hi(&hi, hi.asid, badvaddr); |
tlb_prepare_entry_hi(&hi, hi.asid, badvaddr); |
cp0_entry_hi_write(hi.value); |
tlbp(); |
index.value = cp0_index_read(); |
312,7 → 308,7 |
pte->a = 1; |
pte->d = 1; |
prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable, pte->pfn); |
tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable, pte->pfn); |
/* |
* The entry is to be updated in TLB. |
445,7 → 441,7 |
} |
} |
void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn) |
void tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn) |
{ |
lo->value = 0; |
lo->g = g; |
455,7 → 451,7 |
lo->pfn = pfn; |
} |
void prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr) |
void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr) |
{ |
hi->value = ALIGN_DOWN(addr, PAGE_SIZE * 2); |
hi->asid = asid; |
572,7 → 568,7 |
*/ |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
int i; |
unsigned int i; |
ipl_t ipl; |
entry_lo_t lo0, lo1; |
entry_hi_t hi, hi_save; |
583,9 → 579,9 |
hi_save.value = cp0_entry_hi_read(); |
ipl = interrupts_disable(); |
for (i = 0; i < cnt+1; i+=2) { |
for (i = 0; i < cnt + 1; i += 2) { |
hi.value = 0; |
prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE); |
tlb_prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE); |
cp0_entry_hi_write(hi.value); |
tlbp(); |
/branches/network/kernel/arch/mips32/src/mm/as.c |
---|
44,7 → 44,7 |
/** Architecture dependent address space init. */ |
void as_arch_init(void) |
{ |
as_operations = &as_pt_operations; |
as_operations = &as_pt_operations; |
asid_fifo_init(); |
} |
/branches/network/kernel/arch/mips32/src/drivers/arc.c |
---|
File deleted |
/branches/network/kernel/arch/mips32/src/drivers/msim.c |
---|
39,12 → 39,9 |
#include <arch/cp0.h> |
#include <console/console.h> |
#include <sysinfo/sysinfo.h> |
#include <ddi/ddi.h> |
/** Address of devices. */ |
#define MSIM_VIDEORAM 0xB0000000 |
#define MSIM_KBD_ADDRESS 0xB0000000 |
#define MSIM_KBD_IRQ 2 |
static parea_t msim_parea; |
static chardev_t console; |
static irq_t msim_irq; |
158,6 → 155,16 |
sysinfo_set_item_val("kbd.devno", NULL, devno); |
sysinfo_set_item_val("kbd.inr", NULL, MSIM_KBD_IRQ); |
sysinfo_set_item_val("kbd.address.virtual", NULL, MSIM_KBD_ADDRESS); |
msim_parea.pbase = KA2PA(MSIM_VIDEORAM); |
msim_parea.vbase = MSIM_VIDEORAM; |
msim_parea.frames = 1; |
msim_parea.cacheable = false; |
ddi_parea_register(&msim_parea); |
sysinfo_set_item_val("fb", NULL, true); |
sysinfo_set_item_val("fb.kind", NULL, 3); |
sysinfo_set_item_val("fb.address.physical", NULL, KA2PA(MSIM_VIDEORAM)); |
} |
/** @} |
/branches/network/kernel/arch/mips32/src/console.c |
---|
34,18 → 34,15 |
#include <console/console.h> |
#include <arch/console.h> |
#include <arch/drivers/arc.h> |
#include <arch/drivers/serial.h> |
#include <arch/drivers/msim.h> |
void console_init(devno_t devno) |
{ |
if (!arc_console()) { |
if (serial_init()) |
serial_console(devno); |
else |
msim_console(devno); |
} |
if (serial_init()) |
serial_console(devno); |
else |
msim_console(devno); |
} |
/** Acquire console back for kernel |
/branches/network/kernel/arch/mips32/src/mips32.c |
---|
48,8 → 48,8 |
#include <sysinfo/sysinfo.h> |
#include <arch/interrupt.h> |
#include <arch/drivers/arc.h> |
#include <console/chardev.h> |
#include <arch/barrier.h> |
#include <arch/debugger.h> |
#include <genarch/fb/fb.h> |
#include <genarch/fb/visuals.h> |
96,18 → 96,21 |
/* Initialize dispatch table */ |
exception_init(); |
arc_init(); |
/* Copy the exception vectors to the right places */ |
memcpy(TLB_EXC, (char *) tlb_refill_entry, EXCEPTION_JUMP_SIZE); |
smc_coherence_block(TLB_EXC, EXCEPTION_JUMP_SIZE); |
memcpy(NORM_EXC, (char *) exception_entry, EXCEPTION_JUMP_SIZE); |
smc_coherence_block(NORM_EXC, EXCEPTION_JUMP_SIZE); |
memcpy(CACHE_EXC, (char *) cache_error_entry, EXCEPTION_JUMP_SIZE); |
smc_coherence_block(CACHE_EXC, EXCEPTION_JUMP_SIZE); |
/* |
* Switch to BEV normal level so that exception vectors point to the kernel. |
* Clear the error level. |
* Switch to BEV normal level so that exception vectors point to the |
* kernel. Clear the error level. |
*/ |
cp0_status_write(cp0_status_read() & ~(cp0_status_bev_bootstrap_bit|cp0_status_erl_error_bit)); |
cp0_status_write(cp0_status_read() & |
~(cp0_status_bev_bootstrap_bit | cp0_status_erl_error_bit)); |
/* |
* Mask all interrupts |
122,7 → 125,8 |
interrupt_init(); |
console_init(device_assign_devno()); |
#ifdef CONFIG_FB |
fb_init(0x12000000, 640, 480, 1920, VISUAL_RGB_8_8_8); // gxemul framebuffer |
/* GXemul framebuffer */ |
fb_init(0x12000000, 640, 480, 1920, VISUAL_RGB_8_8_8); |
#endif |
sysinfo_set_item_val("machine." STRING(MACHINE), NULL, 1); |
} |
143,13 → 147,14 |
{ |
/* EXL = 1, UM = 1, IE = 1 */ |
cp0_status_write(cp0_status_read() | (cp0_status_exl_exception_bit | |
cp0_status_um_bit | cp0_status_ie_enabled_bit)); |
cp0_status_um_bit | cp0_status_ie_enabled_bit)); |
cp0_epc_write((uintptr_t) kernel_uarg->uspace_entry); |
userspace_asm(((uintptr_t) kernel_uarg->uspace_stack + PAGE_SIZE), |
(uintptr_t) kernel_uarg->uspace_uarg, |
(uintptr_t) kernel_uarg->uspace_entry); |
(uintptr_t) kernel_uarg->uspace_uarg, |
(uintptr_t) kernel_uarg->uspace_entry); |
while (1); |
while (1) |
; |
} |
/** Perform mips32 specific tasks needed before the new task is run. */ |
160,7 → 165,8 |
/** Perform mips32 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
supervisor_sp = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
supervisor_sp = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE - |
SP_DELTA]; |
} |
void after_thread_ran_arch(void) |
179,10 → 185,10 |
void arch_reboot(void) |
{ |
if (!arc_reboot()) |
___halt(); |
___halt(); |
while (1); |
while (1) |
; |
} |
/** @} |
/branches/network/kernel/arch/mips32/src/interrupt.c |
---|
38,7 → 38,6 |
#include <arch.h> |
#include <arch/cp0.h> |
#include <time/clock.h> |
#include <arch/drivers/arc.h> |
#include <ipc/sysipc.h> |
#include <ddi/device.h> |
/branches/network/kernel/arch/mips32/src/start.S |
---|
349,5 → 349,7 |
userspace_asm: |
add $sp, $a0, 0 |
add $v0, $a1, 0 |
add $t9, $a2, 0 # Set up correct entry into PIC code |
add $t9, $a2, 0 # Set up correct entry into PIC code |
xor $a0, $a0, $a0 # $a0 is defined to hold pcb_ptr |
# set it to 0 |
eret |
/branches/network/kernel/arch/mips32/src/debugger.c |
---|
33,6 → 33,7 |
*/ |
#include <arch/debugger.h> |
#include <arch/barrier.h> |
#include <memstr.h> |
#include <console/kconsole.h> |
#include <console/cmd.h> |
72,7 → 73,8 |
}; |
static cmd_info_t addbkpt_info = { |
.name = "addbkpt", |
.description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch insts unsupported.", |
.description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch " |
"insts unsupported.", |
.func = cmd_add_breakpoint, |
.argc = 1, |
.argv = &add_argv |
84,7 → 86,8 |
}; |
static cmd_info_t addbkpte_info = { |
.name = "addbkpte", |
.description = "addebkpte <&symbol> <&func> - new bkpoint. Call func(or Nothing if 0).", |
.description = "addebkpte <&symbol> <&func> - new bkpoint. Call " |
"func(or Nothing if 0).", |
.func = cmd_add_breakpoint, |
.argc = 2, |
.argv = adde_argv |
93,7 → 96,7 |
static struct { |
uint32_t andmask; |
uint32_t value; |
}jmpinstr[] = { |
} jmpinstr[] = { |
{0xf3ff0000, 0x41000000}, /* BCzF */ |
{0xf3ff0000, 0x41020000}, /* BCzFL */ |
{0xf3ff0000, 0x41010000}, /* BCzT */ |
117,7 → 120,7 |
{0xfc000000, 0x08000000}, /* J */ |
{0xfc000000, 0x0c000000}, /* JAL */ |
{0xfc1f07ff, 0x00000009}, /* JALR */ |
{0,0} /* EndOfTable */ |
{0, 0} /* EndOfTable */ |
}; |
/** Test, if the given instruction is a jump or branch instruction |
129,7 → 132,7 |
{ |
int i; |
for (i=0;jmpinstr[i].andmask;i++) { |
for (i = 0; jmpinstr[i].andmask; i++) { |
if ((instr & jmpinstr[i].andmask) == jmpinstr[i].value) |
return true; |
} |
152,14 → 155,16 |
spinlock_lock(&bkpoint_lock); |
/* Check, that the breakpoints do not conflict */ |
for (i=0; i<BKPOINTS_MAX; i++) { |
for (i = 0; i < BKPOINTS_MAX; i++) { |
if (breakpoints[i].address == (uintptr_t)argv->intval) { |
printf("Duplicate breakpoint %d.\n", i); |
spinlock_unlock(&bkpoints_lock); |
return 0; |
} else if (breakpoints[i].address == (uintptr_t)argv->intval + sizeof(unative_t) || \ |
breakpoints[i].address == (uintptr_t)argv->intval - sizeof(unative_t)) { |
printf("Adjacent breakpoints not supported, conflict with %d.\n", i); |
} else if (breakpoints[i].address == (uintptr_t)argv->intval + |
sizeof(unative_t) || breakpoints[i].address == |
(uintptr_t)argv->intval - sizeof(unative_t)) { |
printf("Adjacent breakpoints not supported, conflict " |
"with %d.\n", i); |
spinlock_unlock(&bkpoints_lock); |
return 0; |
} |
166,7 → 171,7 |
} |
for (i=0; i<BKPOINTS_MAX; i++) |
for (i = 0; i < BKPOINTS_MAX; i++) |
if (!breakpoints[i].address) { |
cur = &breakpoints[i]; |
break; |
185,7 → 190,7 |
cur->flags = 0; |
	} else { /* We are adding an extended breakpoint */ |
cur->flags = BKPOINT_FUNCCALL; |
cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval; |
cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval; |
} |
if (is_jump(cur->instruction)) |
cur->flags |= BKPOINT_ONESHOT; |
193,6 → 198,7 |
/* Set breakpoint */ |
*((unative_t *)cur->address) = 0x0d; |
smc_coherence(cur->address); |
spinlock_unlock(&bkpoint_lock); |
interrupts_restore(ipl); |
200,8 → 206,6 |
return 1; |
} |
/** Remove breakpoint from table */ |
int cmd_del_breakpoint(cmd_arg_t *argv) |
{ |
208,7 → 212,7 |
bpinfo_t *cur; |
ipl_t ipl; |
if (argv->intval < 0 || argv->intval > BKPOINTS_MAX) { |
if (argv->intval > BKPOINTS_MAX) { |
printf("Invalid breakpoint number.\n"); |
return 0; |
} |
229,7 → 233,9 |
return 0; |
} |
((uint32_t *)cur->address)[0] = cur->instruction; |
smc_coherence(((uint32_t *)cur->address)[0]); |
((uint32_t *)cur->address)[1] = cur->nextinstruction; |
smc_coherence(((uint32_t *)cur->address)[1]); |
cur->address = NULL; |
252,11 → 258,11 |
symbol = get_symtab_entry(breakpoints[i].address); |
printf("%-2u %-5d %#10zx %-6s %-7s %-8s %s\n", i, |
breakpoints[i].counter, breakpoints[i].address, |
((breakpoints[i].flags & BKPOINT_INPROG) ? "true" : "false"), |
((breakpoints[i].flags & BKPOINT_ONESHOT) ? "true" : "false"), |
((breakpoints[i].flags & BKPOINT_FUNCCALL) ? "true" : "false"), |
symbol); |
breakpoints[i].counter, breakpoints[i].address, |
((breakpoints[i].flags & BKPOINT_INPROG) ? "true" : |
"false"), ((breakpoints[i].flags & BKPOINT_ONESHOT) |
? "true" : "false"), ((breakpoints[i].flags & |
BKPOINT_FUNCCALL) ? "true" : "false"), symbol); |
} |
return 1; |
} |
266,7 → 272,7 |
{ |
int i; |
for (i=0; i<BKPOINTS_MAX; i++) |
for (i = 0; i < BKPOINTS_MAX; i++) |
breakpoints[i].address = NULL; |
cmd_initialize(&bkpts_info); |
305,16 → 311,16 |
panic("Breakpoint in branch delay slot not supported.\n"); |
spinlock_lock(&bkpoint_lock); |
for (i=0; i<BKPOINTS_MAX; i++) { |
for (i = 0; i < BKPOINTS_MAX; i++) { |
/* Normal breakpoint */ |
if (fireaddr == breakpoints[i].address \ |
&& !(breakpoints[i].flags & BKPOINT_REINST)) { |
if (fireaddr == breakpoints[i].address && |
!(breakpoints[i].flags & BKPOINT_REINST)) { |
cur = &breakpoints[i]; |
break; |
} |
/* Reinst only breakpoint */ |
if ((breakpoints[i].flags & BKPOINT_REINST) \ |
&& (fireaddr ==breakpoints[i].address+sizeof(unative_t))) { |
if ((breakpoints[i].flags & BKPOINT_REINST) && |
(fireaddr == breakpoints[i].address + sizeof(unative_t))) { |
cur = &breakpoints[i]; |
break; |
} |
323,8 → 329,10 |
if (cur->flags & BKPOINT_REINST) { |
/* Set breakpoint on first instruction */ |
((uint32_t *)cur->address)[0] = 0x0d; |
smc_coherence(((uint32_t *)cur->address)[0]); |
/* Return back the second */ |
((uint32_t *)cur->address)[1] = cur->nextinstruction; |
smc_coherence(((uint32_t *)cur->address)[1]); |
cur->flags &= ~BKPOINT_REINST; |
spinlock_unlock(&bkpoint_lock); |
return; |
333,11 → 341,12 |
printf("Warning: breakpoint recursion\n"); |
if (!(cur->flags & BKPOINT_FUNCCALL)) |
printf("***Breakpoint %d: %p in %s.\n", i, |
fireaddr, get_symtab_entry(istate->epc)); |
printf("***Breakpoint %d: %p in %s.\n", i, fireaddr, |
get_symtab_entry(istate->epc)); |
/* Return first instruction back */ |
((uint32_t *)cur->address)[0] = cur->instruction; |
smc_coherence(cur->address); |
if (! (cur->flags & BKPOINT_ONESHOT)) { |
/* Set Breakpoint on next instruction */ |
/branches/network/kernel/arch/mips32/src/exception.c |
---|
161,7 → 161,7 |
* Spurious interrupt. |
*/ |
#ifdef CONFIG_DEBUG |
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, i); |
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, i); |
#endif |
} |
} |
/branches/network/kernel/arch/mips32/src/context.S |
---|
26,7 → 26,6 |
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
# |
#include <arch/asm/regname.h> |
#include <arch/context_offset.h> |
.text |
38,41 → 37,9 |
.global context_save_arch |
.global context_restore_arch |
.macro CONTEXT_STORE r |
sw $s0,OFFSET_S0(\r) |
sw $s1,OFFSET_S1(\r) |
sw $s2,OFFSET_S2(\r) |
sw $s3,OFFSET_S3(\r) |
sw $s4,OFFSET_S4(\r) |
sw $s5,OFFSET_S5(\r) |
sw $s6,OFFSET_S6(\r) |
sw $s7,OFFSET_S7(\r) |
sw $s8,OFFSET_S8(\r) |
sw $gp,OFFSET_GP(\r) |
sw $ra,OFFSET_PC(\r) |
sw $sp,OFFSET_SP(\r) |
.endm |
.macro CONTEXT_LOAD r |
lw $s0,OFFSET_S0(\r) |
lw $s1,OFFSET_S1(\r) |
lw $s2,OFFSET_S2(\r) |
lw $s3,OFFSET_S3(\r) |
lw $s4,OFFSET_S4(\r) |
lw $s5,OFFSET_S5(\r) |
lw $s6,OFFSET_S6(\r) |
lw $s7,OFFSET_S7(\r) |
lw $s8,OFFSET_S8(\r) |
lw $gp,OFFSET_GP(\r) |
lw $ra,OFFSET_PC(\r) |
lw $sp,OFFSET_SP(\r) |
.endm |
context_save_arch: |
CONTEXT_STORE $a0 |
CONTEXT_SAVE_ARCH_CORE $a0 |
# context_save returns 1 |
j $31 |
79,7 → 46,7 |
li $2, 1 |
context_restore_arch: |
CONTEXT_LOAD $a0 |
CONTEXT_RESTORE_ARCH_CORE $a0 |
# context_restore returns 0 |
j $31 |
/branches/network/kernel/arch/mips32/src/cpu/cpu.c |
---|
104,22 → 104,20 |
void cpu_print_report(cpu_t *m) |
{ |
struct data_t *data; |
int i; |
unsigned int i; |
if (m->arch.imp_num & 0x80) { |
/* Count records */ |
for (i=0;imp_data80[i].vendor;i++) |
; |
for (i = 0; imp_data80[i].vendor; i++); |
if ((m->arch.imp_num & 0x7f) >= i) { |
printf("imp=%d\n",m->arch.imp_num); |
printf("imp=%d\n", m->arch.imp_num); |
return; |
} |
data = &imp_data80[m->arch.imp_num & 0x7f]; |
} else { |
for (i=0;imp_data[i].vendor;i++) |
; |
for (i = 0; imp_data[i].vendor; i++); |
if (m->arch.imp_num >= i) { |
printf("imp=%d\n",m->arch.imp_num); |
printf("imp=%d\n", m->arch.imp_num); |
return; |
} |
data = &imp_data[m->arch.imp_num]; |
127,7 → 125,7 |
printf("cpu%d: %s %s (rev=%d.%d, imp=%d)\n", |
m->id, data->vendor, data->model, m->arch.rev_num >> 4, |
m->arch.rev_num & 0xf, m->arch.imp_num); |
m->arch.rev_num & 0xf, m->arch.imp_num); |
} |
/** @} |
/branches/network/kernel/arch/mips32/include/drivers/arc.h |
---|
File deleted |
/branches/network/kernel/arch/mips32/include/drivers/serial.h |
---|
37,6 → 37,8 |
#include <console/chardev.h> |
#define SERIAL_ADDRESS 0x98000000 |
#define SERIAL_MAX 4 |
#define SERIAL_COM1 0x3f8 |
#define SERIAL_COM1_IRQ 4 |
43,16 → 45,19 |
#define SERIAL_COM2 0x2f8 |
#define SERIAL_COM2_IRQ 3 |
#define P_WRITEB(where,what) (*((volatile char *) (0xB8000000+where))=what) |
#define P_READB(where) (*((volatile char *)(0xB8000000+where))) |
#define P_WRITEB(where, what) (*((volatile char *) (SERIAL_ADDRESS + where)) = what) |
#define P_READB(where) (*((volatile char *) (SERIAL_ADDRESS + where))) |
#define SERIAL_READ(x) P_READB(x) |
#define SERIAL_WRITE(x,c) P_WRITEB(x,c) |
#define SERIAL_READ(x) P_READB(x) |
#define SERIAL_WRITE(x, c) P_WRITEB(x, c) |
/* Interrupt enable register */ |
#define SERIAL_READ_IER(x) (P_READB((x) + 1)) |
#define SERIAL_WRITE_IER(x,c) (P_WRITEB((x)+1,c)) |
#define SERIAL_WRITE_IER(x,c) (P_WRITEB((x) + 1, c)) |
/* Interrupt identification register */ |
#define SERIAL_READ_IIR(x) (P_READB((x) + 2)) |
/* Line status register */ |
#define SERIAL_READ_LSR(x) (P_READB((x) + 5)) |
#define TRANSMIT_EMPTY_BIT 5 |
/branches/network/kernel/arch/mips32/include/drivers/msim.h |
---|
35,6 → 35,11 |
#ifndef KERN_mips32_MSIM_H_ |
#define KERN_mips32_MSIM_H_ |
/** Address of devices. */ |
#define MSIM_VIDEORAM 0x90000000 |
#define MSIM_KBD_ADDRESS 0x90000000 |
#define MSIM_KBD_IRQ 2 |
#include <console/chardev.h> |
void msim_console(devno_t devno); |
/branches/network/kernel/arch/mips32/include/mm/page.h |
---|
40,8 → 40,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifndef __ASM__ |
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) |
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) |
/branches/network/kernel/arch/mips32/include/mm/tlb.h |
---|
35,6 → 35,9 |
#ifndef KERN_mips32_TLB_H_ |
#define KERN_mips32_TLB_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
#include <arch/mm/asid.h> |
#include <arch/exception.h> |
#ifdef TLBCNT |
46,7 → 49,13 |
#define TLB_WIRED 1 |
#define TLB_KSTACK_WIRED_INDEX 0 |
#define TLB_PAGE_MASK_16K (0x3<<13) |
#define TLB_PAGE_MASK_4K (0x000 << 13) |
#define TLB_PAGE_MASK_16K (0x003 << 13) |
#define TLB_PAGE_MASK_64K (0x00f << 13) |
#define TLB_PAGE_MASK_256K (0x03f << 13) |
#define TLB_PAGE_MASK_1M (0x0ff << 13) |
#define TLB_PAGE_MASK_4M (0x3ff << 13) |
#define TLB_PAGE_MASK_16M (0xfff << 13) |
#define PAGE_UNCACHED 2 |
#define PAGE_CACHEABLE_EXC_WRITE 5 |
159,6 → 168,8 |
extern void tlb_invalid(istate_t *istate); |
extern void tlb_refill(istate_t *istate); |
extern void tlb_modified(istate_t *istate); |
extern void tlb_prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn); |
extern void tlb_prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr); |
#endif |
/branches/network/kernel/arch/mips32/include/mm/as.h |
---|
38,7 → 38,7 |
#define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0 |
#define KERNEL_ADDRESS_SPACE_START_ARCH (unsigned long) 0x80000000 |
#define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0xffffffff |
#define KERNEL_ADDRESS_SPACE_END_ARCH (unsigned long) 0x9fffffff |
#define USER_ADDRESS_SPACE_START_ARCH (unsigned long) 0x00000000 |
#define USER_ADDRESS_SPACE_END_ARCH (unsigned long) 0x7fffffff |
/branches/network/kernel/arch/mips32/include/atomic.h |
---|
58,13 → 58,13 |
asm volatile ( |
"1:\n" |
" ll %0, %1\n" |
" addiu %0, %0, %3\n" /* same as addi, but never traps on overflow */ |
" addu %0, %0, %3\n" /* same as addi, but never traps on overflow */ |
" move %2, %0\n" |
" sc %0, %1\n" |
" beq %0, %4, 1b\n" /* if the atomic operation failed, try again */ |
" nop\n" |
: "=r" (tmp), "=m" (val->count), "=r" (v) |
: "i" (i), "i" (0) |
: "=&r" (tmp), "+m" (val->count), "=&r" (v) |
: "r" (i), "i" (0) |
); |
return v; |
/branches/network/kernel/arch/mips32/include/barrier.h |
---|
45,6 → 45,9 |
#define read_barrier() asm volatile ("" ::: "memory") |
#define write_barrier() asm volatile ("" ::: "memory") |
#define smc_coherence(a) |
#define smc_coherence_block(a, l) |
#endif |
/** @} |
/branches/network/kernel/arch/mips32/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
extern int memcmp(const void *a, const void *b, size_t cnt); |
#endif |
/branches/network/kernel/arch/mips32/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_mips32_TYPES_H_ |
#define KERN_mips32_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed long int32_t; |
61,14 → 57,29 |
typedef uint32_t unative_t; |
typedef int32_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
#define PRIp "x" /**< Format for uintptr_t. */ |
#define PRIs "u" /**< Format for size_t. */ |
#define PRIc "u" /**< Format for count_t. */ |
#define PRIi "u" /**< Format for index_t. */ |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
#define PRId8 "d" /**< Format for int8_t. */ |
#define PRId16 "d" /**< Format for int16_t. */ |
#define PRId32 "ld" /**< Format for int32_t. */ |
#define PRId64 "lld" /**< Format for int64_t. */ |
#define PRIdn "d" /**< Format for native_t. */ |
#define PRIu8 "u" /**< Format for uint8_t. */ |
#define PRIu16 "u" /**< Format for uint16_t. */ |
#define PRIu32 "u" /**< Format for uint32_t. */ |
#define PRIu64 "llu" /**< Format for uint64_t. */ |
#define PRIun "u" /**< Format for unative_t. */ |
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */ |
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */ |
#define PRIx32 "x" /**< Format for hexadecimal (u)int32_t. */ |
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */ |
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */ |
/** Page Table Entry. */ |
typedef struct { |
unsigned g : 1; /**< Global bit. */ |
/branches/network/kernel/arch/mips32/include/byteorder.h |
---|
35,24 → 35,10 |
#ifndef KERN_mips32_BYTEORDER_H_ |
#define KERN_mips32_BYTEORDER_H_ |
#include <byteorder.h> |
#ifdef BIG_ENDIAN |
#define uint32_t_le2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_le2host(n) uint64_t_byteorder_swap(n) |
#define uint32_t_be2host(n) (n) |
#define uint64_t_be2host(n) (n) |
#define ARCH_IS_BIG_ENDIAN |
#else |
#define uint32_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#define ARCH_IS_LITTLE_ENDIAN |
#endif |
#endif |
/branches/network/kernel/arch/mips32/include/context_offset.h |
---|
42,6 → 42,24 |
#define OFFSET_S8 0x28 |
#define OFFSET_GP 0x2c |
#ifdef KERNEL |
# define OFFSET_IPL 0x30 |
#else |
# define OFFSET_TLS 0x30 |
# define OFFSET_F20 0x34 |
# define OFFSET_F21 0x38 |
# define OFFSET_F22 0x3c |
# define OFFSET_F23 0x40 |
# define OFFSET_F24 0x44 |
# define OFFSET_F25 0x48 |
# define OFFSET_F26 0x4c |
# define OFFSET_F27 0x50 |
# define OFFSET_F28 0x54 |
# define OFFSET_F29 0x58 |
# define OFFSET_F30 0x5c |
#endif /* KERNEL */ |
/* istate_t */ |
#define EOFFSET_AT 0x0 |
#define EOFFSET_V0 0x4 |
79,4 → 97,122 |
#define EOFFSET_K1 0x84 |
#define REGISTER_SPACE 136 |
#ifdef __ASM__ |
#include <arch/asm/regname.h> |
# ctx: address of the structure with saved context |
.macro CONTEXT_SAVE_ARCH_CORE ctx:req |
sw $s0,OFFSET_S0(\ctx) |
sw $s1,OFFSET_S1(\ctx) |
sw $s2,OFFSET_S2(\ctx) |
sw $s3,OFFSET_S3(\ctx) |
sw $s4,OFFSET_S4(\ctx) |
sw $s5,OFFSET_S5(\ctx) |
sw $s6,OFFSET_S6(\ctx) |
sw $s7,OFFSET_S7(\ctx) |
sw $s8,OFFSET_S8(\ctx) |
sw $gp,OFFSET_GP(\ctx) |
#ifndef KERNEL |
sw $k1,OFFSET_TLS(\ctx) |
# ifdef CONFIG_MIPS_FPU |
mfc1 $t0,$20 |
sw $t0, OFFSET_F20(\ctx) |
mfc1 $t0,$21 |
sw $t0, OFFSET_F21(\ctx) |
mfc1 $t0,$22 |
sw $t0, OFFSET_F22(\ctx) |
mfc1 $t0,$23 |
sw $t0, OFFSET_F23(\ctx) |
mfc1 $t0,$24 |
sw $t0, OFFSET_F24(\ctx) |
mfc1 $t0,$25 |
sw $t0, OFFSET_F25(\ctx) |
mfc1 $t0,$26 |
sw $t0, OFFSET_F26(\ctx) |
mfc1 $t0,$27 |
sw $t0, OFFSET_F27(\ctx) |
mfc1 $t0,$28 |
sw $t0, OFFSET_F28(\ctx) |
mfc1 $t0,$29 |
sw $t0, OFFSET_F29(\ctx) |
mfc1 $t0,$30 |
sw $t0, OFFSET_F30(\ctx) |
# endif /* CONFIG_MIPS_FPU */ |
#endif /* KERNEL */ |
sw $ra,OFFSET_PC(\ctx) |
sw $sp,OFFSET_SP(\ctx) |
.endm |
# ctx: address of the structure with saved context |
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req |
lw $s0,OFFSET_S0(\ctx) |
lw $s1,OFFSET_S1(\ctx) |
lw $s2,OFFSET_S2(\ctx) |
lw $s3,OFFSET_S3(\ctx) |
lw $s4,OFFSET_S4(\ctx) |
lw $s5,OFFSET_S5(\ctx) |
lw $s6,OFFSET_S6(\ctx) |
lw $s7,OFFSET_S7(\ctx) |
lw $s8,OFFSET_S8(\ctx) |
lw $gp,OFFSET_GP(\ctx) |
#ifndef KERNEL |
lw $k1,OFFSET_TLS(\ctx) |
# ifdef CONFIG_MIPS_FPU |
lw $t0, OFFSET_F20(\ctx) |
mtc1 $t0,$20 |
lw $t0, OFFSET_F21(\ctx) |
mtc1 $t0,$21 |
lw $t0, OFFSET_F22(\ctx) |
mtc1 $t0,$22 |
lw $t0, OFFSET_F23(\ctx) |
mtc1 $t0,$23 |
lw $t0, OFFSET_F24(\ctx) |
mtc1 $t0,$24 |
lw $t0, OFFSET_F25(\ctx) |
mtc1 $t0,$25 |
lw $t0, OFFSET_F26(\ctx) |
mtc1 $t0,$26 |
lw $t0, OFFSET_F27(\ctx) |
mtc1 $t0,$27 |
lw $t0, OFFSET_F28(\ctx) |
mtc1 $t0,$28 |
lw $t0, OFFSET_F29(\ctx) |
mtc1 $t0,$29 |
lw $t0, OFFSET_F30(\ctx) |
mtc1 $t0,$30 |
# endif /* CONFIG_MIPS_FPU */ |
#endif /* KERNEL */ |
lw $ra,OFFSET_PC(\ctx) |
lw $sp,OFFSET_SP(\ctx) |
.endm |
#endif |
#endif |
/branches/network/kernel/doc/BUGS_FOUND |
---|
File deleted |
/branches/network/kernel/doc/AUTHORS |
---|
1,12 → 1,12 |
Jakub Jermar <jermar@helenos.eu> |
Martin Decky <decky@helenos.eu> |
Ondrej Palkovsky <palkovsky@helenos.eu> |
Jakub Vana <vana@helenos.eu> |
Josef Cejka <cejka@helenos.eu> |
Michal Kebrt <michalek.k@seznam.cz> |
Sergey Bondari <bondari@helenos.eu> |
Pavel Jancik <alfik.009@seznam.cz> |
Petr Stepan <stepan.petr@volny.cz> |
Michal Konopa <mkonopa@seznam.cz> |
Jakub Jermar |
Martin Decky |
Ondrej Palkovsky |
Jiri Svoboda |
Jakub Vana |
Josef Cejka |
Michal Kebrt |
Sergey Bondari |
Pavel Jancik |
Petr Stepan |
Michal Konopa |
Vojtech Mencl |
/branches/network/kernel/doc/arch/mips32 |
---|
3,11 → 3,9 |
mips32 is the second port of the SPARTAN kernel, originally written by Jakub Jermar. |
It was first developed to run on the MIPS R4000 32-bit simulator. |
Now it can run on real hardware as well. |
It can be compiled and run either as little- or big-endian. |
HARDWARE REQUIREMENTS |
o SGI Indy R4600 |
o emulated MIPS 4K CPU |
CPU |
/branches/network/kernel/kernel.config |
---|
1,3 → 1,31 |
# |
# Copyright (c) 2006 Ondrej Palkovsky |
# All rights reserved. |
# |
# Redistribution and use in source and binary forms, with or without |
# modification, are permitted provided that the following conditions |
# are met: |
# |
# - Redistributions of source code must retain the above copyright |
# notice, this list of conditions and the following disclaimer. |
# - Redistributions in binary form must reproduce the above copyright |
# notice, this list of conditions and the following disclaimer in the |
# documentation and/or other materials provided with the distribution. |
# - The name of the author may not be used to endorse or promote products |
# derived from this software without specific prior written permission. |
# |
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
# |
## General configuration directives |
# Architecture |
53,16 → 81,10 |
@ "simics" Virtutech Simics simulator |
@ "lgxemul" GXEmul Little Endian |
@ "bgxemul" GXEmul Big Endian |
@ "indy" SGI Indy |
! [ARCH=mips32] MACHINE (choice) |
# Machine type |
@ "gxemul_testarm" GXEmul testarm |
! [ARCH=arm32] MACHINE (choice) |
# Framebuffer support |
! [(ARCH=mips32&MACHINE=lgxemul)|(ARCH=mips32&MACHINE=bgxemul)|(ARCH=ia32)|(ARCH=amd64)|(ARCH=arm32&MACHINE=gxemul_testarm)] CONFIG_FB (y/n) |
! [(ARCH=mips32&MACHINE=lgxemul)|(ARCH=mips32&MACHINE=bgxemul)|(ARCH=ia32)|(ARCH=amd64)|(ARCH=arm32)] CONFIG_FB (y/n) |
# Framebuffer width |
@ "640" |
128,6 → 150,9 |
# General debuging and assert checking |
! CONFIG_DEBUG (y/n) |
# Extensive debugging output |
! [CONFIG_DEBUG=y] CONFIG_EDEBUG (n/y) |
# Deadlock detection support for spinlocks |
! [CONFIG_DEBUG=y&CONFIG_SMP=y] CONFIG_DEBUG_SPINLOCK (y/n) |
142,9 → 167,3 |
# Compile kernel tests |
! CONFIG_TEST (y/n) |
## Experimental features |
# Enable experimental features |
! CONFIG_EXPERIMENTAL (n/y) |
/branches/network/kernel/genarch/include/mm/page_pt.h |
---|
116,9 → 116,7 |
#define PTE_WRITABLE(p) PTE_WRITABLE_ARCH((p)) |
#define PTE_EXECUTABLE(p) PTE_EXECUTABLE_ARCH((p)) |
#ifndef __OBJC__ |
extern as_operations_t as_pt_operations; |
#endif |
extern page_mapping_operations_t pt_mapping_operations; |
extern void page_mapping_insert_pt(as_t *as, uintptr_t page, uintptr_t frame, |
/branches/network/kernel/genarch/include/ofw/ofw_tree.h |
---|
30,6 → 30,7 |
#define KERN_OFW_TREE_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
#define OFW_TREE_PROPERTY_MAX_NAMELEN 32 |
/branches/network/kernel/genarch/src/mm/as_pt.c |
---|
52,31 → 52,6 |
static void pt_lock(as_t *as, bool lock); |
static void pt_unlock(as_t *as, bool unlock); |
#ifdef __OBJC__ |
@implementation as_t |
+ (pte_t *) page_table_create: (int) flags |
{ |
return ptl0_create(flags); |
} |
+ (void) page_table_destroy: (pte_t *) page_table |
{ |
ptl0_destroy(page_table); |
} |
- (void) page_table_lock: (bool) _lock |
{ |
pt_lock(self, _lock); |
} |
- (void) page_table_unlock: (bool) unlock |
{ |
pt_unlock(self, unlock); |
} |
@end |
#else |
as_operations_t as_pt_operations = { |
.page_table_create = ptl0_create, |
.page_table_destroy = ptl0_destroy, |
83,7 → 58,6 |
.page_table_lock = pt_lock, |
.page_table_unlock = pt_unlock |
}; |
#endif |
/** Create PTL0. |
* |
103,7 → 77,7 |
table_size = FRAME_SIZE << PTL0_SIZE; |
if (flags & FLAG_AS_KERNEL) { |
memsetb((uintptr_t) dst_ptl0, table_size, 0); |
memsetb(dst_ptl0, table_size, 0); |
} else { |
uintptr_t src, dst; |
118,7 → 92,7 |
src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; |
dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)]; |
memsetb((uintptr_t) dst_ptl0, table_size, 0); |
memsetb(dst_ptl0, table_size, 0); |
memcpy((void *) dst, (void *) src, table_size - (src - (uintptr_t) src_ptl0)); |
mutex_unlock(&AS_KERNEL->lock); |
interrupts_restore(ipl); |
/branches/network/kernel/genarch/src/mm/as_ht.c |
---|
72,7 → 72,7 |
{ |
if (flags & FLAG_AS_KERNEL) { |
hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations); |
mutex_initialize(&page_ht_lock); |
mutex_initialize(&page_ht_lock, MUTEX_PASSIVE); |
} |
return NULL; |
} |
/branches/network/kernel/genarch/src/mm/page_pt.c |
---|
76,7 → 76,7 |
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) { |
newpt = (pte_t *)frame_alloc(PTL1_SIZE, FRAME_KA); |
memsetb((uintptr_t)newpt, FRAME_SIZE << PTL1_SIZE, 0); |
memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0); |
SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt)); |
SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); |
} |
85,7 → 85,7 |
if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) { |
newpt = (pte_t *)frame_alloc(PTL2_SIZE, FRAME_KA); |
memsetb((uintptr_t)newpt, FRAME_SIZE << PTL2_SIZE, 0); |
memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0); |
SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt)); |
SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); |
} |
94,7 → 94,7 |
if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) { |
newpt = (pte_t *)frame_alloc(PTL3_SIZE, FRAME_KA); |
memsetb((uintptr_t)newpt, FRAME_SIZE << PTL3_SIZE, 0); |
memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0); |
SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt)); |
SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE); |
} |
146,7 → 146,7 |
ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page))); |
/* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */ |
memsetb((uintptr_t) &ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0); |
memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0); |
/* |
* Second, free all empty tables along the way from PTL3 down to PTL0. |
166,11 → 166,11 |
*/ |
frame_free(KA2PA((uintptr_t) ptl3)); |
if (PTL2_ENTRIES) |
memsetb((uintptr_t) &ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0); |
memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0); |
else if (PTL1_ENTRIES) |
memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); |
memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); |
else |
memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); |
memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); |
} else { |
/* |
* PTL3 is not empty. |
195,9 → 195,9 |
*/ |
frame_free(KA2PA((uintptr_t) ptl2)); |
if (PTL1_ENTRIES) |
memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); |
memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0); |
else |
memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); |
memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); |
} |
else { |
/* |
223,7 → 223,7 |
* Release the frame and remove PTL1 pointer from preceding table. |
*/ |
frame_free(KA2PA((uintptr_t) ptl1)); |
memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); |
memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0); |
} |
} |
/branches/network/kernel/genarch/src/acpi/madt.c |
---|
126,7 → 126,7 |
int madt_irq_to_pin(unsigned int irq) |
{ |
ASSERT(irq < sizeof(isa_irq_map)/sizeof(int)); |
ASSERT(irq < sizeof(isa_irq_map) / sizeof(int)); |
return isa_irq_map[irq]; |
} |
184,15 → 184,15 |
case MADT_IO_SAPIC: |
case MADT_L_SAPIC: |
case MADT_PLATFORM_INTR_SRC: |
printf("MADT: skipping %s entry (type=%zd)\n", entry[h->type], h->type); |
printf("MADT: skipping %s entry (type=%" PRIu8 ")\n", entry[h->type], h->type); |
break; |
default: |
if (h->type >= MADT_RESERVED_SKIP_BEGIN && h->type <= MADT_RESERVED_SKIP_END) { |
printf("MADT: skipping reserved entry (type=%zd)\n", h->type); |
printf("MADT: skipping reserved entry (type=%" PRIu8 ")\n", h->type); |
} |
if (h->type >= MADT_RESERVED_OEM_BEGIN) { |
printf("MADT: skipping OEM entry (type=%zd)\n", h->type); |
printf("MADT: skipping OEM entry (type=%" PRIu8 ")\n", h->type); |
} |
break; |
} |
233,8 → 233,8 |
void madt_intr_src_ovrd_entry(struct madt_intr_src_ovrd *override, uint32_t index) |
{ |
ASSERT(override->source < sizeof(isa_irq_map)/sizeof(int)); |
printf("MADT: ignoring %s entry: bus=%zd, source=%zd, global_int=%zd, flags=%#hx\n", |
ASSERT(override->source < sizeof(isa_irq_map) / sizeof(int)); |
printf("MADT: ignoring %s entry: bus=%" PRIu8 ", source=%" PRIu8 ", global_int=%" PRIu32 ", flags=%#" PRIx16 "\n", |
entry[override->header.type], override->bus, override->source, |
override->global_int, override->flags); |
} |
/branches/network/kernel/genarch/src/acpi/acpi.c |
---|
105,7 → 105,7 |
if (!acpi_sdt_check((uint8_t *) h)) |
goto next; |
*signature_map[j].sdt_ptr = h; |
printf("%#zp: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description); |
printf("%p: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description); |
} |
} |
next: |
126,7 → 126,7 |
if (!acpi_sdt_check((uint8_t *) h)) |
goto next; |
*signature_map[j].sdt_ptr = h; |
printf("%#zp: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description); |
printf("%p: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description); |
} |
} |
next: |
160,7 → 160,7 |
return; |
rsdp_found: |
printf("%#zp: ACPI Root System Description Pointer\n", acpi_rsdp); |
printf("%p: ACPI Root System Description Pointer\n", acpi_rsdp); |
acpi_rsdt = (struct acpi_rsdt *) (unative_t) acpi_rsdp->rsdt_address; |
if (acpi_rsdp->revision) acpi_xsdt = (struct acpi_xsdt *) ((uintptr_t) acpi_rsdp->xsdt_address); |
169,11 → 169,11 |
if (acpi_xsdt) map_sdt((struct acpi_sdt_header *) acpi_xsdt); |
if (acpi_rsdt && !acpi_sdt_check((uint8_t *) acpi_rsdt)) { |
printf("RSDT: %s\n", "bad checksum"); |
printf("RSDT: bad checksum\n"); |
return; |
} |
if (acpi_xsdt && !acpi_sdt_check((uint8_t *) acpi_xsdt)) { |
printf("XSDT: %s\n", "bad checksum"); |
printf("XSDT: bad checksum\n"); |
return; |
} |
/branches/network/kernel/genarch/src/ofw/ebus.c |
---|
57,7 → 57,7 |
ranges = prop->size / sizeof(ofw_ebus_range_t); |
range = prop->value; |
int i; |
unsigned int i; |
for (i = 0; i < ranges; i++) { |
if (reg->space != range[i].child_space) |
102,7 → 102,7 |
uint32_t addr = reg->addr & intr_mask->addr_mask; |
uint32_t intr = interrupt & intr_mask->intr_mask; |
int i; |
unsigned int i; |
for (i = 0; i < count; i++) { |
if ((intr_map[i].space == space) && (intr_map[i].addr == addr) |
&& (intr_map[i].intr == intr)) |
/branches/network/kernel/genarch/src/ofw/fhc.c |
---|
55,7 → 55,7 |
ranges = prop->size / sizeof(ofw_fhc_range_t); |
range = prop->value; |
int i; |
unsigned int i; |
for (i = 0; i < ranges; i++) { |
if (overlaps(reg->addr, reg->size, range[i].child_base, range[i].size)) { |
97,7 → 97,7 |
ranges = prop->size / sizeof(ofw_central_range_t); |
range = prop->value; |
int i; |
unsigned int i; |
for (i = 0; i < ranges; i++) { |
if (overlaps(reg->addr, reg->size, range[i].child_base, range[i].size)) { |
/branches/network/kernel/genarch/src/ofw/ofw_tree.c |
---|
61,7 → 61,7 |
*/ |
ofw_tree_property_t *ofw_tree_getprop(const ofw_tree_node_t *node, const char *name) |
{ |
int i; |
unsigned int i; |
for (i = 0; i < node->properties; i++) { |
if (strcmp(node->property[i].name, name) == 0) |
/branches/network/kernel/genarch/src/ofw/pci.c |
---|
65,7 → 65,7 |
ranges = prop->size / sizeof(ofw_pci_range_t); |
range = prop->value; |
int i; |
unsigned int i; |
for (i = 0; i < ranges; i++) { |
if ((reg->space & PCI_SPACE_MASK) != (range[i].space & PCI_SPACE_MASK)) |
100,7 → 100,7 |
assigned_addresses = prop->size / sizeof(ofw_pci_reg_t); |
assigned_address = prop->value; |
int i; |
unsigned int i; |
for (i = 0; i < assigned_addresses; i++) { |
if ((assigned_address[i].space & PCI_REG_MASK) == (reg->space & PCI_REG_MASK)) { |
/branches/network/kernel/genarch/src/ofw/sbus.c |
---|
61,7 → 61,7 |
ranges = prop->size / sizeof(ofw_sbus_range_t); |
range = prop->value; |
int i; |
unsigned int i; |
for (i = 0; i < ranges; i++) { |
if (overlaps(reg->addr, reg->size, range[i].child_base, |
/branches/network/kernel/Makefile |
---|
43,11 → 43,11 |
-DKERNEL |
GCC_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) \ |
-fno-builtin -fomit-frame-pointer -Wall -Wmissing-prototypes -Werror \ |
-fno-builtin -Wall -Wextra -Wno-unused-parameter -Wmissing-prototypes -Werror \ |
-nostdlib -nostdinc |
ICC_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) \ |
-fno-builtin -fomit-frame-pointer -Wall -Wmissing-prototypes -Werror \ |
-fno-builtin -Wall -Wmissing-prototypes -Werror \ |
-nostdlib -nostdinc \ |
-wd170 |
90,6 → 90,10 |
DEFS += -DCONFIG_DEBUG |
endif |
ifeq ($(CONFIG_EDEBUG),y) |
DEFS += -DCONFIG_EDEBUG |
endif |
ifeq ($(CONFIG_DEBUG_SPINLOCK),y) |
DEFS += -DCONFIG_DEBUG_SPINLOCK |
endif |
217,7 → 221,6 |
generic/src/console/chardev.c \ |
generic/src/console/console.c \ |
generic/src/console/kconsole.c \ |
generic/src/console/klog.c \ |
generic/src/console/cmd.c \ |
generic/src/cpu/cpu.c \ |
generic/src/ddi/ddi.c \ |
229,10 → 232,12 |
generic/src/main/uinit.c \ |
generic/src/main/version.c \ |
generic/src/main/shutdown.c \ |
generic/src/proc/program.c \ |
generic/src/proc/scheduler.c \ |
generic/src/proc/thread.c \ |
generic/src/proc/task.c \ |
generic/src/proc/the.c \ |
generic/src/proc/tasklet.c \ |
generic/src/syscall/syscall.c \ |
generic/src/syscall/copy.c \ |
generic/src/mm/buddy.c \ |
266,6 → 271,7 |
generic/src/synch/rwlock.c \ |
generic/src/synch/mutex.c \ |
generic/src/synch/semaphore.c \ |
generic/src/synch/smc.c \ |
generic/src/synch/waitq.c \ |
generic/src/synch/futex.c \ |
generic/src/smp/ipi.c \ |
311,16 → 317,6 |
test/sysinfo/sysinfo1.c |
endif |
## Experimental features |
# |
ifeq ($(CONFIG_EXPERIMENTAL),y) |
GENERIC_SOURCES += generic/src/lib/objc_ext.c \ |
generic/src/lib/objc.c |
EXTRA_OBJECTS = $(LIBDIR)/libobjc.a |
EXTRA_FLAGS += -x objective-c |
endif |
GENERIC_OBJECTS := $(addsuffix .o,$(basename $(GENERIC_SOURCES))) |
ARCH_OBJECTS := $(addsuffix .o,$(basename $(ARCH_SOURCES))) |
GENARCH_OBJECTS := $(addsuffix .o,$(basename $(GENARCH_SOURCES))) |
389,5 → 385,15 |
%.o: %.s |
$(AS) $(AFLAGS) $< -o $@ |
# |
# The FPU tests are the only objects for which we allow the compiler to generate |
# FPU instructions. |
# |
test/fpu/%.o: test/fpu/%.c |
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) -c $< -o $@ |
# |
# Ordinary objects. |
# |
%.o: %.c |
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) -c $< -o $@ |
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) $(FPU_NO_CFLAGS) -c $< -o $@ |
/branches/network/kernel/test/mm/slab2.c |
---|
68,8 → 68,8 |
slab_free(cache2, data2); |
break; |
} |
memsetb((uintptr_t) data1, ITEM_SIZE, 0); |
memsetb((uintptr_t) data2, ITEM_SIZE, 0); |
memsetb(data1, ITEM_SIZE, 0); |
memsetb(data2, ITEM_SIZE, 0); |
*((void **) data1) = olddata1; |
*((void **) data2) = olddata2; |
olddata1 = data1; |
100,7 → 100,7 |
printf("Incorrect memory size - use another test."); |
return; |
} |
memsetb((uintptr_t) data1, ITEM_SIZE, 0); |
memsetb(data1, ITEM_SIZE, 0); |
*((void **) data1) = olddata1; |
olddata1 = data1; |
} |
108,7 → 108,7 |
data1 = slab_alloc(cache1, FRAME_ATOMIC); |
if (!data1) |
break; |
memsetb((uintptr_t) data1, ITEM_SIZE, 0); |
memsetb(data1, ITEM_SIZE, 0); |
*((void **) data1) = olddata1; |
olddata1 = data1; |
} |
150,11 → 150,11 |
mutex_unlock(&starter_mutex); |
if (!sh_quiet) |
printf("Starting thread #%llu...\n",THREAD->tid); |
printf("Starting thread #%" PRIu64 "...\n", THREAD->tid); |
/* Alloc all */ |
if (!sh_quiet) |
printf("Thread #%llu allocating...\n", THREAD->tid); |
printf("Thread #%" PRIu64 " allocating...\n", THREAD->tid); |
while (1) { |
/* Call with atomic to detect end of memory */ |
166,7 → 166,7 |
} |
if (!sh_quiet) |
printf("Thread #%llu releasing...\n", THREAD->tid); |
printf("Thread #%" PRIu64 " releasing...\n", THREAD->tid); |
while (data) { |
new = *((void **)data); |
176,7 → 176,7 |
} |
if (!sh_quiet) |
printf("Thread #%llu allocating...\n", THREAD->tid); |
printf("Thread #%" PRIu64 " allocating...\n", THREAD->tid); |
while (1) { |
/* Call with atomic to detect end of memory */ |
188,7 → 188,7 |
} |
if (!sh_quiet) |
printf("Thread #%llu releasing...\n", THREAD->tid); |
printf("Thread #%" PRIu64 " releasing...\n", THREAD->tid); |
while (data) { |
new = *((void **)data); |
198,7 → 198,7 |
} |
if (!sh_quiet) |
printf("Thread #%llu finished\n", THREAD->tid); |
printf("Thread #%" PRIu64 " finished\n", THREAD->tid); |
slab_print_list(); |
semaphore_up(&thr_sem); |
216,7 → 216,7 |
printf("Running stress test with size %d\n", size); |
condvar_initialize(&thread_starter); |
mutex_initialize(&starter_mutex); |
mutex_initialize(&starter_mutex, MUTEX_PASSIVE); |
thr_cache = slab_cache_create("thread_cache", size, 0, NULL, NULL, 0); |
semaphore_initialize(&thr_sem,0); |
/branches/network/kernel/test/mm/slab1.c |
---|
53,7 → 53,7 |
for (i = 0; i < count; i++) { |
data[i] = slab_alloc(cache, 0); |
memsetb((uintptr_t) data[i], size, 0); |
memsetb(data[i], size, 0); |
} |
if (!quiet) { |
71,7 → 71,7 |
for (i = 0; i < count; i++) { |
data[i] = slab_alloc(cache, 0); |
memsetb((uintptr_t) data[i], size, 0); |
memsetb(data[i], size, 0); |
} |
if (!quiet) { |
89,7 → 89,7 |
for (i = count / 2; i < count; i++) { |
data[i] = slab_alloc(cache, 0); |
memsetb((uintptr_t) data[i], size, 0); |
memsetb(data[i], size, 0); |
} |
if (!quiet) { |
137,7 → 137,7 |
thread_detach(THREAD); |
if (!sh_quiet) |
printf("Starting thread #%llu...\n", THREAD->tid); |
printf("Starting thread #%" PRIu64 "...\n", THREAD->tid); |
for (j = 0; j < 10; j++) { |
for (i = 0; i < THR_MEM_COUNT; i++) |
151,7 → 151,7 |
} |
if (!sh_quiet) |
printf("Thread #%llu finished\n", THREAD->tid); |
printf("Thread #%" PRIu64 " finished\n", THREAD->tid); |
semaphore_up(&thr_sem); |
} |
/branches/network/kernel/test/mm/falloc2.c |
---|
55,10 → 55,10 |
uint8_t val = THREAD->tid % THREADS; |
index_t k; |
uintptr_t * frames = (uintptr_t *) malloc(MAX_FRAMES * sizeof(uintptr_t), FRAME_ATOMIC); |
void **frames = (void **) malloc(MAX_FRAMES * sizeof(void *), FRAME_ATOMIC); |
if (frames == NULL) { |
if (!sh_quiet) |
printf("Thread #%llu (cpu%d): Unable to allocate frames\n", THREAD->tid, CPU->id); |
printf("Thread #%" PRIu64 " (cpu%u): Unable to allocate frames\n", THREAD->tid, CPU->id); |
atomic_inc(&thread_fail); |
atomic_dec(&thread_count); |
return; |
69,11 → 69,11 |
for (run = 0; run < THREAD_RUNS; run++) { |
for (order = 0; order <= MAX_ORDER; order++) { |
if (!sh_quiet) |
printf("Thread #%llu (cpu%d): Allocating %d frames blocks ... \n", THREAD->tid, CPU->id, 1 << order); |
printf("Thread #%" PRIu64 " (cpu%u): Allocating %d frames blocks ... \n", THREAD->tid, CPU->id, 1 << order); |
allocated = 0; |
for (i = 0; i < (MAX_FRAMES >> order); i++) { |
frames[allocated] = (uintptr_t)frame_alloc(order, FRAME_ATOMIC | FRAME_KA); |
frames[allocated] = frame_alloc(order, FRAME_ATOMIC | FRAME_KA); |
if (frames[allocated]) { |
memsetb(frames[allocated], FRAME_SIZE << order, val); |
allocated++; |
82,16 → 82,16 |
} |
if (!sh_quiet) |
printf("Thread #%llu (cpu%d): %d blocks allocated.\n", THREAD->tid, CPU->id, allocated); |
printf("Thread #%" PRIu64 " (cpu%u): %d blocks allocated.\n", THREAD->tid, CPU->id, allocated); |
if (!sh_quiet) |
printf("Thread #%llu (cpu%d): Deallocating ... \n", THREAD->tid, CPU->id); |
printf("Thread #%" PRIu64 " (cpu%u): Deallocating ... \n", THREAD->tid, CPU->id); |
for (i = 0; i < allocated; i++) { |
for (k = 0; k <= ((FRAME_SIZE << order) - 1); k++) { |
for (k = 0; k <= (((index_t) FRAME_SIZE << order) - 1); k++) { |
if (((uint8_t *) frames[i])[k] != val) { |
if (!sh_quiet) |
printf("Thread #%llu (cpu%d): Unexpected data (%d) in block %p offset %#zx\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k); |
printf("Thread #%" PRIu64 " (cpu%u): Unexpected data (%c) in block %p offset %#" PRIi "\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k); |
atomic_inc(&thread_fail); |
goto cleanup; |
} |
100,7 → 100,7 |
} |
if (!sh_quiet) |
printf("Thread #%llu (cpu%d): Finished run.\n", THREAD->tid, CPU->id); |
printf("Thread #%" PRIu64 " (cpu%u): Finished run.\n", THREAD->tid, CPU->id); |
} |
} |
108,7 → 108,7 |
free(frames); |
if (!sh_quiet) |
printf("Thread #%llu (cpu%d): Exiting\n", THREAD->tid, CPU->id); |
printf("Thread #%" PRIu64 " (cpu%u): Exiting\n", THREAD->tid, CPU->id); |
atomic_dec(&thread_count); |
} |
124,7 → 124,7 |
thread_t * thrd = thread_create(falloc, NULL, TASK, 0, "falloc", false); |
if (!thrd) { |
if (!quiet) |
printf("Could not create thread %d\n", i); |
printf("Could not create thread %u\n", i); |
break; |
} |
thread_ready(thrd); |
132,7 → 132,7 |
while (atomic_get(&thread_count) > 0) { |
if (!quiet) |
printf("Threads left: %d\n", atomic_get(&thread_count)); |
printf("Threads left: %ld\n", atomic_get(&thread_count)); |
thread_sleep(1); |
} |
/branches/network/kernel/test/avltree/avltree1.c |
---|
48,7 → 48,8 |
static int test_tree_balance(avltree_node_t *node); |
static avltree_node_t *test_tree_parents(avltree_node_t *node); |
static void print_tree_structure_flat (avltree_node_t *node, int level); |
static void print_tree_structure_flat (avltree_node_t *node, int level) |
__attribute__ ((used)); |
static avltree_node_t *alloc_avltree_node(void); |
static avltree_node_t *test_tree_parents(avltree_node_t *node) |
61,14 → 62,15 |
if (node->lft) { |
tmp = test_tree_parents(node->lft); |
if (tmp != node) { |
printf("Bad parent pointer key: %d, address: %p\n", |
tmp->key, node->lft); |
printf("Bad parent pointer key: %" PRIu64 |
", address: %p\n", tmp->key, node->lft); |
} |
} |
if (node->rgt) { |
tmp = test_tree_parents(node->rgt); |
if (tmp != node) { |
printf("Bad parent pointer key: %d, address: %p\n", |
printf("Bad parent pointer key: %" PRIu64 |
", address: %p\n", |
tmp->key,node->rgt); |
} |
} |
94,7 → 96,8 |
* Prints the structure of the node, which is level levels from the top of the |
* tree. |
*/ |
static void print_tree_structure_flat(avltree_node_t *node, int level) |
static void |
print_tree_structure_flat(avltree_node_t *node, int level) |
{ |
/* |
* You can set the maximum level as high as you like. |
109,7 → 112,7 |
if (node == NULL) |
return; |
printf("%d[%d]", node->key, node->balance); |
printf("%" PRIu64 "[%" PRIu8 "]", node->key, node->balance); |
if (node->lft != NULL || node->rgt != NULL) { |
printf("("); |
130,6 → 133,7 |
for (i = 0; i < NODE_COUNT - 1; i++) { |
avltree_nodes[i].par = &avltree_nodes[i + 1]; |
} |
avltree_nodes[i].par = NULL; |
/* |
* Node keys which will be used for insertion. Up to NODE_COUNT size of |
169,7 → 173,6 |
for (i = 21; i < NODE_COUNT; i++) |
avltree_nodes[i].key = i * 3; |
avltree_nodes[i].par = NULL; |
first_free_node = &avltree_nodes[0]; |
} |
191,7 → 194,7 |
avltree_create(tree); |
if (!quiet) |
printf("Inserting %d nodes...", node_count); |
printf("Inserting %" PRIc " nodes...", node_count); |
for (i = 0; i < node_count; i++) { |
newnode = alloc_avltree_node(); |
246,7 → 249,7 |
static void test_tree_delmin(avltree_t *tree, count_t node_count, bool quiet) |
{ |
int i = 0; |
unsigned int i = 0; |
if (!quiet) |
printf("Deleting minimum nodes..."); |
/branches/network/kernel/test/synch/rwlock3.c |
---|
45,14 → 45,14 |
thread_detach(THREAD); |
if (!sh_quiet) |
printf("cpu%d, tid %llu: trying to lock rwlock for reading....\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 ": trying to lock rwlock for reading....\n", CPU->id, THREAD->tid); |
rwlock_read_lock(&rwlock); |
rwlock_read_unlock(&rwlock); |
if (!sh_quiet) { |
printf("cpu%d, tid %llu: success\n", CPU->id, THREAD->tid); |
printf("cpu%d, tid %llu: trying to lock rwlock for writing....\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 ": success\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 ": trying to lock rwlock for writing....\n", CPU->id, THREAD->tid); |
} |
rwlock_write_lock(&rwlock); |
59,7 → 59,7 |
rwlock_write_unlock(&rwlock); |
if (!sh_quiet) |
printf("cpu%d, tid %llu: success\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 ": success\n", CPU->id, THREAD->tid); |
atomic_dec(&thread_count); |
} |
88,7 → 88,7 |
while (atomic_get(&thread_count) > 0) { |
if (!quiet) |
printf("Threads left: %d\n", atomic_get(&thread_count)); |
printf("Threads left: %ld\n", atomic_get(&thread_count)); |
thread_sleep(1); |
} |
/branches/network/kernel/test/synch/rwlock4.c |
---|
74,18 → 74,18 |
to = random(40000); |
if (!sh_quiet) |
printf("cpu%d, tid %llu w+ (%d)\n", CPU->id, THREAD->tid, to); |
printf("cpu%u, tid %" PRIu64 " w+ (%d)\n", CPU->id, THREAD->tid, to); |
rc = rwlock_write_lock_timeout(&rwlock, to); |
if (SYNCH_FAILED(rc)) { |
if (!sh_quiet) |
printf("cpu%d, tid %llu w!\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " w!\n", CPU->id, THREAD->tid); |
atomic_dec(&thread_count); |
return; |
} |
if (!sh_quiet) |
printf("cpu%d, tid %llu w=\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " w=\n", CPU->id, THREAD->tid); |
if (rwlock.readers_in) { |
if (!sh_quiet) |
106,7 → 106,7 |
rwlock_write_unlock(&rwlock); |
if (!sh_quiet) |
printf("cpu%d, tid %llu w-\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " w-\n", CPU->id, THREAD->tid); |
atomic_dec(&thread_count); |
} |
119,24 → 119,24 |
to = random(2000); |
if (!sh_quiet) |
printf("cpu%d, tid %llu r+ (%d)\n", CPU->id, THREAD->tid, to); |
printf("cpu%u, tid %" PRIu64 " r+ (%d)\n", CPU->id, THREAD->tid, to); |
rc = rwlock_read_lock_timeout(&rwlock, to); |
if (SYNCH_FAILED(rc)) { |
if (!sh_quiet) |
printf("cpu%d, tid %llu r!\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " r!\n", CPU->id, THREAD->tid); |
atomic_dec(&thread_count); |
return; |
} |
if (!sh_quiet) |
printf("cpu%d, tid %llu r=\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " r=\n", CPU->id, THREAD->tid); |
thread_usleep(30000); |
rwlock_read_unlock(&rwlock); |
if (!sh_quiet) |
printf("cpu%d, tid %llu r-\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " r-\n", CPU->id, THREAD->tid); |
atomic_dec(&thread_count); |
} |
159,8 → 159,8 |
context_save(&ctx); |
if (!quiet) { |
printf("sp=%#x, readers_in=%d\n", ctx.sp, rwlock.readers_in); |
printf("Creating %d readers\n", rd); |
printf("sp=%#x, readers_in=%" PRIc "\n", ctx.sp, rwlock.readers_in); |
printf("Creating %" PRIu32 " readers\n", rd); |
} |
for (i = 0; i < rd; i++) { |
168,11 → 168,11 |
if (thrd) |
thread_ready(thrd); |
else if (!quiet) |
printf("Could not create reader %d\n", i); |
printf("Could not create reader %" PRIu32 "\n", i); |
} |
if (!quiet) |
printf("Creating %d writers\n", wr); |
printf("Creating %" PRIu32 " writers\n", wr); |
for (i = 0; i < wr; i++) { |
thrd = thread_create(writer, NULL, TASK, 0, "writer", false); |
179,7 → 179,7 |
if (thrd) |
thread_ready(thrd); |
else if (!quiet) |
printf("Could not create writer %d\n", i); |
printf("Could not create writer %" PRIu32 "\n", i); |
} |
thread_usleep(20000); |
187,7 → 187,7 |
while (atomic_get(&thread_count) > 0) { |
if (!quiet) |
printf("Threads left: %d\n", atomic_get(&thread_count)); |
printf("Threads left: %ld\n", atomic_get(&thread_count)); |
thread_sleep(1); |
} |
/branches/network/kernel/test/synch/semaphore2.c |
---|
67,18 → 67,18 |
waitq_sleep(&can_start); |
to = random(20000); |
printf("cpu%d, tid %llu down+ (%d)\n", CPU->id, THREAD->tid, to); |
printf("cpu%u, tid %" PRIu64 " down+ (%d)\n", CPU->id, THREAD->tid, to); |
rc = semaphore_down_timeout(&sem, to); |
if (SYNCH_FAILED(rc)) { |
printf("cpu%d, tid %llu down!\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " down!\n", CPU->id, THREAD->tid); |
return; |
} |
printf("cpu%d, tid %llu down=\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " down=\n", CPU->id, THREAD->tid); |
thread_usleep(random(30000)); |
semaphore_up(&sem); |
printf("cpu%d, tid %llu up\n", CPU->id, THREAD->tid); |
printf("cpu%u, tid %" PRIu64 " up\n", CPU->id, THREAD->tid); |
} |
char * test_semaphore2(bool quiet) |
91,7 → 91,7 |
thread_t *thrd; |
k = random(7) + 1; |
printf("Creating %d consumers\n", k); |
printf("Creating %" PRIu32 " consumers\n", k); |
for (i = 0; i < k; i++) { |
thrd = thread_create(consumer, NULL, TASK, 0, "consumer", false); |
if (thrd) |
/branches/network/kernel/test/synch/rwlock5.c |
---|
69,7 → 69,7 |
char * test_rwlock5(bool quiet) |
{ |
int i, j, k; |
count_t readers, writers; |
long readers, writers; |
waitq_initialize(&can_start); |
rwlock_initialize(&rwlock); |
108,7 → 108,7 |
waitq_wakeup(&can_start, WAKEUP_ALL); |
while ((items_read.count != readers) || (items_written.count != writers)) { |
printf("%zd readers remaining, %zd writers remaining, readers_in=%zd\n", readers - items_read.count, writers - items_written.count, rwlock.readers_in); |
printf("%d readers remaining, %d writers remaining, readers_in=%d\n", readers - items_read.count, writers - items_written.count, rwlock.readers_in); |
thread_usleep(100000); |
} |
} |
/branches/network/kernel/test/test.h |
---|
36,12 → 36,13 |
#define KERN_TEST_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
typedef char * (* test_entry_t)(bool); |
typedef char *(*test_entry_t)(bool); |
typedef struct { |
char * name; |
char * desc; |
char *name; |
char *desc; |
test_entry_t entry; |
bool safe; |
} test_t; |
/branches/network/kernel/test/thread/thread1.c |
---|
48,7 → 48,7 |
while (atomic_get(&finish)) { |
if (!sh_quiet) |
printf("%llu ", THREAD->tid); |
printf("%" PRIu64 " ", THREAD->tid); |
thread_usleep(100000); |
} |
atomic_inc(&threads_finished); |
78,7 → 78,7 |
thread_sleep(10); |
atomic_set(&finish, 0); |
while (atomic_get(&threads_finished) < total) { |
while (atomic_get(&threads_finished) < ((long) total)) { |
if (!quiet) |
printf("Threads left: %d\n", total - atomic_get(&threads_finished)); |
thread_sleep(1); |
/branches/network/kernel/test/fpu/mips2.c |
---|
72,7 → 72,7 |
if (arg != after_arg) { |
if (!sh_quiet) |
printf("General reg tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
printf("General reg tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
atomic_inc(&threads_fault); |
break; |
} |
104,7 → 104,7 |
if (arg != after_arg) { |
if (!sh_quiet) |
printf("General reg tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
printf("General reg tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
atomic_inc(&threads_fault); |
break; |
} |
123,7 → 123,7 |
atomic_set(&threads_fault, 0); |
if (!quiet) |
printf("Creating %d threads... ", 2 * THREADS); |
printf("Creating %u threads... ", 2 * THREADS); |
for (i = 0; i < THREADS; i++) { |
thread_t *t; |
130,7 → 130,7 |
if (!(t = thread_create(testit1, (void *) ((unative_t) 2 * i), TASK, 0, "testit1", false))) { |
if (!quiet) |
printf("could not create thread %d\n", 2 * i); |
printf("could not create thread %u\n", 2 * i); |
break; |
} |
thread_ready(t); |
138,7 → 138,7 |
if (!(t = thread_create(testit2, (void *) ((unative_t) 2 * i + 1), TASK, 0, "testit2", false))) { |
if (!quiet) |
printf("could not create thread %d\n", 2 * i + 1); |
printf("could not create thread %u\n", 2 * i + 1); |
break; |
} |
thread_ready(t); |
151,7 → 151,7 |
thread_sleep(1); |
waitq_wakeup(&can_start, WAKEUP_ALL); |
while (atomic_get(&threads_ok) != total) { |
while (atomic_get(&threads_ok) != (long) total) { |
if (!quiet) |
printf("Threads left: %d\n", total - atomic_get(&threads_ok)); |
thread_sleep(1); |
/branches/network/kernel/test/fpu/fpu1.c |
---|
126,7 → 126,7 |
if ((int) (100000000 * e) != E_10e8) { |
if (!sh_quiet) |
printf("tid%llu: e*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * e), (unative_t) E_10e8); |
printf("tid%" PRIu64 ": e*10e8=%zd should be %" PRIun "\n", THREAD->tid, (unative_t) (100000000 * e), (unative_t) E_10e8); |
atomic_inc(&threads_fault); |
break; |
} |
161,7 → 161,7 |
#ifdef KERN_ia64_ARCH_H_ |
if ((int) (1000000 * pi) != PI_10e8) { |
if (!sh_quiet) |
printf("tid%llu: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (1000000 * pi), (unative_t) (PI_10e8 / 100)); |
printf("tid%" PRIu64 ": pi*10e8=%zd should be %" PRIun "\n", THREAD->tid, (unative_t) (1000000 * pi), (unative_t) (PI_10e8 / 100)); |
atomic_inc(&threads_fault); |
break; |
} |
168,7 → 168,7 |
#else |
if ((int) (100000000 * pi) != PI_10e8) { |
if (!sh_quiet) |
printf("tid%llu: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * pi), (unative_t) PI_10e8); |
printf("tid%" PRIu64 ": pi*10e8=%zd should be %" PRIun "\n", THREAD->tid, (unative_t) (100000000 * pi), (unative_t) PI_10e8); |
atomic_inc(&threads_fault); |
break; |
} |
187,7 → 187,7 |
atomic_set(&threads_fault, 0); |
if (!quiet) |
printf("Creating %d threads... ", 2 * THREADS); |
printf("Creating %u threads... ", 2 * THREADS); |
for (i = 0; i < THREADS; i++) { |
thread_t *t; |
194,7 → 194,7 |
if (!(t = thread_create(e, NULL, TASK, 0, "e", false))) { |
if (!quiet) |
printf("could not create thread %d\n", 2 * i); |
printf("could not create thread %u\n", 2 * i); |
break; |
} |
thread_ready(t); |
202,7 → 202,7 |
if (!(t = thread_create(pi, NULL, TASK, 0, "pi", false))) { |
if (!quiet) |
printf("could not create thread %d\n", 2 * i + 1); |
printf("could not create thread %u\n", 2 * i + 1); |
break; |
} |
thread_ready(t); |
215,7 → 215,7 |
thread_sleep(1); |
waitq_wakeup(&can_start, WAKEUP_ALL); |
while (atomic_get(&threads_ok) != total) { |
while (atomic_get(&threads_ok) != (long) total) { |
if (!quiet) |
printf("Threads left: %d\n", total - atomic_get(&threads_ok)); |
thread_sleep(1); |
/branches/network/kernel/test/fpu/sse1.c |
---|
72,7 → 72,7 |
if (arg != after_arg) { |
if (!sh_quiet) |
printf("tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
printf("tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
atomic_inc(&threads_fault); |
break; |
} |
104,7 → 104,7 |
if (arg != after_arg) { |
if (!sh_quiet) |
printf("tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
printf("tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg); |
atomic_inc(&threads_fault); |
break; |
} |
123,7 → 123,7 |
atomic_set(&threads_fault, 0); |
if (!quiet) |
printf("Creating %d threads... ", 2 * THREADS); |
printf("Creating %u threads... ", 2 * THREADS); |
for (i = 0; i < THREADS; i++) { |
thread_t *t; |
130,7 → 130,7 |
if (!(t = thread_create(testit1, (void *) ((unative_t) 2 * i), TASK, 0, "testit1", false))) { |
if (!quiet) |
printf("could not create thread %d\n", 2 * i); |
printf("could not create thread %u\n", 2 * i); |
break; |
} |
thread_ready(t); |
138,7 → 138,7 |
if (!(t = thread_create(testit2, (void *) ((unative_t) 2 * i + 1), TASK, 0, "testit2", false))) { |
if (!quiet) |
printf("could not create thread %d\n", 2 * i + 1); |
printf("could not create thread %u\n", 2 * i + 1); |
break; |
} |
thread_ready(t); |
151,7 → 151,7 |
thread_sleep(1); |
waitq_wakeup(&can_start, WAKEUP_ALL); |
while (atomic_get(&threads_ok) != total) { |
while (atomic_get(&threads_ok) != (long) total) { |
if (!quiet) |
printf("Threads left: %d\n", total - atomic_get(&threads_ok)); |
thread_sleep(1); |
/branches/network/kernel/test/print/print1.c |
---|
44,12 → 44,12 |
printf(" text 8.10s %8.10s \n", "text"); |
printf(" very long text 8.10s %8.10s \n", "very long text"); |
printf(" char: c '%c', 3.2c '%3.2c', -3.2c '%-3.2c', 2.3c '%2.3c', -2.3c '%-2.3c' \n",'a', 'b', 'c', 'd', 'e' ); |
printf(" int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n",1, 1, 1, 1, 1 ); |
printf(" -int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n",-1, -1, -1, -1, -1 ); |
printf(" 0xint: x '%#x', 5.3x '%#5.3x', -5.3x '%#-5.3x', 3.5x '%#3.5x', -3.5x '%#-3.5x' \n",17, 17, 17, 17, 17 ); |
printf(" char: c '%c', 3.2c '%3.2c', -3.2c '%-3.2c', 2.3c '%2.3c', -2.3c '%-2.3c' \n", 'a', 'b', 'c', 'd', 'e'); |
printf(" int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n", 1, 1, 1, 1, 1); |
printf(" -int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n", -1, -1, -1, -1, -1); |
printf(" 0xint: x '%#x', 5.3x '%#5.3x', -5.3x '%#-5.3x', 3.5x '%#3.5x', -3.5x '%#-3.5x' \n", 17, 17, 17, 17, 17); |
printf("'%#llx' 64bit, '%#x' 32bit, '%#hhx' 8bit, '%#hx' 16bit, unative_t '%#zx'. '%#llx' 64bit and '%s' string.\n", 0x1234567887654321ll, 0x12345678, 0x12, 0x1234, nat, 0x1234567887654321ull, "Lovely string" ); |
printf("'%#llx' 64bit, '%#x' 32bit, '%#hhx' 8bit, '%#hx' 16bit, unative_t '%#" PRIxn "'. '%#llx' 64bit and '%s' string.\n", 0x1234567887654321ll, 0x12345678, 0x12, 0x1234, nat, 0x1234567887654321ull, "Lovely string" ); |
printf(" Print to NULL '%s'\n", NULL); |
/branches/network/kernel/test/test.c |
---|
58,7 → 58,11 |
#include <print/print1.def> |
#include <thread/thread1.def> |
#include <sysinfo/sysinfo1.def> |
{NULL, NULL, NULL} |
{ |
.name = NULL, |
.desc = NULL, |
.entry = NULL |
} |
}; |
/** @} |