Subversion Repositories HelenOS

Compare Revisions

Rev 4489 → Rev 4490

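The hunks below make one recurring substitution: the kernel-private count_t and index_t integer types are replaced by the standard size_t, and the matching format macros PRIc and PRIi in printf-style calls are replaced by PRIs. As a rough sketch of what is being retired, the old aliases presumably amounted to the following; their defining header and exact underlying type are assumptions, not part of this comparison:

/* Presumed pre-4490 aliases (assumption, shown for illustration only). */
typedef unsigned long count_t;   /* number of items */
typedef unsigned long index_t;   /* index into a sequence */

/* From this revision on, both roles use the standard type directly: */
size_t node_count;               /* a count */
size_t idx;                      /* an index */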
/trunk/kernel/test/avltree/avltree1.c
194,7 → 194,7
return node;
}
 
static void test_tree_insert(avltree_t *tree, count_t node_count)
static void test_tree_insert(avltree_t *tree, size_t node_count)
{
unsigned int i;
avltree_node_t *newnode;
201,7 → 201,7
avltree_create(tree);
TPRINTF("Inserting %" PRIc " nodes...", node_count);
TPRINTF("Inserting %" PRIs " nodes...", node_count);
for (i = 0; i < node_count; i++) {
newnode = alloc_avltree_node();
214,7 → 214,7
TPRINTF("done.\n");
}
 
static void test_tree_delete(avltree_t *tree, count_t node_count,
static void test_tree_delete(avltree_t *tree, size_t node_count,
int node_position)
{
avltree_node_t *delnode;
245,7 → 245,7
TPRINTF("done.\n");
}
 
static void test_tree_delmin(avltree_t *tree, count_t node_count)
static void test_tree_delmin(avltree_t *tree, size_t node_count)
{
unsigned int i = 0;
/trunk/kernel/test/synch/rwlock4.c
148,7 → 148,7
thread_t *thrd;
context_save(&ctx);
TPRINTF("sp=%#x, readers_in=%" PRIc "\n", ctx.sp, rwlock.readers_in);
TPRINTF("sp=%#x, readers_in=%" PRIs "\n", ctx.sp, rwlock.readers_in);
TPRINTF("Creating %" PRIu32 " readers\n", rd);
for (i = 0; i < rd; i++) {
/trunk/kernel/test/mm/falloc2.c
52,7 → 52,7
{
int order, run, allocated, i;
uint8_t val = THREAD->tid % THREADS;
index_t k;
size_t k;
void **frames = (void **) malloc(MAX_FRAMES * sizeof(void *), FRAME_ATOMIC);
if (frames == NULL) {
82,9 → 82,9
TPRINTF("Thread #%" PRIu64 " (cpu%u): Deallocating ... \n", THREAD->tid, CPU->id);
for (i = 0; i < allocated; i++) {
for (k = 0; k <= (((index_t) FRAME_SIZE << order) - 1); k++) {
for (k = 0; k <= (((size_t) FRAME_SIZE << order) - 1); k++) {
if (((uint8_t *) frames[i])[k] != val) {
TPRINTF("Thread #%" PRIu64 " (cpu%u): Unexpected data (%c) in block %p offset %#" PRIi "\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k);
TPRINTF("Thread #%" PRIu64 " (cpu%u): Unexpected data (%c) in block %p offset %#" PRIs "\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k);
atomic_inc(&thread_fail);
goto cleanup;
}
/trunk/kernel/test/mm/purge1.c
37,7 → 37,7
#include <debug.h>
 
extern void tlb_invalidate_all(void);
extern void tlb_invalidate_pages(asid_t asid, uintptr_t va, count_t cnt);
extern void tlb_invalidate_pages(asid_t asid, uintptr_t va, size_t cnt);
 
char *test_purge1(void)
{
/trunk/kernel/genarch/src/mm/asid.c
65,7 → 65,7
#include <adt/list.h>
#include <debug.h>
 
static count_t asids_allocated = 0;
static size_t asids_allocated = 0;
 
/** Allocate free address space identifier.
*
120,7 → 120,7
* of TLB entries (e.g. TSB on sparc64), the
* cache must be invalidated as well.
*/
as_invalidate_translation_cache(as, 0, (count_t) -1);
as_invalidate_translation_cache(as, 0, (size_t) -1);
/*
* Get the system rid of the stolen ASID.
/trunk/kernel/genarch/src/mm/page_ht.c
51,8 → 51,8
#include <adt/hash_table.h>
#include <align.h>
 
static index_t hash(unative_t key[]);
static bool compare(unative_t key[], count_t keys, link_t *item);
static size_t hash(unative_t key[]);
static bool compare(unative_t key[], size_t keys, link_t *item);
static void remove_callback(link_t *item);
 
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
93,11 → 93,11
*
* @return Index into page hash table.
*/
index_t hash(unative_t key[])
size_t hash(unative_t key[])
{
as_t *as = (as_t *) key[KEY_AS];
uintptr_t page = (uintptr_t) key[KEY_PAGE];
index_t index;
size_t index;
/*
* Virtual page addresses have roughly the same probability
124,7 → 124,7
*
* @return true on match, false otherwise.
*/
bool compare(unative_t key[], count_t keys, link_t *item)
bool compare(unative_t key[], size_t keys, link_t *item)
{
pte_t *t;
 
/trunk/kernel/genarch/src/ofw/ebus.c
49,7 → 49,7
{
ofw_tree_property_t *prop;
ofw_ebus_range_t *range;
count_t ranges;
size_t ranges;
 
prop = ofw_tree_getprop(node, "ranges");
if (!prop)
91,7 → 91,7
return false;
 
ofw_ebus_intr_map_t *intr_map = prop->value;
count_t count = prop->size / sizeof(ofw_ebus_intr_map_t);
size_t count = prop->size / sizeof(ofw_ebus_intr_map_t);
ASSERT(count);
/trunk/kernel/genarch/src/ofw/fhc.c
46,7 → 46,7
{
ofw_tree_property_t *prop;
ofw_fhc_range_t *range;
count_t ranges;
size_t ranges;
 
prop = ofw_tree_getprop(node, "ranges");
if (!prop)
88,7 → 88,7
ofw_tree_property_t *prop;
ofw_central_range_t *range;
count_t ranges;
size_t ranges;
prop = ofw_tree_getprop(node, "ranges");
if (!prop)
/trunk/kernel/genarch/src/ofw/ofw_tree.c
247,7 → 247,8
{
char buf[NAME_BUF_LEN + 1];
ofw_tree_node_t *node = ofw_root;
index_t i, j;
size_t i;
size_t j;
if (path[0] != '/')
return NULL;
/trunk/kernel/genarch/src/ofw/pci.c
54,7 → 54,7
{
ofw_tree_property_t *prop;
ofw_pci_range_t *range;
count_t ranges;
size_t ranges;
 
prop = ofw_tree_getprop(node, "ranges");
if (!prop) {
97,7 → 97,7
ofw_tree_property_t *prop;
ofw_pci_reg_t *assigned_address;
count_t assigned_addresses;
size_t assigned_addresses;
prop = ofw_tree_getprop(node, "assigned-addresses");
if (!prop)
/trunk/kernel/genarch/src/ofw/sbus.c
43,7 → 43,7
{
ofw_tree_property_t *prop;
ofw_sbus_range_t *range;
count_t ranges;
size_t ranges;
/*
* The SBUS support is very rudimentary in that we simply assume
/trunk/kernel/genarch/src/acpi/madt.c
62,11 → 62,11
struct madt_l_apic *madt_l_apic_entries = NULL;
struct madt_io_apic *madt_io_apic_entries = NULL;
 
index_t madt_l_apic_entry_index = 0;
index_t madt_io_apic_entry_index = 0;
count_t madt_l_apic_entry_cnt = 0;
count_t madt_io_apic_entry_cnt = 0;
count_t cpu_count = 0;
size_t madt_l_apic_entry_index = 0;
size_t madt_io_apic_entry_index = 0;
size_t madt_l_apic_entry_cnt = 0;
size_t madt_io_apic_entry_cnt = 0;
size_t cpu_count = 0;
 
struct madt_apic_header * * madt_entries_index = NULL;
unsigned int madt_entries_index_cnt = 0;
86,10 → 86,10
/*
* ACPI MADT Implementation of SMP configuration interface.
*/
static count_t madt_cpu_count(void);
static bool madt_cpu_enabled(index_t i);
static bool madt_cpu_bootstrap(index_t i);
static uint8_t madt_cpu_apic_id(index_t i);
static size_t madt_cpu_count(void);
static bool madt_cpu_enabled(size_t i);
static bool madt_cpu_bootstrap(size_t i);
static uint8_t madt_cpu_apic_id(size_t i);
static int madt_irq_to_pin(unsigned int irq);
 
struct smp_config_operations madt_config_operations = {
100,12 → 100,12
.irq_to_pin = madt_irq_to_pin
};
 
count_t madt_cpu_count(void)
size_t madt_cpu_count(void)
{
return madt_l_apic_entry_cnt;
}
 
bool madt_cpu_enabled(index_t i)
bool madt_cpu_enabled(size_t i)
{
ASSERT(i < madt_l_apic_entry_cnt);
return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->flags & 0x1;
112,13 → 112,13
 
}
 
bool madt_cpu_bootstrap(index_t i)
bool madt_cpu_bootstrap(size_t i)
{
ASSERT(i < madt_l_apic_entry_cnt);
return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->apic_id == l_apic_id();
}
 
uint8_t madt_cpu_apic_id(index_t i)
uint8_t madt_cpu_apic_id(size_t i)
{
ASSERT(i < madt_l_apic_entry_cnt);
return ((struct madt_l_apic *) madt_entries_index[madt_l_apic_entry_index + i])->apic_id;
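With the index parameter now a size_t, code consuming the MADT-based SMP configuration interface above would enumerate processors along these lines; this is an illustrative sketch only, not code from this revision:

static void madt_cpu_walk_sketch(void)
{
        size_t i;

        for (i = 0; i < madt_cpu_count(); i++) {
                if (!madt_cpu_enabled(i))
                        continue;

                uint8_t id = madt_cpu_apic_id(i);

                if (madt_cpu_bootstrap(i)) {
                        /* Entry i is the bootstrap processor (local APIC id == id). */
                } else {
                        /* Application processor with APIC id 'id'; started later. */
                }
        }
}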
/trunk/kernel/generic/include/symtab.h
48,7 → 48,7
extern char *symtab_fmt_name_lookup(unative_t addr);
extern int symtab_addr_lookup(const char *name, uintptr_t *addr);
extern void symtab_print_search(const char *name);
extern int symtab_compl(char *input, count_t size);
extern int symtab_compl(char *input, size_t size);
 
#ifdef CONFIG_SYMTAB
 
/trunk/kernel/generic/include/config.h
50,7 → 50,7
} init_task_t;
 
typedef struct {
count_t cnt;
size_t cnt;
init_task_t tasks[CONFIG_INIT_TASKS];
} init_t;
 
65,8 → 65,8
} ballocs_t;
 
typedef struct {
count_t cpu_count; /**< Number of processors detected. */
volatile count_t cpu_active; /**< Number of processors that are up and running. */
size_t cpu_count; /**< Number of processors detected. */
volatile size_t cpu_active; /**< Number of processors that are up and running. */
uintptr_t base;
size_t kernel_size; /**< Size of memory in bytes taken by kernel and stack */
/trunk/kernel/generic/include/string.h
71,20 → 71,20
extern size_t str_size(const char *str);
extern size_t wstr_size(const wchar_t *str);
 
extern size_t str_lsize(const char *str, count_t max_len);
extern size_t wstr_lsize(const wchar_t *str, count_t max_len);
extern size_t str_lsize(const char *str, size_t max_len);
extern size_t wstr_lsize(const wchar_t *str, size_t max_len);
 
extern count_t str_length(const char *str);
extern count_t wstr_length(const wchar_t *wstr);
extern size_t str_length(const char *str);
extern size_t wstr_length(const wchar_t *wstr);
 
extern count_t str_nlength(const char *str, size_t size);
extern count_t wstr_nlength(const wchar_t *str, size_t size);
extern size_t str_nlength(const char *str, size_t size);
extern size_t wstr_nlength(const wchar_t *str, size_t size);
 
extern bool ascii_check(wchar_t ch);
extern bool chr_check(wchar_t ch);
 
extern int str_cmp(const char *s1, const char *s2);
extern int str_lcmp(const char *s1, const char *s2, count_t max_len);
extern int str_lcmp(const char *s1, const char *s2, size_t max_len);
 
extern void str_cpy(char *dest, size_t size, const char *src);
extern void str_ncpy(char *dest, size_t size, const char *src, size_t n);
92,8 → 92,8
 
extern const char *str_chr(const char *str, wchar_t ch);
 
extern bool wstr_linsert(wchar_t *str, wchar_t ch, count_t pos, count_t max_pos);
extern bool wstr_remove(wchar_t *str, count_t pos);
extern bool wstr_linsert(wchar_t *str, wchar_t ch, size_t pos, size_t max_pos);
extern bool wstr_remove(wchar_t *str, size_t pos);
 
#endif
 
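The renamed prototypes keep the kernel's distinction between a string's size (bytes) and its length (characters); both are now reported as size_t. A small illustration, assuming string.h is included and the usual UTF-8 encoding handled by these routines:

static void string_metrics_sketch(void)
{
        /* "e" followed by U+00E9 (é): three bytes but two characters. */
        const char *s = "e\xc3\xa9";

        size_t bytes = str_size(s);    /* == 3 */
        size_t chars = str_length(s);  /* == 2 */
}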
/trunk/kernel/generic/include/proc/scheduler.h
47,7 → 47,7
typedef struct {
SPINLOCK_DECLARE(lock);
link_t rq_head; /**< List of ready threads. */
count_t n; /**< Number of threads in rq_ready. */
size_t n; /**< Number of threads in rq_ready. */
} runq_t;
 
extern atomic_t nrdy;
/trunk/kernel/generic/include/cpu.h
51,18 → 51,18
SPINLOCK_DECLARE(lock);
 
tlb_shootdown_msg_t tlb_messages[TLB_MESSAGE_QUEUE_LEN];
count_t tlb_messages_count;
size_t tlb_messages_count;
context_t saved_context;
 
atomic_t nrdy;
runq_t rq[RQ_COUNT];
volatile count_t needs_relink;
volatile size_t needs_relink;
 
SPINLOCK_DECLARE(timeoutlock);
link_t timeout_active_head;
 
count_t missed_clock_ticks; /**< When system clock loses a tick, it is recorded here
size_t missed_clock_ticks; /**< When system clock loses a tick, it is recorded here
so that clock() can react. This variable is
CPU-local and can be only accessed when interrupts
are disabled. */
/trunk/kernel/generic/include/synch/futex.h
49,7 → 49,7
/** Futex hash table link. */
link_t ht_link;
/** Number of tasks that reference this futex. */
count_t refcount;
size_t refcount;
} futex_t;
 
extern void futex_init(void);
/trunk/kernel/generic/include/synch/rwlock.h
53,7 → 53,7
*/
mutex_t exclusive;
/** Number of readers in critical section. */
count_t readers_in;
size_t readers_in;
} rwlock_t;
 
#define rwlock_write_lock(rwl) \
/trunk/kernel/generic/include/synch/spinlock.h
107,7 → 107,7
extern int printf(const char *, ...);
 
#define DEADLOCK_THRESHOLD 100000000
#define DEADLOCK_PROBE_INIT(pname) count_t pname = 0
#define DEADLOCK_PROBE_INIT(pname) size_t pname = 0
#define DEADLOCK_PROBE(pname, value) \
if ((pname)++ > (value)) { \
(pname) = 0; \
/trunk/kernel/generic/include/ddi/irq.h
104,7 → 104,7
/** Top-half pseudocode. */
irq_code_t *code;
/** Counter. */
count_t counter;
size_t counter;
/**
* Link between IRQs that are notifying the same answerbox. The list is
* protected by the answerbox irq_lock.
162,7 → 162,7
SPINLOCK_EXTERN(irq_uspace_hash_table_lock);
extern hash_table_t irq_uspace_hash_table;
 
extern void irq_init(count_t, count_t);
extern void irq_init(size_t, size_t);
extern void irq_initialize(irq_t *);
extern void irq_register(irq_t *);
extern irq_t *irq_dispatch_and_lock(inr_t);
/trunk/kernel/generic/include/console/chardev.h
57,11 → 57,11
/** Protects everything below. */
SPINLOCK_DECLARE(lock);
wchar_t buffer[INDEV_BUFLEN];
count_t counter;
size_t counter;
/** Implementation of indev operations. */
indev_operations_t *op;
index_t index;
size_t index;
void *data;
} indev_t;
 
/trunk/kernel/generic/include/console/kconsole.h
77,7 → 77,7
/** Function implementing the command. */
int (* func)(cmd_arg_t *);
/** Number of arguments. */
count_t argc;
size_t argc;
/** Argument vector. */
cmd_arg_t *argv;
/** Function for printing detailed help. */
/trunk/kernel/generic/include/console/console.h
49,7 → 49,7
extern void klog_update(void);
 
extern wchar_t getc(indev_t *indev);
extern count_t gets(indev_t *indev, char *buf, size_t buflen);
extern size_t gets(indev_t *indev, char *buf, size_t buflen);
extern unative_t sys_klog(int fd, const void *buf, size_t size);
 
extern void grab_console(void);
/trunk/kernel/generic/include/arch.h
56,7 → 56,7
* the base address of the stack.
*/
typedef struct {
count_t preemption_disabled; /**< Preemption disabled counter. */
size_t preemption_disabled; /**< Preemption disabled counter. */
thread_t *thread; /**< Current thread. */
task_t *task; /**< Current task. */
cpu_t *cpu; /**< Executing cpu. */
/trunk/kernel/generic/include/adt/hash_table.h
47,7 → 47,7
*
* @return Index into hash table.
*/
index_t (* hash)(unative_t key[]);
size_t (* hash)(unative_t key[]);
/** Hash table item comparison function.
*
56,7 → 56,7
*
* @return true if the keys match, false otherwise.
*/
bool (*compare)(unative_t key[], count_t keys, link_t *item);
bool (*compare)(unative_t key[], size_t keys, link_t *item);
 
/** Hash table item removal callback.
*
68,8 → 68,8
/** Hash table structure. */
typedef struct {
link_t *entry;
count_t entries;
count_t max_keys;
size_t entries;
size_t max_keys;
hash_table_operations_t *op;
} hash_table_t;
 
76,11 → 76,11
#define hash_table_get_instance(item, type, member) \
list_get_instance((item), type, member)
 
extern void hash_table_create(hash_table_t *h, count_t m, count_t max_keys,
extern void hash_table_create(hash_table_t *h, size_t m, size_t max_keys,
hash_table_operations_t *op);
extern void hash_table_insert(hash_table_t *h, unative_t key[], link_t *item);
extern link_t *hash_table_find(hash_table_t *h, unative_t key[]);
extern void hash_table_remove(hash_table_t *h, unative_t key[], count_t keys);
extern void hash_table_remove(hash_table_t *h, unative_t key[], size_t keys);
 
#endif
 
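A client of the generic hash table now provides a hash callback returning size_t and a compare callback receiving the key count as size_t. A minimal sketch of such a client follows; the demo_* names and the single-key scheme are invented for illustration, and the removal-callback field name is assumed from the declaration above:

#define DEMO_BUCKETS  64

static size_t demo_hash(unative_t key[])
{
        return (size_t) key[0] % DEMO_BUCKETS;
}

static bool demo_compare(unative_t key[], size_t keys, link_t *item)
{
        /* Real code would recover the item's structure via
         * hash_table_get_instance() and compare its key with key[0]. */
        return false;
}

static void demo_remove(link_t *item)
{
        /* Nothing to release in this sketch. */
}

static hash_table_operations_t demo_ops = {
        .hash = demo_hash,
        .compare = demo_compare,
        .remove_callback = demo_remove
};

static hash_table_t demo_table;

/* Typically done once at initialization time:
 *   hash_table_create(&demo_table, DEMO_BUCKETS, 1, &demo_ops); */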
/trunk/kernel/generic/include/adt/bitmap.h
41,18 → 41,19
 
typedef struct {
uint8_t *map;
count_t bits;
size_t bits;
} bitmap_t;
 
extern void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, count_t bits);
extern void bitmap_set_range(bitmap_t *bitmap, index_t start, count_t bits);
extern void bitmap_clear_range(bitmap_t *bitmap, index_t start, count_t bits);
extern void bitmap_copy(bitmap_t *dst, bitmap_t *src, count_t bits);
extern void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits);
extern void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t bits);
extern void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t bits);
extern void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t bits);
 
static inline int bitmap_get(bitmap_t *bitmap,index_t bit)
static inline int bitmap_get(bitmap_t *bitmap, size_t bit)
{
if(bit >= bitmap->bits)
return 0;
return !! ((bitmap->map)[bit/8] & (1 << (bit & 7)));
}
 
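Use of the bitmap stays the same apart from the index and count types; a brief sketch with an arbitrary 128-bit map, assuming the header above is included:

static void bitmap_sketch(void)
{
        uint8_t storage[16];              /* 128 bits of backing store */
        bitmap_t bm;

        bitmap_initialize(&bm, storage, 128);
        bitmap_set_range(&bm, 3, 10);     /* set bits 3..12 */
        bitmap_clear_range(&bm, 5, 2);    /* clear bits 5 and 6 again */

        if (bitmap_get(&bm, 4)) {
                /* Bit 4 was set above and not cleared, so this is taken. */
        }
}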
/trunk/kernel/generic/include/adt/btree.h
46,7 → 46,7
/** B-tree node structure. */
typedef struct btree_node {
/** Number of keys. */
count_t keys;
size_t keys;
 
/**
* Keys. We currently support only single keys. Additional room for one
/trunk/kernel/generic/include/adt/fifo.h
59,9 → 59,9
#define FIFO_INITIALIZE_STATIC(name, t, itms) \
struct { \
t fifo[(itms)]; \
count_t items; \
index_t head; \
index_t tail; \
size_t items; \
size_t head; \
size_t tail; \
} name = { \
.items = (itms), \
.head = 0, \
80,9 → 80,9
#define FIFO_INITIALIZE_DYNAMIC(name, t, itms) \
struct { \
t *fifo; \
count_t items; \
index_t head; \
index_t tail; \
size_t items; \
size_t head; \
size_t tail; \
} name = { \
.fifo = NULL, \
.items = (itms), \
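Both initializer macros keep their shape; only the items, head and tail members change to size_t. A minimal sketch of the static variant in use follows; the push helper is written out by hand purely for illustration, and any real fifo_push/fifo_pop helpers are not part of this comparison:

FIFO_INITIALIZE_STATIC(demo_fifo, wchar_t, 64);

static void demo_fifo_put(wchar_t ch)
{
        demo_fifo.fifo[demo_fifo.tail] = ch;
        demo_fifo.tail = (demo_fifo.tail + 1) % demo_fifo.items;
}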
/trunk/kernel/generic/include/mm/frame.h
80,7 → 80,7
#define FRAME_TO_ZONE_FLAGS(frame_flags) 0
 
typedef struct {
count_t refcount; /**< Tracking of shared frames */
size_t refcount; /**< Tracking of shared frames */
uint8_t buddy_order; /**< Buddy system block order */
link_t buddy_link; /**< Link to the next free block inside
one order */
90,10 → 90,10
typedef struct {
pfn_t base; /**< Frame_no of the first frame
in the frames array */
count_t count; /**< Size of zone */
count_t free_count; /**< Number of free frame_t
size_t count; /**< Size of zone */
size_t free_count; /**< Number of free frame_t
structures */
count_t busy_count; /**< Number of busy frame_t
size_t busy_count; /**< Number of busy frame_t
structures */
zone_flags_t flags; /**< Type of the zone */
108,7 → 108,7
*/
typedef struct {
SPINLOCK_DECLARE(lock);
count_t count;
size_t count;
zone_t info[ZONES_MAX];
} zones_t;
 
124,14 → 124,14
return (pfn_t) (addr >> FRAME_WIDTH);
}
 
static inline count_t SIZE2FRAMES(size_t size)
static inline size_t SIZE2FRAMES(size_t size)
{
if (!size)
return 0;
return (count_t) ((size - 1) >> FRAME_WIDTH) + 1;
return (size_t) ((size - 1) >> FRAME_WIDTH) + 1;
}
 
static inline size_t FRAMES2SIZE(count_t frames)
static inline size_t FRAMES2SIZE(size_t frames)
{
return (size_t) (frames << FRAME_WIDTH);
}
156,17 → 156,17
frame_alloc_generic(order, flags, NULL)
 
extern void frame_init(void);
extern void *frame_alloc_generic(uint8_t, frame_flags_t, count_t *);
extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *);
extern void frame_free(uintptr_t);
extern void frame_reference_add(pfn_t);
 
extern count_t find_zone(pfn_t frame, count_t count, count_t hint);
extern count_t zone_create(pfn_t, count_t, pfn_t, zone_flags_t);
extern void *frame_get_parent(pfn_t, count_t);
extern void frame_set_parent(pfn_t, void *, count_t);
extern void frame_mark_unavailable(pfn_t, count_t);
extern uintptr_t zone_conf_size(count_t);
extern bool zone_merge(count_t, count_t);
extern size_t find_zone(pfn_t frame, size_t count, size_t hint);
extern size_t zone_create(pfn_t, size_t, pfn_t, zone_flags_t);
extern void *frame_get_parent(pfn_t, size_t);
extern void frame_set_parent(pfn_t, void *, size_t);
extern void frame_mark_unavailable(pfn_t, size_t);
extern uintptr_t zone_conf_size(size_t);
extern bool zone_merge(size_t, size_t);
extern void zone_merge_all(void);
extern uint64_t zone_total_size(void);
 
174,7 → 174,7
* Console functions
*/
extern void zone_print_list(void);
extern void zone_print_one(count_t);
extern void zone_print_one(size_t);
 
#endif
 
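The two inline helpers above are unchanged in behaviour: SIZE2FRAMES rounds a byte count up to whole frames and FRAMES2SIZE converts a frame count back to bytes. A worked example, assuming a 4 KiB frame (FRAME_WIDTH == 12; the real value is architecture-specific):

static void frame_math_sketch(void)
{
        size_t a = SIZE2FRAMES(0);       /* == 0: nothing needed */
        size_t b = SIZE2FRAMES(1);       /* == 1: any non-zero size needs a frame */
        size_t c = SIZE2FRAMES(4096);    /* == 1: exactly one frame */
        size_t d = SIZE2FRAMES(4097);    /* == 2: rounded up */
        size_t bytes = FRAMES2SIZE(2);   /* == 8192 */
}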
/trunk/kernel/generic/include/mm/slab.h
72,8 → 72,8
 
typedef struct {
link_t link;
count_t busy; /**< Count of full slots in magazine */
count_t size; /**< Number of slots in magazine */
size_t busy; /**< Count of full slots in magazine */
size_t size; /**< Number of slots in magazine */
void *objs[]; /**< Slots in magazine */
} slab_magazine_t;
 
128,7 → 128,7
 
extern void * slab_alloc(slab_cache_t *, int);
extern void slab_free(slab_cache_t *, void *);
extern count_t slab_reclaim(int);
extern size_t slab_reclaim(int);
 
/* slab subsytem initialization */
extern void slab_cache_init(void);
/trunk/kernel/generic/include/mm/tlb.h
61,7 → 61,7
tlb_invalidate_type_t type; /**< Message type. */
asid_t asid; /**< Address space identifier. */
uintptr_t page; /**< Page address. */
count_t count; /**< Number of pages to invalidate. */
size_t count; /**< Number of pages to invalidate. */
} tlb_shootdown_msg_t;
 
extern void tlb_init(void);
68,7 → 68,7
 
#ifdef CONFIG_SMP
extern void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
uintptr_t page, count_t count);
uintptr_t page, size_t count);
extern void tlb_shootdown_finalize(void);
extern void tlb_shootdown_ipi_recv(void);
#else
84,7 → 84,7
 
extern void tlb_invalidate_all(void);
extern void tlb_invalidate_asid(asid_t asid);
extern void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt);
extern void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt);
#endif
 
/** @}
/trunk/kernel/generic/include/mm/as.h
94,7 → 94,7
* Number of processors on which this address space is active.
* Protected by asidlock.
*/
count_t cpu_refcount;
size_t cpu_refcount;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
132,7 → 132,7
/** This lock must be acquired only when the as_area lock is held. */
mutex_t lock;
/** This structure can be deallocated if refcount drops to 0. */
count_t refcount;
size_t refcount;
/**
* B+tree containing complete map of anonymous pages of the shared area.
*/
156,7 → 156,7
};
struct { /**< phys_backend members */
uintptr_t base;
count_t frames;
size_t frames;
};
} mem_backend_data_t;
 
175,7 → 175,7
/** Attributes related to the address space area itself. */
int attributes;
/** Size of this area in multiples of PAGE_SIZE. */
count_t pages;
size_t pages;
/** Base address of this area. */
uintptr_t base;
/** Map of used space. */
225,8 → 225,8
extern int as_area_get_flags(as_area_t *area);
extern bool as_area_check_access(as_area_t *area, pf_access_t access);
extern size_t as_area_get_size(uintptr_t base);
extern int used_space_insert(as_area_t *a, uintptr_t page, count_t count);
extern int used_space_remove(as_area_t *a, uintptr_t page, count_t count);
extern int used_space_insert(as_area_t *a, uintptr_t page, size_t count);
extern int used_space_remove(as_area_t *a, uintptr_t page, size_t count);
 
 
/* Interface to be implemented by architectures. */
/trunk/kernel/generic/include/ipc/event.h
49,7 → 49,7
/** Method to be used for the notification. */
unative_t method;
/** Counter. */
count_t counter;
size_t counter;
} event_t;
 
extern void event_init(void);
/trunk/kernel/generic/include/sort.h
40,8 → 40,8
/*
* sorting routines
*/
extern void bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b));
extern void qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b));
extern void bubblesort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b));
extern void qsort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b));
 
/*
* default sorting comparators
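Both sorting routines now take the element count as size_t. A hedged usage sketch with an invented comparator; the default comparators referred to above are presumably declared further down in sort.h and are not part of this comparison:

/* Hypothetical comparator: negative, zero or positive without overflow. */
static int cmp_int(void *a, void *b)
{
        return (*(int *) a > *(int *) b) - (*(int *) a < *(int *) b);
}

static void sort_sketch(void)
{
        int values[] = { 4, 1, 3, 2 };

        qsort(values, (size_t) 4, sizeof(int), cmp_int);
        /* values is now { 1, 2, 3, 4 }. */
}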
/trunk/kernel/generic/src/main/kinit.c
127,7 → 127,7
}
if (config.cpu_count > 1) {
count_t i;
size_t i;
/*
* For each CPU, create its load balancing thread.
140,7 → 140,7
spinlock_unlock(&thread->lock);
thread_ready(thread);
} else
printf("Unable to create kcpulb thread for cpu" PRIc "\n", i);
printf("Unable to create kcpulb thread for cpu" PRIs "\n", i);
}
}
#endif /* CONFIG_SMP */
168,12 → 168,12
/*
* Create user tasks, load RAM disk images.
*/
count_t i;
size_t i;
program_t programs[CONFIG_INIT_TASKS];
for (i = 0; i < init.cnt; i++) {
if (init.tasks[i].addr % FRAME_SIZE) {
printf("init[%" PRIc "].addr is not frame aligned\n", i);
printf("init[%" PRIs "].addr is not frame aligned\n", i);
continue;
}
213,7 → 213,7
int rd = init_rd((rd_header_t *) init.tasks[i].addr, init.tasks[i].size);
if (rd != RE_OK)
printf("Init binary %" PRIc " not used (error %d)\n", i, rd);
printf("Init binary %" PRIs " not used (error %d)\n", i, rd);
}
}
/trunk/kernel/generic/src/main/main.c
153,7 → 153,7
config.stack_base = config.base + config.kernel_size;
/* Avoid placing stack on top of init */
count_t i;
size_t i;
for (i = 0; i < init.cnt; i++) {
if (PA_overlaps(config.stack_base, config.stack_size,
init.tasks[i].addr, init.tasks[i].size))
233,7 → 233,7
/* Slab must be initialized after we know the number of processors. */
LOG_EXEC(slab_enable_cpucache());
printf("Detected %" PRIc " CPU(s), %" PRIu64" MiB free memory\n",
printf("Detected %" PRIs " CPU(s), %" PRIu64" MiB free memory\n",
config.cpu_count, SIZE2MB(zone_total_size()));
LOG_EXEC(cpu_init());
247,9 → 247,9
LOG_EXEC(futex_init());
if (init.cnt > 0) {
count_t i;
size_t i;
for (i = 0; i < init.cnt; i++)
LOG("init[%" PRIc "].addr=%#" PRIp ", init[%" PRIc
LOG("init[%" PRIs "].addr=%#" PRIp ", init[%" PRIs
"].size=%#" PRIs, i, init.tasks[i].addr, i,
init.tasks[i].size);
} else
/trunk/kernel/generic/src/synch/spinlock.c
75,7 → 75,7
#ifdef CONFIG_DEBUG_SPINLOCK
void spinlock_lock_debug(spinlock_t *sl)
{
count_t i = 0;
size_t i = 0;
bool deadlock_reported = false;
 
preemption_disable();
/trunk/kernel/generic/src/synch/waitq.c
415,7 → 415,7
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
{
thread_t *t;
count_t count = 0;
size_t count = 0;
 
loop:
if (list_empty(&wq->head)) {
/trunk/kernel/generic/src/synch/futex.c
59,8 → 59,8
static void futex_initialize(futex_t *futex);
 
static futex_t *futex_find(uintptr_t paddr);
static index_t futex_ht_hash(unative_t *key);
static bool futex_ht_compare(unative_t *key, count_t keys, link_t *item);
static size_t futex_ht_hash(unative_t *key);
static bool futex_ht_compare(unative_t *key, size_t keys, link_t *item);
static void futex_ht_remove_callback(link_t *item);
 
/**
288,9 → 288,9
*
* @return Index into futex hash table.
*/
index_t futex_ht_hash(unative_t *key)
size_t futex_ht_hash(unative_t *key)
{
return *key & (FUTEX_HT_SIZE-1);
return (*key & (FUTEX_HT_SIZE - 1));
}
 
/** Compare futex hash table item with a key.
300,7 → 300,7
*
* @return True if the item matches the key. False otherwise.
*/
bool futex_ht_compare(unative_t *key, count_t keys, link_t *item)
bool futex_ht_compare(unative_t *key, size_t keys, link_t *item)
{
futex_t *futex;
 
/trunk/kernel/generic/src/debug/symtab.c
55,7 → 55,7
int symtab_name_lookup(unative_t addr, char **name)
{
#ifdef CONFIG_SYMTAB
count_t i;
size_t i;
for (i = 1; symbol_table[i].address_le; i++) {
if (addr < uint64_t_le2host(symbol_table[i].address_le))
112,11 → 112,11
* @return Pointer to the part of string that should be completed or NULL.
*
*/
static const char *symtab_search_one(const char *name, count_t *startpos)
static const char *symtab_search_one(const char *name, size_t *startpos)
{
count_t namelen = str_length(name);
size_t namelen = str_length(name);
count_t pos;
size_t pos;
for (pos = *startpos; symbol_table[pos].address_le; pos++) {
const char *curname = symbol_table[pos].symbol_name;
153,8 → 153,8
int symtab_addr_lookup(const char *name, uintptr_t *addr)
{
#ifdef CONFIG_SYMTAB
count_t found = 0;
count_t pos = 0;
size_t found = 0;
size_t pos = 0;
const char *hint;
while ((hint = symtab_search_one(name, &pos))) {
182,7 → 182,7
void symtab_print_search(const char *name)
{
#ifdef CONFIG_SYMTAB
count_t pos = 0;
size_t pos = 0;
while (symtab_search_one(name, &pos)) {
uintptr_t addr = uint64_t_le2host(symbol_table[pos].address_le);
char *realname = symbol_table[pos].symbol_name;
203,7 → 203,7
* @return 0 - nothing found, 1 - success, >1 print duplicates
*
*/
int symtab_compl(char *input, count_t size)
int symtab_compl(char *input, size_t size)
{
#ifdef CONFIG_SYMTAB
const char *name = input;
216,8 → 216,8
if (str_length(name) == 0)
return 0;
count_t found = 0;
count_t pos = 0;
size_t found = 0;
size_t pos = 0;
const char *hint;
char output[MAX_SYMBOL_NAME];
/trunk/kernel/generic/src/time/clock.c
134,7 → 134,7
timeout_t *h;
timeout_handler_t f;
void *arg;
count_t missed_clock_ticks = CPU->missed_clock_ticks;
size_t missed_clock_ticks = CPU->missed_clock_ticks;
unsigned int i;
 
/*
/trunk/kernel/generic/src/ddi/ddi.c
97,7 → 97,7
* creating address space area.
*
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, int flags)
{
ASSERT(TASK);
ASSERT((pf % FRAME_SIZE) == 0);
118,9 → 118,9
/* Find the zone of the physical memory */
spinlock_lock(&zones.lock);
count_t znum = find_zone(ADDR2PFN(pf), pages, 0);
size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
if (znum == (count_t) -1) {
if (znum == (size_t) -1) {
/* Frames not found in any zones
* -> assume it is hardware device and allow mapping
*/
242,7 → 242,7
{
return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
(count_t) pages, (int) flags);
(size_t) pages, (int) flags);
}
 
/** Wrapper for SYS_ENABLE_IOSPACE syscall.
/trunk/kernel/generic/src/ddi/irq.c
99,8 → 99,8
* Hash table operations for cases when we know that
* there will be collisions between different keys.
*/
static index_t irq_ht_hash(unative_t *key);
static bool irq_ht_compare(unative_t *key, count_t keys, link_t *item);
static size_t irq_ht_hash(unative_t *key);
static bool irq_ht_compare(unative_t *key, size_t keys, link_t *item);
static void irq_ht_remove(link_t *item);
 
static hash_table_operations_t irq_ht_ops = {
115,8 → 115,8
* However, there might be still collisions among
* elements with single key (sharing of one IRQ).
*/
static index_t irq_lin_hash(unative_t *key);
static bool irq_lin_compare(unative_t *key, count_t keys, link_t *item);
static size_t irq_lin_hash(unative_t *key);
static bool irq_lin_compare(unative_t *key, size_t keys, link_t *item);
static void irq_lin_remove(link_t *item);
 
static hash_table_operations_t irq_lin_ops = {
126,7 → 126,7
};
 
/** Number of buckets in either of the hash tables. */
static count_t buckets;
static size_t buckets;
 
/** Initialize IRQ subsystem.
*
133,7 → 133,7
* @param inrs Numbers of unique IRQ numbers or INRs.
* @param chains Number of chains in the hash table.
*/
void irq_init(count_t inrs, count_t chains)
void irq_init(size_t inrs, size_t chains)
{
buckets = chains;
/*
298,7 → 298,7
*
* @return Index into the hash table.
*/
index_t irq_ht_hash(unative_t key[])
size_t irq_ht_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr % buckets;
324,7 → 324,7
*
* @return True on match or false otherwise.
*/
bool irq_ht_compare(unative_t key[], count_t keys, link_t *item)
bool irq_ht_compare(unative_t key[], size_t keys, link_t *item)
{
irq_t *irq = hash_table_get_instance(item, irq_t, link);
inr_t inr = (inr_t) key[KEY_INR];
371,7 → 371,7
*
* @return Index into the hash table.
*/
index_t irq_lin_hash(unative_t key[])
size_t irq_lin_hash(unative_t key[])
{
inr_t inr = (inr_t) key[KEY_INR];
return inr;
397,7 → 397,7
*
* @return True on match or false otherwise.
*/
bool irq_lin_compare(unative_t key[], count_t keys, link_t *item)
bool irq_lin_compare(unative_t key[], size_t keys, link_t *item)
{
irq_t *irq = list_get_instance(item, irq_t, link);
devno_t devno = (devno_t) key[KEY_DEVNO];
/trunk/kernel/generic/src/console/console.c
61,7 → 61,7
/** Kernel log initialized */
static bool klog_inited = false;
/** First kernel log characters */
static index_t klog_start = 0;
static size_t klog_start = 0;
/** Number of valid kernel log characters */
static size_t klog_len = 0;
/** Number of stored (not printed) kernel log characters */
170,10 → 170,10
* @return Number of characters read.
*
*/
count_t gets(indev_t *indev, char *buf, size_t buflen)
size_t gets(indev_t *indev, char *buf, size_t buflen)
{
size_t offset = 0;
count_t count = 0;
size_t count = 0;
buf[offset] = 0;
wchar_t ch;
226,7 → 226,7
if ((klog_stored > 0) && (stdout) && (stdout->op->write)) {
/* Print characters stored in kernel log */
index_t i;
size_t i;
for (i = klog_len - klog_stored; i < klog_len; i++)
stdout->op->write(stdout, klog[(klog_start + i) % KLOG_LENGTH], silent);
klog_stored = 0;
/trunk/kernel/generic/src/console/cmd.c
513,7 → 513,7
spinlock_lock(&cmd_lock);
link_t *cur;
count_t len = 0;
size_t len = 0;
for (cur = cmd_head.next; cur != &cmd_head; cur = cur->next) {
cmd_info_t *hlp;
hlp = list_get_instance(cur, cmd_info_t, link);
651,7 → 651,7
* call the function.
*/
count_t i;
size_t i;
for (i = 0; i < config.cpu_count; i++) {
if (!cpus[i].active)
continue;
970,7 → 970,7
*/
int cmd_tests(cmd_arg_t *argv)
{
count_t len = 0;
size_t len = 0;
test_t *test;
for (test = tests; test->name != NULL; test++) {
if (str_length(test->name) > len)
/trunk/kernel/generic/src/console/kconsole.c
86,7 → 86,7
LIST_INITIALIZE(cmd_head); /**< Command list. */
 
static wchar_t history[KCONSOLE_HISTORY][MAX_CMDLINE] = {};
static count_t history_pos = 0;
static size_t history_pos = 0;
 
/** Initialize kconsole data structures
*
159,9 → 159,9
}
 
/** Print count times a character */
static void print_cc(wchar_t ch, count_t count)
static void print_cc(wchar_t ch, size_t count)
{
count_t i;
size_t i;
for (i = 0; i < count; i++)
putchar(ch);
}
169,7 → 169,7
/** Try to find a command beginning with prefix */
static const char *cmdtab_search_one(const char *name, link_t **startpos)
{
count_t namelen = str_length(name);
size_t namelen = str_length(name);
spinlock_lock(&cmd_lock);
205,7 → 205,7
{
const char *name = input;
count_t found = 0;
size_t found = 0;
link_t *pos = NULL;
const char *hint;
char output[MAX_CMDLINE];
240,7 → 240,7
{
printf("%s> ", prompt);
count_t position = 0;
size_t position = 0;
wchar_t *current = history[history_pos];
current[0] = 0;
280,7 → 280,7
/* Find the beginning of the word
and copy it to tmp */
count_t beg;
size_t beg;
for (beg = position - 1; (beg > 0) && (!isspace(current[beg]));
beg--);
313,7 → 313,7
/* We have a hint */
size_t off = 0;
count_t i = 0;
size_t i = 0;
while ((ch = str_decode(tmp, &off, STR_NO_LIMIT)) != 0) {
if (!wstr_linsert(current, ch, position + i, MAX_CMDLINE))
break;
542,7 → 542,7
if (str_lcmp(hlp->name, cmdline + start,
max(str_length(hlp->name),
str_nlength(cmdline + start, (count_t) (end - start) - 1))) == 0) {
str_nlength(cmdline + start, (size_t) (end - start) - 1))) == 0) {
cmd = hlp;
break;
}
568,7 → 568,7
*/
bool error = false;
count_t i;
size_t i;
for (i = 0; i < cmd->argc; i++) {
start = end;
if (!parse_argument(cmdline, size, &start, &end)) {
659,7 → 659,7
while (true) {
wchar_t *tmp = clever_readline((char *) prompt, stdin);
count_t len = wstr_length(tmp);
size_t len = wstr_length(tmp);
if (!len)
continue;
/trunk/kernel/generic/src/printf/vprintf.c
46,7 → 46,7
static int vprintf_str_write(const char *str, size_t size, void *data)
{
size_t offset = 0;
count_t chars = 0;
size_t chars = 0;
while (offset < size) {
putchar(str_decode(str, &offset, size));
59,7 → 59,7
static int vprintf_wstr_write(const wchar_t *str, size_t size, void *data)
{
size_t offset = 0;
count_t chars = 0;
size_t chars = 0;
while (offset < size) {
putchar(str[chars]);
73,7 → 73,7
int puts(const char *str)
{
size_t offset = 0;
count_t chars = 0;
size_t chars = 0;
wchar_t uc;
while ((uc = str_decode(str, &offset, STR_NO_LIMIT)) != 0) {
/trunk/kernel/generic/src/printf/vsnprintf.c
82,7 → 82,7
* with the trailing zero => print only a part
* of string
*/
index_t index = 0;
size_t index = 0;
while (index < size) {
wchar_t uc = str_decode(str, &index, size);
130,7 → 130,7
*/
static int vsnprintf_wstr_write(const wchar_t *str, size_t size, vsnprintf_data_t *data)
{
index_t index = 0;
size_t index = 0;
while (index < (size / sizeof(wchar_t))) {
size_t left = data->size - data->len;
/trunk/kernel/generic/src/printf/printf_core.c
174,7 → 174,7
*/
static int print_char(const char ch, int width, uint32_t flags, printf_spec_t *ps)
{
count_t counter = 0;
size_t counter = 0;
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
while (--width > 0) {
/*
212,7 → 212,7
*/
static int print_wchar(const wchar_t ch, int width, uint32_t flags, printf_spec_t *ps)
{
count_t counter = 0;
size_t counter = 0;
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
while (--width > 0) {
/*
255,12 → 255,12
return printf_putstr(nullstr, ps);
 
/* Print leading spaces. */
count_t strw = str_length(str);
size_t strw = str_length(str);
if (precision == 0)
precision = strw;
 
/* Left padding */
count_t counter = 0;
size_t counter = 0;
width -= precision;
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
while (width-- > 0) {
311,7 → 311,7
precision = strw;
/* Left padding */
count_t counter = 0;
size_t counter = 0;
width -= precision;
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
while (width-- > 0) {
433,7 → 433,7
}
width -= precision + size - number_size;
count_t counter = 0;
size_t counter = 0;
if (!(flags & __PRINTF_FLAG_LEFTALIGNED)) {
while (width-- > 0) {
596,7 → 596,7
size_t nxt = 0; /* Index of the next character from fmt */
size_t j = 0; /* Index to the first not printed nonformating character */
count_t counter = 0; /* Number of characters printed */
size_t counter = 0; /* Number of characters printed */
int retval; /* Return values from nested functions */
while (true) {
/trunk/kernel/generic/src/proc/scheduler.c
708,7 → 708,7
continue;
 
spinlock_lock(&cpus[cpu].lock);
printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIc "\n",
printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
cpus[cpu].needs_relink);
/trunk/kernel/generic/src/lib/string.c
62,10 → 62,10
* the NULL-terminator), size_t
*
* [wide] string length number of CHARACTERS in a [wide] string (excluding
* the NULL-terminator), count_t
* the NULL-terminator), size_t
*
* [wide] string width number of display cells on a monospace display taken
* by a [wide] string, count_t
* by a [wide] string, size_t
*
*
* Overview of string metrics:@n
75,10 → 75,10
* size n size_t number of BYTES in a string (excluding the
* NULL-terminator)
*
* length l count_t number of CHARACTERS in a string (excluding the
* length l size_t number of CHARACTERS in a string (excluding the
* null terminator)
*
* width w count_t number of display cells on a monospace display
* width w size_t number of display cells on a monospace display
* taken by a string
*
*
97,7 → 97,7
*
* pointer (char *, wchar_t *)
* byte offset (size_t)
* character index (count_t)
* character index (size_t)
*
*/
 
309,9 → 309,9
* @return Number of bytes used by the characters.
*
*/
size_t str_lsize(const char *str, count_t max_len)
size_t str_lsize(const char *str, size_t max_len)
{
count_t len = 0;
size_t len = 0;
size_t offset = 0;
while (len < max_len) {
337,7 → 337,7
* @return Number of bytes used by the wide characters.
*
*/
size_t wstr_lsize(const wchar_t *str, count_t max_len)
size_t wstr_lsize(const wchar_t *str, size_t max_len)
{
return (wstr_nlength(str, max_len * sizeof(wchar_t)) * sizeof(wchar_t));
}
349,9 → 349,9
* @return Number of characters in string.
*
*/
count_t str_length(const char *str)
size_t str_length(const char *str)
{
count_t len = 0;
size_t len = 0;
size_t offset = 0;
while (str_decode(str, &offset, STR_NO_LIMIT) != 0)
367,9 → 367,9
* @return Number of characters in @a str.
*
*/
count_t wstr_length(const wchar_t *wstr)
size_t wstr_length(const wchar_t *wstr)
{
count_t len = 0;
size_t len = 0;
while (*wstr++ != 0)
len++;
385,9 → 385,9
* @return Number of characters in string.
*
*/
count_t str_nlength(const char *str, size_t size)
size_t str_nlength(const char *str, size_t size)
{
count_t len = 0;
size_t len = 0;
size_t offset = 0;
while (str_decode(str, &offset, size) != 0)
404,11 → 404,11
* @return Number of characters in string.
*
*/
count_t wstr_nlength(const wchar_t *str, size_t size)
size_t wstr_nlength(const wchar_t *str, size_t size)
{
count_t len = 0;
count_t limit = ALIGN_DOWN(size, sizeof(wchar_t));
count_t offset = 0;
size_t len = 0;
size_t limit = ALIGN_DOWN(size, sizeof(wchar_t));
size_t offset = 0;
while ((offset < limit) && (*str++ != 0)) {
len++;
496,7 → 496,7
* 1 if second smaller.
*
*/
int str_lcmp(const char *s1, const char *s2, count_t max_len)
int str_lcmp(const char *s1, const char *s2, size_t max_len)
{
wchar_t c1 = 0;
wchar_t c2 = 0;
504,7 → 504,7
size_t off1 = 0;
size_t off2 = 0;
count_t len = 0;
size_t len = 0;
 
while (true) {
if (len >= max_len)
615,7 → 615,7
return;
wchar_t ch;
count_t src_idx = 0;
size_t src_idx = 0;
size_t dst_off = 0;
while ((ch = src[src_idx++]) != 0) {
666,14 → 666,14
* is out of bounds.
*
*/
bool wstr_linsert(wchar_t *str, wchar_t ch, count_t pos, count_t max_pos)
bool wstr_linsert(wchar_t *str, wchar_t ch, size_t pos, size_t max_pos)
{
count_t len = wstr_length(str);
size_t len = wstr_length(str);
if ((pos > len) || (pos + 1 > max_pos))
return false;
count_t i;
size_t i;
for (i = len; i + 1 > pos; i--)
str[i + 1] = str[i];
694,14 → 694,14
* is out of bounds.
*
*/
bool wstr_remove(wchar_t *str, count_t pos)
bool wstr_remove(wchar_t *str, size_t pos)
{
count_t len = wstr_length(str);
size_t len = wstr_length(str);
if (pos >= len)
return false;
count_t i;
size_t i;
for (i = pos + 1; i <= len; i++)
str[i - 1] = str[i];
/trunk/kernel/generic/src/lib/sort.c
45,8 → 45,8
 
#define EBUFSIZE 32
 
void _qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot);
void _bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot);
void _qsort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot);
void _bubblesort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot);
 
/** Quicksort wrapper
*
61,7 → 61,7
* @param cmp Comparator function.
*
*/
void qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b))
void qsort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b))
{
uint8_t buf_tmp[EBUFSIZE];
uint8_t buf_pivot[EBUFSIZE];
93,7 → 93,7
* @param pivot Pointer to scratch memory buffer e_size bytes long.
*
*/
void _qsort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot)
void _qsort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *tmp, void *pivot)
{
if (n > 4) {
unsigned int i = 0, j = n - 1;
133,7 → 133,7
* @param cmp Comparator function.
*
*/
void bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b))
void bubblesort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b))
{
uint8_t buf_slot[EBUFSIZE];
void * slot = buf_slot;
160,7 → 160,7
* @param slot Pointer to scratch memory buffer e_size bytes long.
*
*/
void _bubblesort(void * data, count_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot)
void _bubblesort(void * data, size_t n, size_t e_size, int (* cmp) (void * a, void * b), void *slot)
{
bool done = false;
void * p;
/trunk/kernel/generic/src/adt/btree.c
63,9 → 63,9
static void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key);
static btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median);
static btree_node_t *node_combine(btree_node_t *node);
static index_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right);
static void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, index_t idx);
static void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, index_t idx);
static size_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right);
static void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, size_t idx);
static void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, size_t idx);
static bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree);
static bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree);
static bool try_rotation_from_left(btree_node_t *rnode);
137,7 → 137,7
*/
void btree_destroy_subtree(btree_node_t *root)
{
count_t i;
size_t i;
 
if (root->keys) {
for (i = 0; i < root->keys + 1; i++) {
269,7 → 269,7
}
if (node->keys > FILL_FACTOR) {
count_t i;
size_t i;
 
/*
* The key can be immediately removed.
285,7 → 285,7
}
} else {
index_t idx;
size_t idx;
btree_node_t *rnode, *parent;
 
/*
335,7 → 335,7
continue;
} else {
void *val;
count_t i;
size_t i;
/*
* Now if the key is smaller than cur->key[i]
442,11 → 442,11
*/
void node_insert_key_and_lsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *lsubtree)
{
count_t i;
size_t i;
 
for (i = 0; i < node->keys; i++) {
if (key < node->key[i]) {
count_t j;
size_t j;
for (j = node->keys; j > i; j--) {
node->key[j] = node->key[j - 1];
478,11 → 478,11
*/
void node_insert_key_and_rsubtree(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree)
{
count_t i;
size_t i;
 
for (i = 0; i < node->keys; i++) {
if (key < node->key[i]) {
count_t j;
size_t j;
for (j = node->keys; j > i; j--) {
node->key[j] = node->key[j - 1];
510,7 → 510,7
*/
void node_remove_key_and_lsubtree(btree_node_t *node, btree_key_t key)
{
count_t i, j;
size_t i, j;
for (i = 0; i < node->keys; i++) {
if (key == node->key[i]) {
538,7 → 538,7
*/
void node_remove_key_and_rsubtree(btree_node_t *node, btree_key_t key)
{
count_t i, j;
size_t i, j;
for (i = 0; i < node->keys; i++) {
if (key == node->key[i]) {
576,7 → 576,7
btree_node_t *node_split(btree_node_t *node, btree_key_t key, void *value, btree_node_t *rsubtree, btree_key_t *median)
{
btree_node_t *rnode;
count_t i, j;
size_t i, j;
 
ASSERT(median);
ASSERT(node->keys == BTREE_MAX_KEYS);
603,7 → 603,7
* Copy big keys, values and subtree pointers to the new right sibling.
* If this is an index node, do not copy the median.
*/
i = (count_t) INDEX_NODE(node);
i = (size_t) INDEX_NODE(node);
for (i += MEDIAN_HIGH_INDEX(node), j = 0; i < node->keys; i++, j++) {
rnode->key[j] = node->key[i];
rnode->value[j] = node->value[i];
636,9 → 636,9
*/
btree_node_t *node_combine(btree_node_t *node)
{
index_t idx;
size_t idx;
btree_node_t *rnode;
count_t i;
size_t i;
 
ASSERT(!ROOT_NODE(node));
685,9 → 685,9
*
* @return Index of the key associated with the subtree.
*/
index_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right)
size_t find_key_by_subtree(btree_node_t *node, btree_node_t *subtree, bool right)
{
count_t i;
size_t i;
for (i = 0; i < node->keys + 1; i++) {
if (subtree == node->subtree[i])
706,7 → 706,7
* @param rnode Right sibling.
* @param idx Index of the parent node key that is taking part in the rotation.
*/
void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, index_t idx)
void rotate_from_left(btree_node_t *lnode, btree_node_t *rnode, size_t idx)
{
btree_key_t key;
 
743,7 → 743,7
* @param rnode Right sibling.
* @param idx Index of the parent node key that is taking part in the rotation.
*/
void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, index_t idx)
void rotate_from_right(btree_node_t *lnode, btree_node_t *rnode, size_t idx)
{
btree_key_t key;
 
786,7 → 786,7
*/
bool try_insert_by_rotation_to_left(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree)
{
index_t idx;
size_t idx;
btree_node_t *lnode;
 
/*
833,7 → 833,7
*/
bool try_insert_by_rotation_to_right(btree_node_t *node, btree_key_t inskey, void *insvalue, btree_node_t *rsubtree)
{
index_t idx;
size_t idx;
btree_node_t *rnode;
 
/*
872,7 → 872,7
*/
bool try_rotation_from_left(btree_node_t *rnode)
{
index_t idx;
size_t idx;
btree_node_t *lnode;
 
/*
907,7 → 907,7
*/
bool try_rotation_from_right(btree_node_t *lnode)
{
index_t idx;
size_t idx;
btree_node_t *rnode;
 
/*
940,7 → 940,7
*/
void btree_print(btree_t *t)
{
count_t i;
size_t i;
int depth = t->root->depth;
link_t head, *cur;
 
/trunk/kernel/generic/src/adt/hash_table.c
51,9 → 51,9
* @param max_keys Maximal number of keys needed to identify an item.
* @param op Hash table operations structure.
*/
void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op)
void hash_table_create(hash_table_t *h, size_t m, size_t max_keys, hash_table_operations_t *op)
{
index_t i;
size_t i;
 
ASSERT(h);
ASSERT(op);
83,7 → 83,7
*/
void hash_table_insert(hash_table_t *h, unative_t key[], link_t *item)
{
index_t chain;
size_t chain;
ASSERT(item);
ASSERT(h);
107,7 → 107,7
link_t *hash_table_find(hash_table_t *h, unative_t key[])
{
link_t *cur;
index_t chain;
size_t chain;
ASSERT(h);
ASSERT(h->op);
137,9 → 137,9
* @param key Array of keys that will be compared against items of the hash table.
* @param keys Number of keys in the key array.
*/
void hash_table_remove(hash_table_t *h, unative_t key[], count_t keys)
void hash_table_remove(hash_table_t *h, unative_t key[], size_t keys)
{
index_t chain;
size_t chain;
link_t *cur;
ASSERT(h);
/trunk/kernel/generic/src/adt/bitmap.c
54,7 → 54,7
* @param map Address of the memory used to hold the map.
* @param bits Number of bits stored in bitmap.
*/
void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, count_t bits)
void bitmap_initialize(bitmap_t *bitmap, uint8_t *map, size_t bits)
{
bitmap->map = map;
bitmap->bits = bits;
66,13 → 66,13
* @param start Starting bit.
* @param bits Number of bits to set.
*/
void bitmap_set_range(bitmap_t *bitmap, index_t start, count_t bits)
void bitmap_set_range(bitmap_t *bitmap, size_t start, size_t bits)
{
index_t i=0;
index_t aligned_start;
count_t lub; /* leading unaligned bits */
count_t amb; /* aligned middle bits */
count_t tab; /* trailing aligned bits */
size_t i = 0;
size_t aligned_start;
size_t lub; /* leading unaligned bits */
size_t amb; /* aligned middle bits */
size_t tab; /* trailing aligned bits */
ASSERT(start + bits <= bitmap->bits);
116,13 → 116,13
* @param start Starting bit.
* @param bits Number of bits to clear.
*/
void bitmap_clear_range(bitmap_t *bitmap, index_t start, count_t bits)
void bitmap_clear_range(bitmap_t *bitmap, size_t start, size_t bits)
{
index_t i=0;
index_t aligned_start;
count_t lub; /* leading unaligned bits */
count_t amb; /* aligned middle bits */
count_t tab; /* trailing aligned bits */
size_t i = 0;
size_t aligned_start;
size_t lub; /* leading unaligned bits */
size_t amb; /* aligned middle bits */
size_t tab; /* trailing aligned bits */
ASSERT(start + bits <= bitmap->bits);
168,9 → 168,9
* @param src Source bitmap.
* @param bits Number of bits to copy.
*/
void bitmap_copy(bitmap_t *dst, bitmap_t *src, count_t bits)
void bitmap_copy(bitmap_t *dst, bitmap_t *src, size_t bits)
{
index_t i;
size_t i;
ASSERT(bits <= dst->bits);
ASSERT(bits <= src->bits);
/trunk/kernel/generic/src/mm/slab.c
156,8 → 156,8
slab_cache_t *cache; /**< Pointer to parent cache. */
link_t link; /**< List of full/partial slabs. */
void *start; /**< Start address of first available item. */
count_t available; /**< Count of available items in this slab. */
index_t nextavail; /**< The index of next available item. */
size_t available; /**< Count of available items in this slab. */
size_t nextavail; /**< The index of next available item. */
} slab_t;
 
#ifdef CONFIG_DEBUG
177,7 → 177,7
slab_t *slab;
size_t fsize;
unsigned int i;
count_t zone = 0;
size_t zone = 0;
data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
if (!data) {
215,7 → 215,7
*
* @return number of freed frames
*/
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
frame_free(KA2PA(slab->start));
if (! (cache->flags & SLAB_CACHE_SLINSIDE))
243,7 → 243,7
*
* @return Number of freed pages
*/
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
{
int freed = 0;
 
371,10 → 371,10
*
* @return Number of freed pages
*/
static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
{
unsigned int i;
count_t frames = 0;
size_t frames = 0;
 
for (i = 0; i < mag->busy; i++) {
frames += slab_obj_destroy(cache, mag->objs[i], NULL);
649,11 → 649,11
* @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
* @return Number of freed pages
*/
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
static size_t _slab_reclaim(slab_cache_t *cache, int flags)
{
unsigned int i;
slab_magazine_t *mag;
count_t frames = 0;
size_t frames = 0;
int magcount;
if (cache->flags & SLAB_CACHE_NOMAGAZINE)
771,11 → 771,11
}
 
/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
size_t slab_reclaim(int flags)
{
slab_cache_t *cache;
link_t *cur;
count_t frames = 0;
size_t frames = 0;
 
spinlock_lock(&slab_cache_lock);
 
/trunk/kernel/generic/src/mm/tlb.c
79,7 → 79,7
* @param count Number of pages, if required by type.
*/
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
uintptr_t page, count_t count)
uintptr_t page, size_t count)
{
unsigned int i;
 
108,7 → 108,7
/*
* Enqueue the message.
*/
index_t idx = cpu->tlb_messages_count++;
size_t idx = cpu->tlb_messages_count++;
cpu->tlb_messages[idx].type = type;
cpu->tlb_messages[idx].asid = asid;
cpu->tlb_messages[idx].page = page;
143,7 → 143,7
tlb_invalidate_type_t type;
asid_t asid;
uintptr_t page;
count_t count;
size_t count;
unsigned int i;
ASSERT(CPU);
/trunk/kernel/generic/src/mm/backend_anon.c
195,7 → 195,7
node = list_get_instance(cur, btree_node_t, leaf_link);
for (i = 0; i < node->keys; i++) {
uintptr_t base = node->key[i];
count_t count = (count_t) node->value[i];
size_t count = (size_t) node->value[i];
unsigned int j;
for (j = 0; j < count; j++) {
/trunk/kernel/generic/src/mm/as.c
418,8 → 418,8
btree_node_t, leaf_link);
if ((cond = (bool) node->keys)) {
uintptr_t b = node->key[node->keys - 1];
count_t c =
(count_t) node->value[node->keys - 1];
size_t c =
(size_t) node->value[node->keys - 1];
unsigned int i = 0;
if (overlaps(b, c * PAGE_SIZE, area->base,
555,10 → 555,10
node = list_get_instance(cur, btree_node_t, leaf_link);
for (i = 0; i < node->keys; i++) {
uintptr_t b = node->key[i];
count_t j;
size_t j;
pte_t *pte;
for (j = 0; j < (count_t) node->value[i]; j++) {
for (j = 0; j < (size_t) node->value[i]; j++) {
page_table_lock(as, false);
pte = page_mapping_find(as, b + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
788,8 → 788,8
ipl_t ipl;
int page_flags;
uintptr_t *old_frame;
index_t frame_idx;
count_t used_pages;
size_t frame_idx;
size_t used_pages;
/* Flags for the new memory mapping */
page_flags = area_flags_to_page_flags(flags);
827,7 → 827,7
node = list_get_instance(cur, btree_node_t, leaf_link);
for (i = 0; i < node->keys; i++) {
used_pages += (count_t) node->value[i];
used_pages += (size_t) node->value[i];
}
}
 
853,10 → 853,10
node = list_get_instance(cur, btree_node_t, leaf_link);
for (i = 0; i < node->keys; i++) {
uintptr_t b = node->key[i];
count_t j;
size_t j;
pte_t *pte;
for (j = 0; j < (count_t) node->value[i]; j++) {
for (j = 0; j < (size_t) node->value[i]; j++) {
page_table_lock(as, false);
pte = page_mapping_find(as, b + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
903,9 → 903,9
node = list_get_instance(cur, btree_node_t, leaf_link);
for (i = 0; i < node->keys; i++) {
uintptr_t b = node->key[i];
count_t j;
size_t j;
for (j = 0; j < (count_t) node->value[i]; j++) {
for (j = 0; j < (size_t) node->value[i]; j++) {
page_table_lock(as, false);
 
/* Insert the new mapping */
1397,16 → 1397,16
*
* @return Zero on failure and non-zero on success.
*/
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
int used_space_insert(as_area_t *a, uintptr_t page, size_t count)
{
btree_node_t *leaf, *node;
count_t pages;
size_t pages;
unsigned int i;
 
ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
ASSERT(count);
 
pages = (count_t) btree_search(&a->used_space, page, &leaf);
pages = (size_t) btree_search(&a->used_space, page, &leaf);
if (pages) {
/*
* We hit the beginning of some used space.
1423,8 → 1423,8
if (node) {
uintptr_t left_pg = node->key[node->keys - 1];
uintptr_t right_pg = leaf->key[0];
count_t left_cnt = (count_t) node->value[node->keys - 1];
count_t right_cnt = (count_t) leaf->value[0];
size_t left_cnt = (size_t) node->value[node->keys - 1];
size_t right_cnt = (size_t) leaf->value[0];
/*
* Examine the possibility that the interval fits
1478,7 → 1478,7
}
} else if (page < leaf->key[0]) {
uintptr_t right_pg = leaf->key[0];
count_t right_cnt = (count_t) leaf->value[0];
size_t right_cnt = (size_t) leaf->value[0];
/*
* Investigate the border case in which the left neighbour does
1513,8 → 1513,8
if (node) {
uintptr_t left_pg = leaf->key[leaf->keys - 1];
uintptr_t right_pg = node->key[0];
count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
count_t right_cnt = (count_t) node->value[0];
size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
size_t right_cnt = (size_t) node->value[0];
/*
* Examine the possibility that the interval fits
1568,7 → 1568,7
}
} else if (page >= leaf->key[leaf->keys - 1]) {
uintptr_t left_pg = leaf->key[leaf->keys - 1];
count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
/*
* Investigate the border case in which the right neighbour
1606,8 → 1606,8
if (page < leaf->key[i]) {
uintptr_t left_pg = leaf->key[i - 1];
uintptr_t right_pg = leaf->key[i];
count_t left_cnt = (count_t) leaf->value[i - 1];
count_t right_cnt = (count_t) leaf->value[i];
size_t left_cnt = (size_t) leaf->value[i - 1];
size_t right_cnt = (size_t) leaf->value[i];
 
/*
* The interval fits between left_pg and right_pg.
1665,7 → 1665,7
}
}
 
panic("Inconsistency detected while adding %" PRIc " pages of used "
panic("Inconsistency detected while adding %" PRIs " pages of used "
"space at %p.", count, page);
}
 
1679,16 → 1679,16
*
* @return Zero on failure and non-zero on success.
*/
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
int used_space_remove(as_area_t *a, uintptr_t page, size_t count)
{
btree_node_t *leaf, *node;
count_t pages;
size_t pages;
unsigned int i;
 
ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
ASSERT(count);
 
pages = (count_t) btree_search(&a->used_space, page, &leaf);
pages = (size_t) btree_search(&a->used_space, page, &leaf);
if (pages) {
/*
* We are lucky, page is the beginning of some interval.
1717,7 → 1717,7
node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
if (node && page < leaf->key[0]) {
uintptr_t left_pg = node->key[node->keys - 1];
count_t left_cnt = (count_t) node->value[node->keys - 1];
size_t left_cnt = (size_t) node->value[node->keys - 1];
 
if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
count * PAGE_SIZE)) {
1733,7 → 1733,7
return 1;
} else if (page + count * PAGE_SIZE <
left_pg + left_cnt * PAGE_SIZE) {
count_t new_cnt;
size_t new_cnt;
/*
* The interval is contained in the rightmost
1757,7 → 1757,7
if (page > leaf->key[leaf->keys - 1]) {
uintptr_t left_pg = leaf->key[leaf->keys - 1];
count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
count * PAGE_SIZE)) {
1772,7 → 1772,7
return 1;
} else if (page + count * PAGE_SIZE < left_pg +
left_cnt * PAGE_SIZE) {
count_t new_cnt;
size_t new_cnt;
/*
* The interval is contained in the rightmost
1799,7 → 1799,7
for (i = 1; i < leaf->keys - 1; i++) {
if (page < leaf->key[i]) {
uintptr_t left_pg = leaf->key[i - 1];
count_t left_cnt = (count_t) leaf->value[i - 1];
size_t left_cnt = (size_t) leaf->value[i - 1];
 
/*
* Now the interval is between intervals corresponding
1819,7 → 1819,7
return 1;
} else if (page + count * PAGE_SIZE <
left_pg + left_cnt * PAGE_SIZE) {
count_t new_cnt;
size_t new_cnt;
/*
* The interval is contained in the
1844,7 → 1844,7
}
 
error:
panic("Inconsistency detected while removing %" PRIc " pages of used "
panic("Inconsistency detected while removing %" PRIs " pages of used "
"space from %p.", count, page);
}
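The used_space_remove() cases above repeatedly ask whether the removed range collides with a neighbouring interval via overlaps(left_pg, left_cnt * PAGE_SIZE, page, count * PAGE_SIZE). A minimal sketch of that half-open range check, written as a hypothetical stand-alone helper (not the kernel's overlaps()), only to make the PAGE_SIZE arithmetic explicit:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: two half-open byte ranges [s1, s1 + sz1) and
 * [s2, s2 + sz2) overlap iff each one starts before the other ends. */
static bool ranges_overlap(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
        return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

/* Mirroring the calls above: left_cnt and count are page counts, so both
 * are scaled by PAGE_SIZE before the byte-range comparison, e.g.
 * ranges_overlap(left_pg, left_cnt * PAGE_SIZE, page, count * PAGE_SIZE). */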
 
1943,7 → 1943,7
as_area_t *area = node->value[i];
mutex_lock(&area->lock);
printf("as_area: %p, base=%p, pages=%" PRIc
printf("as_area: %p, base=%p, pages=%" PRIs
" (%p - %p)\n", area, area->base, area->pages,
area->base, area->base + FRAMES2SIZE(area->pages));
mutex_unlock(&area->lock);
/trunk/kernel/generic/src/mm/frame.c
67,29 → 67,29
*/
mutex_t mem_avail_mtx;
condvar_t mem_avail_cv;
count_t mem_avail_req = 0; /**< Number of frames requested. */
count_t mem_avail_gen = 0; /**< Generation counter. */
size_t mem_avail_req = 0; /**< Number of frames requested. */
size_t mem_avail_gen = 0; /**< Generation counter. */
 
/********************/
/* Helper functions */
/********************/
 
static inline index_t frame_index(zone_t *zone, frame_t *frame)
static inline size_t frame_index(zone_t *zone, frame_t *frame)
{
return (index_t) (frame - zone->frames);
return (size_t) (frame - zone->frames);
}
 
static inline index_t frame_index_abs(zone_t *zone, frame_t *frame)
static inline size_t frame_index_abs(zone_t *zone, frame_t *frame)
{
return (index_t) (frame - zone->frames) + zone->base;
return (size_t) (frame - zone->frames) + zone->base;
}
 
static inline bool frame_index_valid(zone_t *zone, index_t index)
static inline bool frame_index_valid(zone_t *zone, size_t index)
{
return (index < zone->count);
}
 
static inline index_t make_frame_index(zone_t *zone, frame_t *frame)
static inline size_t make_frame_index(zone_t *zone, frame_t *frame)
{
return (frame - zone->frames);
}
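The helpers above (frame_index(), frame_index_abs(), make_frame_index()) derive a frame's index purely from pointer arithmetic: subtracting the zone's frame array base from a frame pointer yields an element count, which after this change lands in size_t rather than the former index_t. A self-contained sketch of the same idiom, using a generic struct instead of the kernel's frame_t:

#include <stddef.h>

struct item {
        int payload;
};

/* Pointer subtraction within one array yields the element distance
 * (a ptrdiff_t), which is then stored as an unsigned size_t index. */
static size_t item_index(const struct item *base, const struct item *it)
{
        return (size_t) (it - base);
}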
120,20 → 120,20
* @return Zone number on success, -1 on error.
*
*/
static count_t zones_insert_zone(pfn_t base, count_t count)
static size_t zones_insert_zone(pfn_t base, size_t count)
{
if (zones.count + 1 == ZONES_MAX) {
printf("Maximum zone count %u exceeded!\n", ZONES_MAX);
return (count_t) -1;
return (size_t) -1;
}
count_t i;
size_t i;
for (i = 0; i < zones.count; i++) {
/* Check for overlap */
if (overlaps(base, count,
zones.info[i].base, zones.info[i].count)) {
printf("Zones overlap!\n");
return (count_t) -1;
return (size_t) -1;
}
if (base < zones.info[i].base)
break;
140,7 → 140,7
}
/* Move other zones up */
count_t j;
size_t j;
for (j = zones.count; j > i; j--) {
zones.info[j] = zones.info[j - 1];
zones.info[j].buddy_system->data =
161,10 → 161,10
*
*/
#ifdef CONFIG_DEBUG
static count_t total_frames_free(void)
static size_t total_frames_free(void)
{
count_t total = 0;
count_t i;
size_t total = 0;
size_t i;
for (i = 0; i < zones.count; i++)
total += zones.info[i].free_count;
184,12 → 184,12
* @return Zone index or -1 if not found.
*
*/
count_t find_zone(pfn_t frame, count_t count, count_t hint)
size_t find_zone(pfn_t frame, size_t count, size_t hint)
{
if (hint >= zones.count)
hint = 0;
count_t i = hint;
size_t i = hint;
do {
if ((zones.info[i].base <= frame)
&& (zones.info[i].base + zones.info[i].count >= frame + count))
200,7 → 200,7
i = 0;
} while (i != hint);
return (count_t) -1;
return (size_t) -1;
}
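find_zone() above and find_free_zone() below share the same search shape: start at a caller-supplied hint, scan circularly through zones.info[], and report failure as (size_t) -1. A stand-alone sketch of that pattern, with an illustrative match callback standing in for the real zone checks:

#include <stdbool.h>
#include <stddef.h>

/* Circular search starting at 'hint'; wraps around once and returns
 * (size_t) -1 when no element satisfies the predicate. */
static size_t circular_find(bool (*match)(size_t idx), size_t count, size_t hint)
{
        if (count == 0)
                return (size_t) -1;
        if (hint >= count)
                hint = 0;

        size_t i = hint;
        do {
                if (match(i))
                        return i;
                i++;
                if (i >= count)
                        i = 0;
        } while (i != hint);

        return (size_t) -1;
}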
 
/** @return True if zone can allocate specified order */
220,12 → 220,12
* @param hint Preferred zone.
*
*/
static count_t find_free_zone(uint8_t order, zone_flags_t flags, count_t hint)
static size_t find_free_zone(uint8_t order, zone_flags_t flags, size_t hint)
{
if (hint >= zones.count)
hint = 0;
count_t i = hint;
size_t i = hint;
do {
/*
* Check whether the zone meets the search criteria.
243,7 → 243,7
i = 0;
} while (i != hint);
return (count_t) -1;
return (size_t) -1;
}
 
/**************************/
265,7 → 265,7
frame_t *frame = list_get_instance(child, frame_t, buddy_link);
zone_t *zone = (zone_t *) buddy->data;
index_t index = frame_index(zone, frame);
size_t index = frame_index(zone, frame);
do {
if (zone->frames[index].buddy_order != order)
return &zone->frames[index].buddy_link;
291,7 → 291,7
bool is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
index_t index;
size_t index;
if (is_left) {
index = (frame_index(zone, frame)) +
(1 << frame->buddy_order);
446,7 → 446,7
* @param frame_idx Frame index relative to zone.
*
*/
static void zone_frame_free(zone_t *zone, index_t frame_idx)
static void zone_frame_free(zone_t *zone, size_t frame_idx)
{
ASSERT(zone_flags_available(zone->flags));
467,7 → 467,7
}
 
/** Return frame from zone. */
static frame_t *zone_get_frame(zone_t *zone, index_t frame_idx)
static frame_t *zone_get_frame(zone_t *zone, size_t frame_idx)
{
ASSERT(frame_idx < zone->count);
return &zone->frames[frame_idx];
474,7 → 474,7
}
 
/** Mark frame in zone unavailable to allocation. */
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx)
static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
{
ASSERT(zone_flags_available(zone->flags));
503,7 → 503,7
* @param buddy Merged zone buddy.
*
*/
static void zone_merge_internal(count_t z1, count_t z2, zone_t *old_z1, buddy_system_t *buddy)
static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1, buddy_system_t *buddy)
{
ASSERT(zone_flags_available(zones.info[z1].flags));
ASSERT(zone_flags_available(zones.info[z2].flags));
529,7 → 529,7
+ buddy_conf_size(order));
/* This marks all frames busy */
count_t i;
size_t i;
for (i = 0; i < zones.info[z1].count; i++)
frame_initialize(&zones.info[z1].frames[i]);
599,11 → 599,11
* @param count Old zone frame count.
*
*/
static void return_config_frames(count_t znum, pfn_t pfn, count_t count)
static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
{
ASSERT(zone_flags_available(zones.info[znum].flags));
count_t cframes = SIZE2FRAMES(zone_conf_size(count));
size_t cframes = SIZE2FRAMES(zone_conf_size(count));
if ((pfn < zones.info[znum].base)
|| (pfn >= zones.info[znum].base + zones.info[znum].count))
614,7 → 614,7
frame = &zones.info[znum].frames[pfn - zones.info[znum].base];
ASSERT(!frame->buddy_order);
count_t i;
size_t i;
for (i = 0; i < cframes; i++) {
zones.info[znum].busy_count++;
zone_frame_free(&zones.info[znum],
634,17 → 634,17
* @param count Allocated frames in block.
*
*/
static void zone_reduce_region(count_t znum, pfn_t frame_idx, count_t count)
static void zone_reduce_region(size_t znum, pfn_t frame_idx, size_t count)
{
ASSERT(zone_flags_available(zones.info[znum].flags));
ASSERT(frame_idx + count < zones.info[znum].count);
uint8_t order = zones.info[znum].frames[frame_idx].buddy_order;
ASSERT((count_t) (1 << order) >= count);
ASSERT((size_t) (1 << order) >= count);
/* Reduce all blocks to order 0 */
count_t i;
for (i = 0; i < (count_t) (1 << order); i++) {
size_t i;
for (i = 0; i < (size_t) (1 << order); i++) {
frame_t *frame = &zones.info[znum].frames[i + frame_idx];
frame->buddy_order = 0;
if (!frame->refcount)
653,7 → 653,7
}
/* Free unneeded frames */
for (i = count; i < (count_t) (1 << order); i++)
for (i = count; i < (size_t) (1 << order); i++)
zone_frame_free(&zones.info[znum], i + frame_idx);
}
 
670,7 → 670,7
* The function uses
*
*/
bool zone_merge(count_t z1, count_t z2)
bool zone_merge(size_t z1, size_t z2)
{
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
733,7 → 733,7
zones.info[z2].count);
/* Move zones down */
count_t i;
size_t i;
for (i = z2 + 1; i < zones.count; i++) {
zones.info[i - 1] = zones.info[i];
zones.info[i - 1].buddy_system->data =
758,7 → 758,7
*/
void zone_merge_all(void)
{
count_t i = 0;
size_t i = 0;
while (i < zones.count) {
if (!zone_merge(i, i + 1))
i++;
776,7 → 776,7
* @return Initialized zone.
*
*/
static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, count_t count, zone_flags_t flags)
static void zone_construct(zone_t *zone, buddy_system_t *buddy, pfn_t start, size_t count, zone_flags_t flags)
{
zone->base = start;
zone->count = count;
799,7 → 799,7
zone->frames = (frame_t *) ((uint8_t *) zone->buddy_system +
buddy_conf_size(order));
count_t i;
size_t i;
for (i = 0; i < count; i++)
frame_initialize(&zone->frames[i]);
819,7 → 819,7
* @return Size of zone configuration info (in bytes).
*
*/
uintptr_t zone_conf_size(count_t count)
uintptr_t zone_conf_size(size_t count)
{
return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count)));
}
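zone_conf_size() above sizes the per-zone bookkeeping as count * sizeof(frame_t) plus the buddy allocator's own configuration area; zone_create() below then rounds that byte count up to whole frames with SIZE2FRAMES(). A rough stand-alone model of that calculation, with illustrative constants in place of the kernel's real frame_t size and buddy overhead:

#include <stddef.h>

#define FRAME_SIZE_B    4096    /* Illustrative frame size in bytes. */
#define FRAME_T_SIZE_B    64    /* Illustrative stand-in for sizeof(frame_t). */

/* Round a byte count up to whole frames, in the spirit of SIZE2FRAMES(). */
static size_t bytes_to_frames(size_t bytes)
{
        return (bytes + FRAME_SIZE_B - 1) / FRAME_SIZE_B;
}

/* Configuration frames needed for a zone of 'count' frames, given an
 * assumed buddy bookkeeping overhead in bytes. */
static size_t zone_conf_frames(size_t count, size_t buddy_overhead)
{
        return bytes_to_frames(count * FRAME_T_SIZE_B + buddy_overhead);
}

With these illustrative numbers a 65536-frame zone needs about 1024 configuration frames (65536 * 64 B = 4 MiB), plus whatever the buddy overhead rounds up to.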
840,7 → 840,7
* @return Zone number or -1 on error.
*
*/
count_t zone_create(pfn_t start, count_t count, pfn_t confframe, zone_flags_t flags)
size_t zone_create(pfn_t start, size_t count, pfn_t confframe, zone_flags_t flags)
{
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
855,7 → 855,7
/* If confframe is supposed to be inside our zone, then make sure
* it does not span kernel & init
*/
count_t confcount = SIZE2FRAMES(zone_conf_size(count));
size_t confcount = SIZE2FRAMES(zone_conf_size(count));
if ((confframe >= start) && (confframe < start + count)) {
for (; confframe < start + count; confframe++) {
uintptr_t addr = PFN2ADDR(confframe);
868,7 → 868,7
continue;
bool overlap = false;
count_t i;
size_t i;
for (i = 0; i < init.cnt; i++)
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(init.tasks[i].addr),
886,11 → 886,11
panic("Cannot find configuration data for zone.");
}
count_t znum = zones_insert_zone(start, count);
if (znum == (count_t) -1) {
size_t znum = zones_insert_zone(start, count);
if (znum == (size_t) -1) {
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
return (count_t) -1;
return (size_t) -1;
}
buddy_system_t *buddy = (buddy_system_t *) PA2KA(PFN2ADDR(confframe));
898,7 → 898,7
/* If the confdata lies inside the zone, mark it as unavailable */
if ((confframe >= start) && (confframe < start + count)) {
count_t i;
size_t i;
for (i = confframe; i < confframe + confcount; i++)
zone_mark_unavailable(&zones.info[znum],
i - zones.info[znum].base);
911,11 → 911,11
}
/* Non-available zone */
count_t znum = zones_insert_zone(start, count);
if (znum == (count_t) -1) {
size_t znum = zones_insert_zone(start, count);
if (znum == (size_t) -1) {
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
return (count_t) -1;
return (size_t) -1;
}
zone_construct(&zones.info[znum], NULL, start, count, flags);
930,14 → 930,14
/*******************/
 
/** Set parent of frame. */
void frame_set_parent(pfn_t pfn, void *data, count_t hint)
void frame_set_parent(pfn_t pfn, void *data, size_t hint)
{
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
count_t znum = find_zone(pfn, 1, hint);
size_t znum = find_zone(pfn, 1, hint);
ASSERT(znum != (count_t) -1);
ASSERT(znum != (size_t) -1);
zone_get_frame(&zones.info[znum],
pfn - zones.info[znum].base)->parent = data;
946,14 → 946,14
interrupts_restore(ipl);
}
 
void *frame_get_parent(pfn_t pfn, count_t hint)
void *frame_get_parent(pfn_t pfn, size_t hint)
{
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
count_t znum = find_zone(pfn, 1, hint);
size_t znum = find_zone(pfn, 1, hint);
ASSERT(znum != (count_t) -1);
ASSERT(znum != (size_t) -1);
void *res = zone_get_frame(&zones.info[znum],
pfn - zones.info[znum].base)->parent;
973,11 → 973,11
* @return Physical address of the allocated frame.
*
*/
void *frame_alloc_generic(uint8_t order, frame_flags_t flags, count_t *pzone)
void *frame_alloc_generic(uint8_t order, frame_flags_t flags, size_t *pzone)
{
count_t size = ((count_t) 1) << order;
size_t size = ((size_t) 1) << order;
ipl_t ipl;
count_t hint = pzone ? (*pzone) : 0;
size_t hint = pzone ? (*pzone) : 0;
loop:
ipl = interrupts_disable();
986,16 → 986,16
/*
* First, find suitable frame zone.
*/
count_t znum = find_free_zone(order,
size_t znum = find_free_zone(order,
FRAME_TO_ZONE_FLAGS(flags), hint);
/* If no memory is available, reclaim some slab memory;
if that does not help, reclaim all */
if ((znum == (count_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
count_t freed = slab_reclaim(0);
size_t freed = slab_reclaim(0);
ipl = interrupts_disable();
spinlock_lock(&zones.lock);
1004,7 → 1004,7
znum = find_free_zone(order,
FRAME_TO_ZONE_FLAGS(flags), hint);
if (znum == (count_t) -1) {
if (znum == (size_t) -1) {
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
1019,7 → 1019,7
}
}
if (znum == (count_t) -1) {
if (znum == (size_t) -1) {
if (flags & FRAME_ATOMIC) {
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
1027,7 → 1027,7
}
#ifdef CONFIG_DEBUG
count_t avail = total_frames_free();
size_t avail = total_frames_free();
#endif
spinlock_unlock(&zones.lock);
1038,8 → 1038,8
*/
#ifdef CONFIG_DEBUG
printf("Thread %" PRIu64 " waiting for %" PRIc " frames, "
"%" PRIc " available.\n", THREAD->tid, size, avail);
printf("Thread %" PRIu64 " waiting for %" PRIs " frames, "
"%" PRIs " available.\n", THREAD->tid, size, avail);
#endif
mutex_lock(&mem_avail_mtx);
1048,7 → 1048,7
mem_avail_req = min(mem_avail_req, size);
else
mem_avail_req = size;
count_t gen = mem_avail_gen;
size_t gen = mem_avail_gen;
while (gen == mem_avail_gen)
condvar_wait(&mem_avail_cv, &mem_avail_mtx);
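The block above is the sleeping side of frame_alloc_generic()'s out-of-memory path: it records how many frames it needs in mem_avail_req, samples the generation counter, and sleeps until a free operation bumps mem_avail_gen. A stand-alone sketch of that generation-counter handshake, using POSIX primitives in place of the kernel's mutex/condvar:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t avail_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t avail_cv = PTHREAD_COND_INITIALIZER;
static size_t avail_gen = 0;    /* Bumped whenever memory is freed. */

/* Allocator side: sleep until at least one free event has happened
 * after the generation counter was sampled. */
static void wait_for_free_event(void)
{
        pthread_mutex_lock(&avail_mtx);
        size_t gen = avail_gen;
        while (gen == avail_gen)
                pthread_cond_wait(&avail_cv, &avail_mtx);
        pthread_mutex_unlock(&avail_mtx);
}

/* Freeing side: advance the generation and wake all waiters. */
static void announce_free_event(void)
{
        pthread_mutex_lock(&avail_mtx);
        avail_gen++;
        pthread_cond_broadcast(&avail_cv);
        pthread_mutex_unlock(&avail_mtx);
}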
1095,9 → 1095,9
* First, find host frame zone for addr.
*/
pfn_t pfn = ADDR2PFN(frame);
count_t znum = find_zone(pfn, 1, NULL);
size_t znum = find_zone(pfn, 1, NULL);
ASSERT(znum != (count_t) -1);
ASSERT(znum != (size_t) -1);
zone_frame_free(&zones.info[znum], pfn - zones.info[znum].base);
1134,9 → 1134,9
/*
* First, find host frame zone for addr.
*/
count_t znum = find_zone(pfn, 1, NULL);
size_t znum = find_zone(pfn, 1, NULL);
ASSERT(znum != (count_t) -1);
ASSERT(znum != (size_t) -1);
zones.info[znum].frames[pfn - zones.info[znum].base].refcount++;
1145,15 → 1145,15
}
 
/** Mark given range unavailable in frame zones. */
void frame_mark_unavailable(pfn_t start, count_t count)
void frame_mark_unavailable(pfn_t start, size_t count)
{
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
count_t i;
size_t i;
for (i = 0; i < count; i++) {
count_t znum = find_zone(start + i, 1, 0);
if (znum == (count_t) -1) /* PFN not found */
size_t znum = find_zone(start + i, 1, 0);
if (znum == (size_t) -1) /* PFN not found */
continue;
zone_mark_unavailable(&zones.info[znum],
1182,7 → 1182,7
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
SIZE2FRAMES(config.stack_size));
count_t i;
size_t i;
for (i = 0; i < init.cnt; i++) {
pfn_t pfn = ADDR2PFN(KA2PA(init.tasks[i].addr));
frame_mark_unavailable(pfn,
1207,7 → 1207,7
spinlock_lock(&zones.lock);
uint64_t total = 0;
count_t i;
size_t i;
for (i = 0; i < zones.count; i++)
total += (uint64_t) FRAMES2SIZE(zones.info[i].count);
1241,7 → 1241,7
* the listing).
*/
count_t i;
size_t i;
for (i = 0;; i++) {
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
1253,10 → 1253,10
}
uintptr_t base = PFN2ADDR(zones.info[i].base);
count_t count = zones.info[i].count;
size_t count = zones.info[i].count;
zone_flags_t flags = zones.info[i].flags;
count_t free_count = zones.info[i].free_count;
count_t busy_count = zones.info[i].busy_count;
size_t free_count = zones.info[i].free_count;
size_t busy_count = zones.info[i].busy_count;
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
1263,7 → 1263,7
bool available = zone_flags_available(flags);
printf("%-2" PRIc, i);
printf("%-2" PRIs, i);
#ifdef __32_BITS__
printf(" %10p", base);
1273,13 → 1273,13
printf(" %18p", base);
#endif
printf(" %12" PRIc " %c%c%c ", count,
printf(" %12" PRIs " %c%c%c ", count,
available ? 'A' : ' ',
(flags & ZONE_RESERVED) ? 'R' : ' ',
(flags & ZONE_FIRMWARE) ? 'F' : ' ');
if (available)
printf("%12" PRIc " %12" PRIc,
printf("%12" PRIs " %12" PRIs,
free_count, busy_count);
printf("\n");
1291,13 → 1291,13
* @param num Zone base address or zone number.
*
*/
void zone_print_one(count_t num)
void zone_print_one(size_t num)
{
ipl_t ipl = interrupts_disable();
spinlock_lock(&zones.lock);
count_t znum = (count_t) -1;
size_t znum = (size_t) -1;
count_t i;
size_t i;
for (i = 0; i < zones.count; i++) {
if ((i == num) || (PFN2ADDR(zones.info[i].base) == num)) {
znum = i;
1305,7 → 1305,7
}
}
if (znum == (count_t) -1) {
if (znum == (size_t) -1) {
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
printf("Zone not found.\n");
1314,9 → 1314,9
uintptr_t base = PFN2ADDR(zones.info[i].base);
zone_flags_t flags = zones.info[i].flags;
count_t count = zones.info[i].count;
count_t free_count = zones.info[i].free_count;
count_t busy_count = zones.info[i].busy_count;
size_t count = zones.info[i].count;
size_t free_count = zones.info[i].free_count;
size_t busy_count = zones.info[i].busy_count;
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
1323,9 → 1323,9
bool available = zone_flags_available(flags);
printf("Zone number: %" PRIc "\n", znum);
printf("Zone number: %" PRIs "\n", znum);
printf("Zone base address: %p\n", base);
printf("Zone size: %" PRIc " frames (%" PRIs " KiB)\n", count,
printf("Zone size: %" PRIs " frames (%" PRIs " KiB)\n", count,
SIZE2KB(FRAMES2SIZE(count)));
printf("Zone flags: %c%c%c\n",
available ? 'A' : ' ',
1333,9 → 1333,9
(flags & ZONE_FIRMWARE) ? 'F' : ' ');
if (available) {
printf("Allocated space: %" PRIc " frames (%" PRIs " KiB)\n",
printf("Allocated space: %" PRIs " frames (%" PRIs " KiB)\n",
busy_count, SIZE2KB(FRAMES2SIZE(busy_count)));
printf("Available space: %" PRIc " frames (%" PRIs " KiB)\n",
printf("Available space: %" PRIs " frames (%" PRIs " KiB)\n",
free_count, SIZE2KB(FRAMES2SIZE(free_count)));
}
}
/trunk/kernel/generic/src/mm/backend_elf.c
82,7 → 82,7
elf_segment_header_t *entry = area->backend_data.segment;
btree_node_t *leaf;
uintptr_t base, frame, page, start_anon;
index_t i;
size_t i;
bool dirty = false;
 
if (!as_area_check_access(area, access))
234,7 → 234,7
elf_header_t *elf = area->backend_data.elf;
elf_segment_header_t *entry = area->backend_data.segment;
uintptr_t base, start_anon;
index_t i;
size_t i;
 
ASSERT((page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
(page < entry->p_vaddr + entry->p_memsz));
304,7 → 304,7
for (i = 0; i < node->keys; i++) {
uintptr_t base = node->key[i];
count_t count = (count_t) node->value[i];
size_t count = (size_t) node->value[i];
unsigned int j;
/*
/trunk/kernel/generic/src/ipc/event.c
64,8 → 64,8
}
}
 
static int
event_subscribe(event_type_t evno, unative_t method, answerbox_t *answerbox)
static int event_subscribe(event_type_t evno, unative_t method,
answerbox_t *answerbox)
{
if (evno >= EVENT_END)
return ELIMIT;
122,8 → 122,7
}
}
 
void
event_notify(event_type_t evno, unative_t a1, unative_t a2, unative_t a3,
void event_notify(event_type_t evno, unative_t a1, unative_t a2, unative_t a3,
unative_t a4, unative_t a5)
{
ASSERT(evno < EVENT_END);
/trunk/kernel/arch/sparc64/include/types.h
46,8 → 46,6
typedef unsigned long uint64_t;
 
typedef uint64_t size_t;
typedef uint64_t count_t;
typedef uint64_t index_t;
 
typedef uint64_t uintptr_t;
typedef uint64_t pfn_t;
60,11 → 58,9
typedef struct {
} fncptr_t;
 
/**< Formats for uintptr_t, size_t, count_t and index_t */
/**< Formats for uintptr_t and size_t */
#define PRIp "llx"
#define PRIs "llu"
#define PRIc "llu"
#define PRIi "llu"
 
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
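With count_t and index_t gone, their PRIc/PRIi format macros disappear as well, and every printout of a count or index now goes through PRIs, which each architecture defines to match its size_t typedef ("llu" here on sparc64, "lu" or "u" on the other ports below). A small self-contained illustration of the string-pasting idiom; the conditional is only a stand-in for the per-architecture types.h headers:

#include <stdio.h>
#include <stddef.h>

/* Illustrative only: the real definitions live in each arch's types.h. */
#if defined(__LP64__)
#define PRIs "lu"
#else
#define PRIs "u"
#endif

int main(void)
{
        size_t frames = 1024;

        /* Adjacent string literals merge, so on an LP64 build this
         * expands to "Zone size: %lu frames\n". */
        printf("Zone size: %" PRIs " frames\n", frames);
        return 0;
}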
/trunk/kernel/arch/sparc64/include/mm/tlb.h
322,7 → 322,7
* @return Current value of specified IMMU TLB Data Access
* Register.
*/
static inline uint64_t itlb_data_access_read(index_t entry)
static inline uint64_t itlb_data_access_read(size_t entry)
{
itlb_data_access_addr_t reg;
336,7 → 336,7
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void itlb_data_access_write(index_t entry, uint64_t value)
static inline void itlb_data_access_write(size_t entry, uint64_t value)
{
itlb_data_access_addr_t reg;
353,7 → 353,7
* @return Current value of specified DMMU TLB Data Access
* Register.
*/
static inline uint64_t dtlb_data_access_read(index_t entry)
static inline uint64_t dtlb_data_access_read(size_t entry)
{
dtlb_data_access_addr_t reg;
367,7 → 367,7
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void dtlb_data_access_write(index_t entry, uint64_t value)
static inline void dtlb_data_access_write(size_t entry, uint64_t value)
{
dtlb_data_access_addr_t reg;
383,7 → 383,7
*
* @return Current value of specified IMMU TLB Tag Read Register.
*/
static inline uint64_t itlb_tag_read_read(index_t entry)
static inline uint64_t itlb_tag_read_read(size_t entry)
{
itlb_tag_read_addr_t tag;
 
398,7 → 398,7
*
* @return Current value of specified DMMU TLB Tag Read Register.
*/
static inline uint64_t dtlb_tag_read_read(index_t entry)
static inline uint64_t dtlb_tag_read_read(size_t entry)
{
dtlb_tag_read_addr_t tag;
 
418,7 → 418,7
* @return Current value of specified IMMU TLB Data Access
* Register.
*/
static inline uint64_t itlb_data_access_read(int tlb, index_t entry)
static inline uint64_t itlb_data_access_read(int tlb, size_t entry)
{
itlb_data_access_addr_t reg;
433,7 → 433,7
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void itlb_data_access_write(int tlb, index_t entry,
static inline void itlb_data_access_write(int tlb, size_t entry,
uint64_t value)
{
itlb_data_access_addr_t reg;
453,7 → 453,7
* @return Current value of specified DMMU TLB Data Access
* Register.
*/
static inline uint64_t dtlb_data_access_read(int tlb, index_t entry)
static inline uint64_t dtlb_data_access_read(int tlb, size_t entry)
{
dtlb_data_access_addr_t reg;
469,7 → 469,7
* @param entry TLB Entry index.
* @param value Value to be written.
*/
static inline void dtlb_data_access_write(int tlb, index_t entry,
static inline void dtlb_data_access_write(int tlb, size_t entry,
uint64_t value)
{
dtlb_data_access_addr_t reg;
488,7 → 488,7
*
* @return Current value of specified IMMU TLB Tag Read Register.
*/
static inline uint64_t itlb_tag_read_read(int tlb, index_t entry)
static inline uint64_t itlb_tag_read_read(int tlb, size_t entry)
{
itlb_tag_read_addr_t tag;
 
505,7 → 505,7
*
* @return Current value of specified DMMU TLB Tag Read Register.
*/
static inline uint64_t dtlb_tag_read_read(int tlb, index_t entry)
static inline uint64_t dtlb_tag_read_read(int tlb, size_t entry)
{
dtlb_tag_read_addr_t tag;
 
/trunk/kernel/arch/sparc64/include/mm/tsb.h
160,9 → 160,9
struct as;
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t, index_t index);
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro);
extern void tsb_invalidate(struct as *as, uintptr_t page, size_t pages);
extern void itsb_pte_copy(struct pte *t, size_t index);
extern void dtsb_pte_copy(struct pte *t, size_t index, bool ro);
 
#endif /* !def __ASM__ */
 
/trunk/kernel/arch/sparc64/src/smp/smp.c
61,7 → 61,7
void smp_init(void)
{
ofw_tree_node_t *node;
count_t cnt = 0;
size_t cnt = 0;
if (is_us() || is_us_iii()) {
node = ofw_tree_find_child_by_device_type(cpus_parent(), "cpu");
/trunk/kernel/arch/sparc64/src/mm/tlb.c
54,8 → 54,8
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *, index_t, bool);
static void itlb_pte_copy(pte_t *, index_t);
static void dtlb_pte_copy(pte_t *, size_t, bool);
static void itlb_pte_copy(pte_t *, size_t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
const char *);
130,7 → 130,7
* @param ro If true, the entry will be created read-only, regardless
* of its w field.
*/
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
167,7 → 167,7
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itlb_pte_copy(pte_t *t, index_t index)
void itlb_pte_copy(pte_t *t, size_t index)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
200,7 → 200,7
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
245,7 → 245,7
{
uintptr_t page_8k;
uintptr_t page_16k;
index_t index;
size_t index;
pte_t *t;
 
page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
309,7 → 309,7
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
uintptr_t page_16k;
index_t index;
size_t index;
pte_t *t;
 
page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
579,7 → 579,7
* @param page First page which to sweep out from ITLB and DTLB.
* @param cnt Number of ITLB and DTLB entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
unsigned int i;
tlb_context_reg_t pc_save, ctx;
/trunk/kernel/arch/sparc64/src/mm/as.c
89,7 → 89,7
* The count must be calculated with respect to the emulated 16K page
* size.
*/
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
size_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
101,7 → 101,7
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
tsb_invalidate(as, 0, (count_t) -1);
tsb_invalidate(as, 0, (size_t) -1);
#endif
return 0;
}
/trunk/kernel/arch/sparc64/src/mm/tsb.c
50,13 → 50,14
*
* @param as Address space.
* @param page First page to invalidate in TSB.
* @param pages Number of pages to invalidate. Value of (count_t) -1 means the
* @param pages Number of pages to invalidate. Value of (size_t) -1 means the
* whole TSB.
*/
void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
void tsb_invalidate(as_t *as, uintptr_t page, size_t pages)
{
index_t i0, i;
count_t cnt;
size_t i0;
size_t i;
size_t cnt;
ASSERT(as->arch.itsb && as->arch.dtsb);
63,7 → 64,7
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
 
if (pages == (count_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
cnt = ITSB_ENTRY_COUNT;
else
cnt = pages * 2;
81,11 → 82,11
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K subpage.
*/
void itsb_pte_copy(pte_t *t, index_t index)
void itsb_pte_copy(pte_t *t, size_t index)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
size_t entry;
 
ASSERT(index <= 1);
127,11 → 128,11
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
void dtsb_pte_copy(pte_t *t, size_t index, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
size_t entry;
ASSERT(index <= 1);
 
/trunk/kernel/arch/sparc64/src/drivers/fhc.c
71,7 → 71,7
if (!prop || !prop->value)
return NULL;
count_t regs = prop->size / sizeof(ofw_central_reg_t);
size_t regs = prop->size / sizeof(ofw_central_reg_t);
if (regs + 1 < UART_IMAP_REG)
return NULL;
 
/trunk/kernel/arch/sparc64/src/drivers/pci.c
91,7 → 91,7
return NULL;
 
ofw_upa_reg_t *reg = prop->value;
count_t regs = prop->size / sizeof(ofw_upa_reg_t);
size_t regs = prop->size / sizeof(ofw_upa_reg_t);
 
if (regs < SABRE_INTERNAL_REG + 1)
return NULL;
138,7 → 138,7
return NULL;
 
ofw_upa_reg_t *reg = prop->value;
count_t regs = prop->size / sizeof(ofw_upa_reg_t);
size_t regs = prop->size / sizeof(ofw_upa_reg_t);
 
if (regs < PSYCHO_INTERNAL_REG + 1)
return NULL;
/trunk/kernel/arch/ia64/include/types.h
54,8 → 54,6
} uint128_t;
 
typedef uint64_t size_t;
typedef uint64_t count_t;
typedef uint64_t index_t;
 
typedef uint64_t uintptr_t;
typedef uint64_t pfn_t;
72,8 → 70,6
 
#define PRIp "lx" /**< Format for uintptr_t. */
#define PRIs "lu" /**< Format for size_t. */
#define PRIc "lu" /**< Format for count_t. */
#define PRIi "lu" /**< Format for index_t. */
 
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
/trunk/kernel/arch/ia64/include/mm/page.h
240,7 → 240,7
*
* @return Current contents of rr[i].
*/
static inline uint64_t rr_read(index_t i)
static inline uint64_t rr_read(size_t i)
{
uint64_t ret;
ASSERT(i < REGION_REGISTERS);
253,7 → 253,7
* @param i Region register index.
* @param v Value to be written to rr[i].
*/
static inline void rr_write(index_t i, uint64_t v)
static inline void rr_write(size_t i, uint64_t v)
{
ASSERT(i < REGION_REGISTERS);
asm volatile (
/trunk/kernel/arch/ia64/include/mm/tlb.h
76,12 → 76,12
extern void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry);
extern void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry);
 
extern void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr);
extern void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr);
extern void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr);
extern void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, size_t tr);
extern void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr);
extern void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr);
 
extern void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr);
extern void dtr_purge(uintptr_t page, count_t width);
extern void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, size_t tr);
extern void dtr_purge(uintptr_t page, size_t width);
 
extern void dtc_pte_copy(pte_t *t);
extern void itc_pte_copy(pte_t *t);
/trunk/kernel/arch/ia64/src/mm/tlb.c
100,7 → 100,7
}
 
 
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
region_register rr;
bool restore_rr = false;
267,7 → 267,7
* @param tr Translation register.
*/
void
itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
tr_mapping_insert(va, asid, entry, false, tr);
}
281,7 → 281,7
* @param tr Translation register.
*/
void
dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
tr_mapping_insert(va, asid, entry, true, tr);
}
298,7 → 298,7
*/
void
tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
index_t tr)
size_t tr)
{
region_register rr;
bool restore_rr = false;
353,7 → 353,7
*/
void
dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
index_t tr)
size_t tr)
{
tlb_entry_t entry;
382,7 → 382,7
* @param page Virtual page address including VRN bits.
* @param width Width of the purge in bits.
*/
void dtr_purge(uintptr_t page, count_t width)
void dtr_purge(uintptr_t page, size_t width)
{
asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}
/trunk/kernel/arch/ia64/src/mm/vhpt.c
53,7 → 53,7
void vhpt_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
region_register rr_save, rr;
index_t vrn;
size_t vrn;
rid_t rid;
uint64_t tag;
 
/trunk/kernel/arch/ia64/src/mm/page.c
131,7 → 131,7
vhpt_entry_t *vhpt_hash(uintptr_t page, asid_t asid)
{
region_register rr_save, rr;
index_t vrn;
size_t vrn;
rid_t rid;
vhpt_entry_t *v;
 
176,7 → 176,7
bool vhpt_compare(uintptr_t page, asid_t asid, vhpt_entry_t *v)
{
region_register rr_save, rr;
index_t vrn;
size_t vrn;
rid_t rid;
bool match;
 
223,7 → 223,7
int flags)
{
region_register rr_save, rr;
index_t vrn;
size_t vrn;
rid_t rid;
uint64_t tag;
 
/trunk/kernel/arch/arm32/include/types.h
53,8 → 53,6
typedef unsigned long long uint64_t;
 
typedef uint32_t size_t;
typedef uint32_t count_t;
typedef uint32_t index_t;
 
typedef uint32_t uintptr_t;
typedef uint32_t pfn_t;
69,8 → 67,6
 
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
/trunk/kernel/arch/arm32/include/mm/page.h
94,21 → 94,21
 
/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
get_pt_level0_flags((pte_level0_t *) (ptl0), (index_t) (i))
get_pt_level0_flags((pte_level0_t *) (ptl0), (size_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
get_pt_level1_flags((pte_level1_t *) (ptl3), (index_t) (i))
get_pt_level1_flags((pte_level1_t *) (ptl3), (size_t) (i))
 
/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
set_pt_level0_flags((pte_level0_t *) (ptl0), (index_t) (i), (x))
set_pt_level0_flags((pte_level0_t *) (ptl0), (size_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
set_pt_level1_flags((pte_level1_t *) (ptl3), (index_t) (i), (x))
set_pt_level1_flags((pte_level1_t *) (ptl3), (size_t) (i), (x))
 
/* Macros for querying the last-level PTE entries. */
#define PTE_VALID_ARCH(pte) \
204,7 → 204,7
* @param pt Level 0 page table.
* @param i Index of the entry to return.
*/
static inline int get_pt_level0_flags(pte_level0_t *pt, index_t i)
static inline int get_pt_level0_flags(pte_level0_t *pt, size_t i)
{
pte_level0_t *p = &pt[i];
int np = (p->descriptor_type == PTE_DESCRIPTOR_NOT_PRESENT);
219,7 → 219,7
* @param pt Level 1 page table.
* @param i Index of the entry to return.
*/
static inline int get_pt_level1_flags(pte_level1_t *pt, index_t i)
static inline int get_pt_level1_flags(pte_level1_t *pt, size_t i)
{
pte_level1_t *p = &pt[i];
 
244,7 → 244,7
* @param i index of the entry to be changed
* @param flags new flags
*/
static inline void set_pt_level0_flags(pte_level0_t *pt, index_t i, int flags)
static inline void set_pt_level0_flags(pte_level0_t *pt, size_t i, int flags)
{
pte_level0_t *p = &pt[i];
 
272,7 → 272,7
* @param i Index of the entry to be changed.
* @param flags New flags.
*/
static inline void set_pt_level1_flags(pte_level1_t *pt, index_t i, int flags)
static inline void set_pt_level1_flags(pte_level1_t *pt, size_t i, int flags)
{
pte_level1_t *p = &pt[i];
/trunk/kernel/arch/arm32/src/mm/tlb.c
80,7 → 80,7
* @param page Address of the first page whose entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, count_t cnt)
void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, size_t cnt)
{
unsigned int i;
 
/trunk/kernel/arch/ppc32/include/types.h
46,8 → 46,6
typedef unsigned long long uint64_t;
 
typedef uint32_t size_t;
typedef uint32_t count_t;
typedef uint32_t index_t;
 
typedef uint32_t uintptr_t;
typedef uint32_t pfn_t;
60,11 → 58,9
typedef struct {
} fncptr_t;
 
/**< Formats for uintptr_t, size_t, count_t and index_t */
/**< Formats for uintptr_t and size_t */
#define PRIp "x"
#define PRIs "u"
#define PRIc "u"
#define PRIi "u"
 
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
/trunk/kernel/arch/ppc32/include/mm/page.h
102,21 → 102,21
 
/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
get_pt_flags((pte_t *) (ptl0), (index_t) (i))
get_pt_flags((pte_t *) (ptl0), (size_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
get_pt_flags((pte_t *) (ptl3), (index_t) (i))
get_pt_flags((pte_t *) (ptl3), (size_t) (i))
 
/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl0), (size_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl3), (size_t) (i), (x))
 
/* Macros for querying the last-level PTEs. */
#define PTE_VALID_ARCH(pte) (*((uint32_t *) (pte)) != 0)
130,7 → 130,7
#include <mm/mm.h>
#include <arch/interrupt.h>
 
static inline int get_pt_flags(pte_t *pt, index_t i)
static inline int get_pt_flags(pte_t *pt, size_t i)
{
pte_t *p = &pt[i];
143,7 → 143,7
(p->global << PAGE_GLOBAL_SHIFT));
}
 
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
static inline void set_pt_flags(pte_t *pt, size_t i, int flags)
{
pte_t *p = &pt[i];
/trunk/kernel/arch/ppc32/src/mm/tlb.c
549,7 → 549,7
}
 
 
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
// TODO
tlb_invalidate_all();
/trunk/kernel/arch/ppc32/src/mm/frame.c
57,7 → 57,7
void frame_arch_init(void)
{
pfn_t minconf = 2;
count_t i;
size_t i;
pfn_t start, conf;
size_t size;
/trunk/kernel/arch/amd64/include/types.h
46,8 → 46,6
typedef unsigned long long uint64_t;
 
typedef uint64_t size_t;
typedef uint64_t count_t;
typedef uint64_t index_t;
 
typedef uint64_t uintptr_t;
typedef uint64_t pfn_t;
60,11 → 58,9
typedef struct {
} fncptr_t;
 
/**< Formats for uintptr_t, size_t, count_t and index_t */
/**< Formats for uintptr_t and size_t */
#define PRIp "llx"
#define PRIs "llu"
#define PRIc "llu"
#define PRIi "llu"
 
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
/trunk/kernel/arch/amd64/include/proc/task.h
40,7 → 40,7
 
typedef struct {
/** I/O Permission bitmap Generation counter. */
count_t iomapver;
size_t iomapver;
/** I/O Permission bitmap. */
bitmap_t iomap;
} task_arch_t;
/trunk/kernel/arch/amd64/include/mm/page.h
112,33 → 112,33
#define SET_PTL0_ADDRESS_ARCH(ptl0) \
(write_cr3((uintptr_t) (ptl0)))
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) \
set_pt_addr((pte_t *) (ptl0), (index_t) (i), a)
set_pt_addr((pte_t *) (ptl0), (size_t) (i), a)
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) \
set_pt_addr((pte_t *) (ptl1), (index_t) (i), a)
set_pt_addr((pte_t *) (ptl1), (size_t) (i), a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) \
set_pt_addr((pte_t *) (ptl2), (index_t) (i), a)
set_pt_addr((pte_t *) (ptl2), (size_t) (i), a)
#define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) \
set_pt_addr((pte_t *) (ptl3), (index_t) (i), a)
set_pt_addr((pte_t *) (ptl3), (size_t) (i), a)
 
/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
get_pt_flags((pte_t *) (ptl0), (index_t) (i))
get_pt_flags((pte_t *) (ptl0), (size_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
get_pt_flags((pte_t *) (ptl1), (index_t) (i))
get_pt_flags((pte_t *) (ptl1), (size_t) (i))
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
get_pt_flags((pte_t *) (ptl2), (index_t) (i))
get_pt_flags((pte_t *) (ptl2), (size_t) (i))
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
get_pt_flags((pte_t *) (ptl3), (index_t) (i))
get_pt_flags((pte_t *) (ptl3), (size_t) (i))
 
/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl0), (size_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x) \
set_pt_flags((pte_t *) (ptl1), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl1), (size_t) (i), (x))
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) \
set_pt_flags((pte_t *) (ptl2), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl2), (size_t) (i), (x))
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl3), (size_t) (i), (x))
 
/* Macros for querying the last-level PTE entries. */
#define PTE_VALID_ARCH(p) \
176,7 → 176,7
*/
#define PFERR_CODE_ID (1 << 4)
 
static inline int get_pt_flags(pte_t *pt, index_t i)
static inline int get_pt_flags(pte_t *pt, size_t i)
{
pte_t *p = &pt[i];
189,7 → 189,7
p->global << PAGE_GLOBAL_SHIFT);
}
 
static inline void set_pt_addr(pte_t *pt, index_t i, uintptr_t a)
static inline void set_pt_addr(pte_t *pt, size_t i, uintptr_t a)
{
pte_t *p = &pt[i];
 
197,7 → 197,7
p->addr_32_51 = a >> 32;
}
 
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
static inline void set_pt_flags(pte_t *pt, size_t i, int flags)
{
pte_t *p = &pt[i];
/trunk/kernel/arch/amd64/include/cpu.h
64,7 → 64,7
int stepping;
tss_t *tss;
count_t iomapver_copy; /** Copy of TASK's I/O Permission bitmap generation count. */
size_t iomapver_copy; /** Copy of TASK's I/O Permission bitmap generation count. */
} cpu_arch_t;
 
struct star_msr {
/trunk/kernel/arch/amd64/src/ddi/ddi.c
56,7 → 56,7
*/
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
count_t bits;
size_t bits;
bits = ioaddr + size;
if (bits > IO_PORTS)
98,7 → 98,7
/*
* Enable the range and we are done.
*/
bitmap_clear_range(&task->arch.iomap, (index_t) ioaddr, (count_t) size);
bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t) size);
/*
* Increment I/O Permission bitmap generation counter.
117,11 → 117,11
*/
void io_perm_bitmap_install(void)
{
count_t bits;
size_t bits;
ptr_16_64_t cpugdtr;
descriptor_t *gdt_p;
tss_descriptor_t *tss_desc;
count_t ver;
size_t ver;
/* First, copy the I/O Permission Bitmap. */
spinlock_lock(&TASK->lock);
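ddi_iospace_enable_arch() above grants a task access to an I/O port range by clearing the corresponding bits in its I/O permission bitmap (a cleared bit in the TSS I/O bitmap means the port is allowed), and io_perm_bitmap_install() later copies that bitmap into the CPU's TSS. A minimal sketch of the underlying bit-range clear over a byte-backed bitmap; this is not the kernel's bitmap_clear_range(), just the bit arithmetic it implies:

#include <stddef.h>
#include <stdint.h>

/* Clear 'count' bits starting at bit 'start' in a byte-backed bitmap. */
static void bits_clear_range(uint8_t *map, size_t start, size_t count)
{
        size_t bit;

        for (bit = start; bit < start + count; bit++)
                map[bit / 8] &= (uint8_t) ~(1u << (bit % 8));
}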
/trunk/kernel/arch/amd64/src/interrupt.c
101,7 → 101,7
static void gp_fault(int n, istate_t *istate)
{
if (TASK) {
count_t ver;
size_t ver;
 
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;
/trunk/kernel/arch/mips32/include/types.h
46,8 → 46,6
typedef unsigned long long uint64_t;
 
typedef uint32_t size_t;
typedef uint32_t count_t;
typedef uint32_t index_t;
 
typedef uint32_t uintptr_t;
typedef uint32_t pfn_t;
62,8 → 60,6
 
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
/trunk/kernel/arch/mips32/include/arch.h
42,7 → 42,7
 
#include <typedefs.h>
 
extern count_t cpu_count;
extern size_t cpu_count;
 
typedef struct {
uintptr_t addr;
/trunk/kernel/arch/mips32/include/mm/page.h
112,21 → 112,21
 
/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
get_pt_flags((pte_t *) (ptl0), (index_t) (i))
get_pt_flags((pte_t *) (ptl0), (size_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
get_pt_flags((pte_t *) (ptl3), (index_t) (i))
get_pt_flags((pte_t *) (ptl3), (size_t) (i))
 
/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl0), (size_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl3), (size_t) (i), (x))
 
/* Last-level info macros. */
#define PTE_VALID_ARCH(pte) (*((uint32_t *) (pte)) != 0)
140,7 → 140,7
#include <mm/mm.h>
#include <arch/exception.h>
 
static inline int get_pt_flags(pte_t *pt, index_t i)
static inline int get_pt_flags(pte_t *pt, size_t i)
{
pte_t *p = &pt[i];
153,7 → 153,7
(p->g << PAGE_GLOBAL_SHIFT));
}
 
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
static inline void set_pt_flags(pte_t *pt, size_t i, int flags)
{
pte_t *p = &pt[i];
/trunk/kernel/arch/mips32/include/debugger.h
53,7 → 53,7
unative_t instruction; /**< Original instruction */
unative_t nextinstruction; /**< Original instruction following break */
int flags; /**< Flags regarding breakpoint */
count_t counter;
size_t counter;
void (*bkfunc)(void *b, istate_t *istate);
} bpinfo_t;
 
/trunk/kernel/arch/mips32/src/mm/tlb.c
560,7 → 560,7
* @param page First page whose TLB entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
unsigned int i;
ipl_t ipl;
/trunk/kernel/arch/mips32/src/mm/frame.c
62,7 → 62,7
pfn_t count;
} phys_region_t;
 
static count_t phys_regions_count = 0;
static size_t phys_regions_count = 0;
static phys_region_t phys_regions[MAX_REGIONS];
 
/** Check whether frame is available
119,7 → 119,7
/* Init tasks */
bool safe = true;
count_t i;
size_t i;
for (i = 0; i < init.cnt; i++)
if (overlaps(frame << ZERO_PAGE_WIDTH, ZERO_PAGE_SIZE,
KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
174,7 → 174,7
cp0_entry_lo1_write(0);
cp0_entry_hi_write(0);
 
count_t i;
size_t i;
for (i = 0; i < TLB_ENTRY_COUNT; i++) {
cp0_index_write(i);
tlbwi();
251,7 → 251,7
printf("Base Size\n");
printf("---------- ----------\n");
count_t i;
size_t i;
for (i = 0; i < phys_regions_count; i++) {
printf("%#010x %10u\n",
PFN2ADDR(phys_regions[i].start), PFN2ADDR(phys_regions[i].count));
/trunk/kernel/arch/mips32/src/mips32.c
76,7 → 76,7
/* Stack pointer saved when entering user mode */
uintptr_t supervisor_sp __attribute__ ((section (".text")));
 
count_t cpu_count = 0;
size_t cpu_count = 0;
 
/** Performs mips32-specific initialization before main_bsp() is called. */
void arch_pre_main(void *entry __attribute__((unused)), bootinfo_t *bootinfo)
84,7 → 84,7
/* Setup usermode */
init.cnt = bootinfo->cnt;
count_t i;
size_t i;
for (i = 0; i < min3(bootinfo->cnt, TASKMAP_MAX_RECORDS, CONFIG_INIT_TASKS); i++) {
init.tasks[i].addr = bootinfo->tasks[i].addr;
init.tasks[i].size = bootinfo->tasks[i].size;
/trunk/kernel/arch/ia32/include/types.h
46,8 → 46,6
typedef unsigned long long uint64_t;
 
typedef uint32_t size_t;
typedef uint32_t count_t;
typedef uint32_t index_t;
 
typedef uint32_t uintptr_t;
typedef uint32_t pfn_t;
62,8 → 60,6
 
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
/trunk/kernel/arch/ia32/include/smp/smp.h
39,10 → 39,10
 
/** SMP config operations interface. */
struct smp_config_operations {
count_t (* cpu_count)(void); /**< Return number of detected processors. */
bool (* cpu_enabled)(index_t i); /**< Check whether the processor of index i is enabled. */
bool (*cpu_bootstrap)(index_t i); /**< Check whether the processor of index i is BSP. */
uint8_t (*cpu_apic_id)(index_t i); /**< Return APIC ID of the processor of index i. */
size_t (* cpu_count)(void); /**< Return number of detected processors. */
bool (* cpu_enabled)(size_t i); /**< Check whether the processor of index i is enabled. */
bool (*cpu_bootstrap)(size_t i); /**< Check whether the processor of index i is BSP. */
uint8_t (*cpu_apic_id)(size_t i); /**< Return APIC ID of the processor of index i. */
int (*irq_to_pin)(unsigned int irq); /**< Return mapping between irq and APIC pin. */
};
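The smp_config_operations interface above hides the SMP configuration source (the MPS tables in this revision; other sources elsewhere) behind a handful of size_t-indexed callbacks. A hypothetical consumer loop, just to show how the callbacks compose; the struct mirrors the declaration above, and the counting function is illustrative, not HelenOS code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Mirrors the interface declared above. */
struct smp_config_ops {
        size_t (*cpu_count)(void);
        bool (*cpu_enabled)(size_t i);
        bool (*cpu_bootstrap)(size_t i);
        uint8_t (*cpu_apic_id)(size_t i);
        int (*irq_to_pin)(unsigned int irq);
};

/* Count the usable application processors reported by the config source. */
static size_t usable_ap_count(const struct smp_config_ops *ops)
{
        size_t usable = 0;
        size_t i;

        for (i = 0; i < ops->cpu_count(); i++) {
                if (!ops->cpu_enabled(i))
                        continue;       /* Disabled in the tables. */
                if (ops->cpu_bootstrap(i))
                        continue;       /* The BSP is already running. */
                usable++;               /* ops->cpu_apic_id(i) would seed AP startup. */
        }
        return usable;
}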
 
/trunk/kernel/arch/ia32/include/proc/task.h
40,7 → 40,7
 
typedef struct {
/** I/O Permission bitmap Generation counter. */
count_t iomapver;
size_t iomapver;
/** I/O Permission bitmap. */
bitmap_t iomap;
} task_arch_t;
/trunk/kernel/arch/ia32/include/mm/page.h
95,21 → 95,21
 
/* Get PTE flags accessors for each level. */
#define GET_PTL1_FLAGS_ARCH(ptl0, i) \
get_pt_flags((pte_t *) (ptl0), (index_t) (i))
get_pt_flags((pte_t *) (ptl0), (size_t) (i))
#define GET_PTL2_FLAGS_ARCH(ptl1, i) \
PAGE_PRESENT
#define GET_PTL3_FLAGS_ARCH(ptl2, i) \
PAGE_PRESENT
#define GET_FRAME_FLAGS_ARCH(ptl3, i) \
get_pt_flags((pte_t *) (ptl3), (index_t) (i))
get_pt_flags((pte_t *) (ptl3), (size_t) (i))
 
/* Set PTE flags accessors for each level. */
#define SET_PTL1_FLAGS_ARCH(ptl0, i, x) \
set_pt_flags((pte_t *) (ptl0), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl0), (size_t) (i), (x))
#define SET_PTL2_FLAGS_ARCH(ptl1, i, x)
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x)
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) \
set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x))
set_pt_flags((pte_t *) (ptl3), (size_t) (i), (x))
 
/* Macros for querying the last level entries. */
#define PTE_VALID_ARCH(p) \
145,7 → 145,7
/** When bit on this position is 1, a reserved bit was set in page directory. */
#define PFERR_CODE_RSVD (1 << 3)
 
static inline int get_pt_flags(pte_t *pt, index_t i)
static inline int get_pt_flags(pte_t *pt, size_t i)
{
pte_t *p = &pt[i];
158,7 → 158,7
p->global << PAGE_GLOBAL_SHIFT);
}
 
static inline void set_pt_flags(pte_t *pt, index_t i, int flags)
static inline void set_pt_flags(pte_t *pt, size_t i, int flags)
{
pte_t *p = &pt[i];
/trunk/kernel/arch/ia32/include/cpu.h
57,7 → 57,7
unsigned int stepping;
tss_t *tss;
count_t iomapver_copy; /** Copy of TASK's I/O Permission bitmap generation count. */
size_t iomapver_copy; /** Copy of TASK's I/O Permission bitmap generation count. */
} cpu_arch_t;
 
#endif
/trunk/kernel/arch/ia32/src/smp/mps.c
86,10 → 86,10
/*
* Implementation of IA-32 SMP configuration interface.
*/
static count_t get_cpu_count(void);
static bool is_cpu_enabled(index_t i);
static bool is_bsp(index_t i);
static uint8_t get_cpu_apic_id(index_t i);
static size_t get_cpu_count(void);
static bool is_cpu_enabled(size_t i);
static bool is_bsp(size_t i);
static uint8_t get_cpu_apic_id(size_t i);
static int mps_irq_to_pin(unsigned int irq);
 
struct smp_config_operations mps_config_operations = {
100,24 → 100,24
.irq_to_pin = mps_irq_to_pin
};
 
count_t get_cpu_count(void)
size_t get_cpu_count(void)
{
return processor_entry_cnt;
}
 
bool is_cpu_enabled(index_t i)
bool is_cpu_enabled(size_t i)
{
ASSERT(i < processor_entry_cnt);
return (bool) ((processor_entries[i].cpu_flags & 0x01) == 0x01);
}
 
bool is_bsp(index_t i)
bool is_bsp(size_t i)
{
ASSERT(i < processor_entry_cnt);
return (bool) ((processor_entries[i].cpu_flags & 0x02) == 0x02);
}
 
uint8_t get_cpu_apic_id(index_t i)
uint8_t get_cpu_apic_id(size_t i)
{
ASSERT(i < processor_entry_cnt);
return processor_entries[i].l_apic_id;
/trunk/kernel/arch/ia32/src/ddi/ddi.c
57,7 → 57,7
*/
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
count_t bits;
size_t bits;
 
bits = ioaddr + size;
if (bits > IO_PORTS)
99,7 → 99,7
/*
* Enable the range and we are done.
*/
bitmap_clear_range(&task->arch.iomap, (index_t) ioaddr, (count_t) size);
bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, (size_t) size);
 
/*
* Increment I/O Permission bitmap generation counter.
118,10 → 118,10
*/
void io_perm_bitmap_install(void)
{
count_t bits;
size_t bits;
ptr_16_32_t cpugdtr;
descriptor_t *gdt_p;
count_t ver;
size_t ver;
 
/* First, copy the I/O Permission Bitmap. */
spinlock_lock(&TASK->lock);
/trunk/kernel/arch/ia32/src/mm/tlb.c
59,7 → 59,7
* @param page Address of the first page whose entry is to be invalidated.
* @param cnt Number of entries to invalidate.
*/
void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, count_t cnt)
void tlb_invalidate_pages(asid_t asid __attribute__((unused)), uintptr_t page, size_t cnt)
{
unsigned int i;
 
/trunk/kernel/arch/ia32/src/mm/frame.c
70,7 → 70,7
#endif
pfn_t pfn;
count_t count;
size_t count;
if (e820table[i].type == MEMMAP_MEMORY_AVAILABLE) {
/* To be safe, make available zone possibly smaller */
/trunk/kernel/arch/ia32/src/interrupt.c
101,7 → 101,7
static void gp_fault(int n __attribute__((unused)), istate_t *istate)
{
if (TASK) {
count_t ver;
size_t ver;
spinlock_lock(&TASK->lock);
ver = TASK->arch.iomapver;