Subversion Repositories HelenOS

Compare Revisions

Rev 3622 → Rev 3623

/branches/tracing/kernel/generic/include/proc/task.h
53,6 → 53,7
#include <mm/tlb.h>
#include <proc/scheduler.h>
#include <udebug/udebug.h>
#include <ipc/kbox.h>
 
#define TASK_NAME_BUFLEN 20
 
98,19 → 99,13
atomic_t active_calls;
 
#ifdef CONFIG_UDEBUG
/** Debugging stuff */
/** Debugging stuff. */
udebug_task_t udebug;
 
/** Kernel answerbox */
answerbox_t kernel_box;
/** Thread used to service kernel answerbox */
struct thread *kb_thread;
/** Kbox thread creation vs. begin of cleanup mutual exclusion */
mutex_t kb_cleanup_lock;
/** True if cleanup of kbox has already started */
bool kb_finished;
/** Kernel answerbox. */
kbox_t kb;
#endif
 
/** Architecture specific task data. */
task_arch_t arch;
/branches/tracing/kernel/generic/include/udebug/udebug.h
185,9 → 185,7
/** BEGIN operation in progress (waiting for threads to stop) */
UDEBUG_TS_BEGINNING,
/** Debugger fully connected */
UDEBUG_TS_ACTIVE,
/** Task is shutting down, no more debug activities allowed */
UDEBUG_TS_SHUTDOWN
UDEBUG_TS_ACTIVE
} udebug_task_state_t;
 
/** Debugging part of task_t structure.
232,7 → 230,7
unative_t a4, unative_t a5, unative_t a6, unative_t id, unative_t rc,
bool end_variant);
 
void udebug_thread_b_event(struct thread *t);
void udebug_thread_b_event_attach(struct thread *t, struct task *ta);
void udebug_thread_e_event(void);
 
void udebug_stoppable_begin(void);
/branches/tracing/kernel/generic/include/ipc/kbox.h
37,6 → 37,18
 
#include <typedefs.h>
 
/** Kernel answerbox structure. */
typedef struct kbox {
/** The answerbox itself. */
answerbox_t box;
/** Thread used to service the answerbox. */
struct thread *thread;
/** Kbox thread creation vs. begin of cleanup mutual exclusion. */
mutex_t cleanup_lock;
/** True if cleanup of kbox has already started. */
bool finished;
} kbox_t;
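The cleanup_lock and finished fields serialize kbox thread creation against a
concurrently starting cleanup. A minimal sketch of the intended discipline
(hypothetical helper, not part of this revision; it mirrors ipc_connect_kbox()
and ipc_kbox_cleanup() further down in this changeset):

static int kbox_use_sketch(struct task *ta)
{
	mutex_lock(&ta->kb.cleanup_lock);
	if (ta->kb.finished) {
		/* Cleanup has already started - refuse. */
		mutex_unlock(&ta->kb.cleanup_lock);
		return EINVAL;
	}
	/* Safe to connect a phone and/or create the kbox thread here. */
	mutex_unlock(&ta->kb.cleanup_lock);
	return EOK;
}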
 
extern int ipc_connect_kbox(task_id_t);
extern void ipc_kbox_cleanup(void);
 
/branches/tracing/kernel/generic/src/proc/task.c
164,10 → 164,10
udebug_task_init(&ta->udebug);
 
/* Init kbox stuff */
ipc_answerbox_init(&ta->kernel_box, ta);
ta->kb_thread = NULL;
mutex_initialize(&ta->kb_cleanup_lock, MUTEX_PASSIVE);
ta->kb_finished = false;
ipc_answerbox_init(&ta->kb.box, ta);
ta->kb.thread = NULL;
mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
ta->kb.finished = false;
#endif
 
ipc_answerbox_init(&ta->answerbox, ta);
/branches/tracing/kernel/generic/src/proc/thread.c
763,14 → 763,20
return (unative_t) rc;
}
}
#ifdef CONFIG_UDEBUG
/*
* Generate udebug THREAD_B event and attach the thread.
* This must be done atomically (with the debug locks held);
* otherwise we would either miss some threads or receive
* THREAD_B events for threads that already existed and
* could be detected with THREAD_READ before.
*/
udebug_thread_b_event_attach(t, TASK);
#else
thread_attach(t, TASK);
#endif
thread_ready(t);
 
#ifdef CONFIG_UDEBUG
/* Generate udebug THREAD_B event */
udebug_thread_b_event(t);
#endif
 
return 0;
} else
free(kernel_uarg);
/branches/tracing/kernel/generic/src/syscall/syscall.c
103,9 → 103,7
unative_t rc;
 
#ifdef CONFIG_UDEBUG
istate_t fake_state;
 
THREAD->udebug.uspace_state = &fake_state;
THREAD->udebug.uspace_state = NULL;
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false);
#endif
if (id < SYSCALL_END) {
121,9 → 119,14
 
#ifdef CONFIG_UDEBUG
udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true);
THREAD->udebug.uspace_state = NULL;
#endif
 
/*
* Stopping point needed for tasks that only invoke non-blocking
* system calls; such a task would otherwise never pass through a
* stoppable section and could not be stopped by a debugger.
*/
udebug_stoppable_begin();
udebug_stoppable_end();
#endif
return rc;
}
 
/branches/tracing/kernel/generic/src/ipc/kbox.c
48,14 → 48,20
ipl_t ipl;
bool have_kb_thread;
 
/* Only hold kb_cleanup_lock while setting kb_finished - this is enough */
mutex_lock(&TASK->kb_cleanup_lock);
TASK->kb_finished = true;
mutex_unlock(&TASK->kb_cleanup_lock);
/*
* Only hold kb.cleanup_lock while setting kb.finished -
* this is enough.
*/
mutex_lock(&TASK->kb.cleanup_lock);
TASK->kb.finished = true;
mutex_unlock(&TASK->kb.cleanup_lock);
 
have_kb_thread = (TASK->kb_thread != NULL);
have_kb_thread = (TASK->kb.thread != NULL);
 
/* From now on nobody will try to connect phones or attach kbox threads */
/*
* From now on nobody will try to connect phones or attach
* kbox threads.
*/
 
/*
* Disconnect all phones connected to our kbox. Passing true for
63,7 → 69,7
* disconnected phone. This ensures the kbox thread is going to
* wake up and terminate.
*/
ipc_answerbox_slam_phones(&TASK->kernel_box, have_kb_thread);
ipc_answerbox_slam_phones(&TASK->kb.box, have_kb_thread);
 
/*
* If the task was being debugged, clean up debugging session.
77,18 → 83,18
interrupts_restore(ipl);
if (have_kb_thread) {
LOG("join kb_thread..\n");
thread_join(TASK->kb_thread);
thread_detach(TASK->kb_thread);
LOG("join kb.thread..\n");
thread_join(TASK->kb.thread);
thread_detach(TASK->kb.thread);
LOG("join done\n");
TASK->kb_thread = NULL;
TASK->kb.thread = NULL;
}
 
/* Answer all messages in 'calls' and 'dispatched_calls' queues */
spinlock_lock(&TASK->kernel_box.lock);
ipc_cleanup_call_list(&TASK->kernel_box.dispatched_calls);
ipc_cleanup_call_list(&TASK->kernel_box.calls);
spinlock_unlock(&TASK->kernel_box.lock);
/* Answer all messages in 'calls' and 'dispatched_calls' queues. */
spinlock_lock(&TASK->kb.box.lock);
ipc_cleanup_call_list(&TASK->kb.box.dispatched_calls);
ipc_cleanup_call_list(&TASK->kb.box.calls);
spinlock_unlock(&TASK->kb.box.lock);
}
 
/** Handle hangup message in kbox.
105,7 → 111,7
 
/* Was it our debugger, who hung up? */
if (call->sender == TASK->udebug.debugger) {
/* Terminate debugging session (if any) */
/* Terminate debugging session (if any). */
LOG("kbox: terminate debug session\n");
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
118,7 → 124,7
 
LOG("kbox: continue with hangup message\n");
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
 
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
130,13 → 136,13
*/
 
/* Only detach kbox thread unless already terminating. */
mutex_lock(&TASK->kb_cleanup_lock);
if (&TASK->kb_finished == false) {
mutex_lock(&TASK->kb.cleanup_lock);
if (TASK->kb.finished == false) {
/* Detach kbox thread so it gets freed from memory. */
thread_detach(TASK->kb_thread);
TASK->kb_thread = NULL;
thread_detach(TASK->kb.thread);
TASK->kb.thread = NULL;
}
mutex_unlock(&TASK->kb_cleanup_lock);
mutex_unlock(&TASK->kb.cleanup_lock);
 
LOG("phone list is empty\n");
*last = true;
166,7 → 172,7
done = false;
 
while (!done) {
call = ipc_wait_for_call(&TASK->kernel_box, SYNCH_NO_TIMEOUT,
call = ipc_wait_for_call(&TASK->kb.box, SYNCH_NO_TIMEOUT,
SYNCH_FLAGS_NONE);
 
if (call == NULL)
201,10 → 207,10
/**
* Connect phone to a task kernel-box specified by id.
*
* Note that this is not completely atomic. For optimisation reasons,
* The task might start cleaning up kbox after the phone has been connected
* and before a kbox thread has been created. This must be taken into account
* in the cleanup code.
* Note that this is not completely atomic. For optimisation reasons, the task
* might start cleaning up kbox after the phone has been connected and before
* a kbox thread has been created. This must be taken into account in the
* cleanup code.
*
* @return Phone id on success, or negative error code.
*/
230,44 → 236,45
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
 
mutex_lock(&ta->kb_cleanup_lock);
mutex_lock(&ta->kb.cleanup_lock);
 
if (atomic_predec(&ta->refcount) == 0) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
task_destroy(ta);
return ENOENT;
}
 
if (ta->kb_finished != false) {
mutex_unlock(&ta->kb_cleanup_lock);
if (ta->kb.finished != false) {
mutex_unlock(&ta->kb.cleanup_lock);
return EINVAL;
}
 
newphid = phone_alloc();
if (newphid < 0) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
return ELIMIT;
}
 
/* Connect the newly allocated phone to the kbox */
ipc_phone_connect(&TASK->phones[newphid], &ta->kernel_box);
ipc_phone_connect(&TASK->phones[newphid], &ta->kb.box);
 
if (ta->kb_thread != NULL) {
mutex_unlock(&ta->kb_cleanup_lock);
if (ta->kb.thread != NULL) {
mutex_unlock(&ta->kb.cleanup_lock);
return newphid;
}
 
/* Create a kbox thread */
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0, "kbox", false);
kb_thread = thread_create(kbox_thread_proc, NULL, ta, 0,
"kbox", false);
if (!kb_thread) {
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
return ENOMEM;
}
 
ta->kb_thread = kb_thread;
ta->kb.thread = kb_thread;
thread_ready(kb_thread);
 
mutex_unlock(&ta->kb_cleanup_lock);
mutex_unlock(&ta->kb.cleanup_lock);
 
return newphid;
}
/branches/tracing/kernel/generic/src/udebug/udebug_ipc.c
132,7 → 132,7
rc = udebug_begin(call);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
142,7 → 142,7
*/
if (rc != 0) {
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
}
 
158,7 → 158,7
rc = udebug_end();
 
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process a SET_EVMASK call.
175,7 → 175,7
rc = udebug_set_evmask(mask);
 
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
 
194,7 → 194,7
rc = udebug_go(t, call);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
}
213,7 → 213,7
 
rc = udebug_stop(t, call);
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process a THREAD_READ call.
241,7 → 241,7
rc = udebug_thread_read(&buffer, buf_size, &n);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
268,7 → 268,7
IPC_SET_ARG3(call->data, total_bytes);
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process an ARGS_READ call.
288,7 → 288,7
rc = udebug_args_read(t, &buffer);
if (rc != EOK) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
306,7 → 306,7
IPC_SET_ARG2(call->data, 6 * sizeof(unative_t));
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
static void udebug_receive_regs_read(call_t *call)
325,7 → 325,7
rc = udebug_regs_read(t, buffer);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
345,7 → 345,7
 
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
static void udebug_receive_regs_write(call_t *call)
360,7 → 360,7
rc = udebug_regs_write(t, call->buffer);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
370,7 → 370,7
free(call->buffer);
call->buffer = NULL;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
/** Process an MEM_READ call.
393,7 → 393,7
rc = udebug_mem_read(uspace_src, size, &buffer);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
405,7 → 405,7
IPC_SET_ARG2(call->data, size);
call->buffer = buffer;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
static void udebug_receive_mem_write(call_t *call)
422,7 → 422,7
rc = udebug_mem_write(uspace_dst, call->buffer, size);
if (rc < 0) {
IPC_SET_RETVAL(call->data, rc);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
 
430,7 → 430,7
free(call->buffer);
call->buffer = NULL;
 
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
}
 
 
455,7 → 455,7
*/
if (TASK->udebug.debugger != call->sender) {
IPC_SET_RETVAL(call->data, EINVAL);
ipc_answer(&TASK->kernel_box, call);
ipc_answer(&TASK->kb.box, call);
return;
}
}
/branches/tracing/kernel/generic/src/udebug/udebug.c
305,15 → 305,23
udebug_wait_for_go(&THREAD->udebug.go_wq);
}
 
/** Thread-creation event hook.
/** Thread-creation event hook combined with attaching the thread.
*
* Must be called when a new userspace thread is created in the debugged
* task. Generates a THREAD_B event.
* task. Generates a THREAD_B event. Also attaches the thread @a t
* to the task @a ta.
*
* This is necessary to avoid a race condition in which the BEGIN and
* THREAD_READ requests could be handled in between attaching the thread and
* checking whether the task is being debugged (to send the THREAD_B event).
* We could then either miss threads, or both report a thread via THREAD_READ
* and deliver a THREAD_B event for it.
*
* @param t Structure of the thread being created. Not locked, as the
* thread is not executing yet.
* @param ta Task to which the thread should be attached.
*/
void udebug_thread_b_event(struct thread *t)
void udebug_thread_b_event_attach(struct thread *t, struct task *ta)
{
call_t *call;
 
320,6 → 328,8
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
thread_attach(t, ta);
 
LOG("udebug_thread_b_event\n");
LOG("- check state\n");
 
/branches/tracing/kernel/arch/ia64/include/bootinfo.h
33,6 → 33,13
 
#define CONFIG_INIT_TASKS 32
 
#define MEMMAP_ITEMS 128
 
#define EFI_MEMMAP_FREE_MEM 0
#define EFI_MEMMAP_IO 1
#define EFI_MEMMAP_IO_PORTS 2
 
 
typedef struct {
void *addr;
unsigned long size;
43,10 → 50,19
binit_task_t tasks[CONFIG_INIT_TASKS];
} binit_t;
 
typedef struct {
unsigned int type;
unsigned long base;
unsigned long size;
} efi_memmap_item_t;
 
 
typedef struct {
binit_t taskmap;
 
efi_memmap_item_t memmap[MEMMAP_ITEMS];
unsigned int memmap_items;
 
unsigned long * sapic;
unsigned long sys_freq;
unsigned long freq_scale;
/branches/tracing/kernel/arch/ia64/src/smp/smp.c
87,7 → 87,6
myid=ia64_get_cpu_id();
myeid=ia64_get_cpu_eid();
 
printf("Not sending to ID:%d,EID:%d",myid,myeid);
for(id=0;id<256;id++)
for(eid=0;eid<256;eid++)
115,7 → 114,6
for(eid=0;eid<256;eid++)
if(cpu_by_id_eid_list[id][eid]==1){
config.cpu_count++;
printf("Found CPU ID:%d EDI:%d\n",id,eid);
cpu_by_id_eid_list[id][eid]=2;
 
}
/branches/tracing/kernel/arch/ia64/src/mm/frame.c
36,14 → 36,20
#include <mm/frame.h>
#include <config.h>
#include <panic.h>
#include <arch/bootinfo.h>
#include <align.h>
#include <macros.h>
 
/*
* This is Ski-specific and certainly not sufficient
* for real ia64 systems that provide memory map.
*/
#define MEMORY_SIZE (64 * 1024 * 1024)
#define MEMORY_SIZE (256 * 1024 * 1024)
#define MEMORY_BASE (0 * 64 * 1024 * 1024)
 
#define KERNEL_RESERVED_AREA_BASE (0x4400000)
#define KERNEL_RESERVED_AREA_SIZE (16*1024*1024)
 
#define ONE_TO_ONE_MAPPING_SIZE (256*1048576) // Mapped at start
 
#define ROM_BASE 0xa0000 //For ski
50,22 → 56,41
#define ROM_SIZE (384 * 1024) //For ski
void poke_char(int x,int y,char ch, char c);
 
#define MIN_ZONE_SIZE (64*1024)
 
uintptr_t last_frame;
#define MINCONF 1
 
void frame_arch_init(void)
{
 
if(config.cpu_active==1)
{
zone_create(MEMORY_BASE >> FRAME_WIDTH, SIZE2FRAMES(MEMORY_SIZE), (MEMORY_SIZE) >> FRAME_WIDTH, 0);
if (config.cpu_active == 1) {
unsigned int i;
for (i = 0; i < bootinfo->memmap_items; i++) {
if (bootinfo->memmap[i].type == EFI_MEMMAP_FREE_MEM) {
uint64_t base = bootinfo->memmap[i].base;
uint64_t size = bootinfo->memmap[i].size;
uint64_t abase = ALIGN_UP(base, FRAME_SIZE);
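/* Compensate for rounding the base up: shrink the size by the slack. */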
if (size > FRAME_SIZE)
size -= abase - base;
 
if (size > MIN_ZONE_SIZE) {
zone_create(abase >> FRAME_WIDTH, size >> FRAME_WIDTH,
max(MINCONF, abase >> FRAME_WIDTH), 0);
}
}
}
//zone_create(MEMORY_BASE >> FRAME_WIDTH, SIZE2FRAMES(MEMORY_SIZE), (MEMORY_SIZE) >> FRAME_WIDTH, 0);
/*
* Blacklist ROM regions.
*/
//frame_mark_unavailable(ADDR2PFN(ROM_BASE), SIZE2FRAMES(ROM_SIZE));
frame_mark_unavailable(ADDR2PFN(ROM_BASE), SIZE2FRAMES(ROM_SIZE));
 
frame_mark_unavailable(ADDR2PFN(0), SIZE2FRAMES(1048576));
last_frame=SIZE2FRAMES((VRN_KERNEL<<VRN_SHIFT)+ONE_TO_ONE_MAPPING_SIZE);
frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE), SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));
}
}
 
/branches/tracing/uspace/srv/fs/fat/fat_idx.c
214,7 → 214,7
};
 
/** Allocate a VFS index which is not currently in use. */
static bool fat_idx_alloc(dev_handle_t dev_handle, fs_index_t *index)
static bool fat_index_alloc(dev_handle_t dev_handle, fs_index_t *index)
{
unused_t *u;
276,7 → 276,7
}
 
/** Free a VFS index, which is no longer in use. */
static void fat_idx_free(dev_handle_t dev_handle, fs_index_t index)
static void fat_index_free(dev_handle_t dev_handle, fs_index_t index)
{
unused_t *u;
 
338,7 → 338,7
futex_up(&unused_futex);
}
 
static fat_idx_t *fat_idx_get_new_core(dev_handle_t dev_handle)
static fat_idx_t *fat_idx_create(dev_handle_t dev_handle)
{
fat_idx_t *fidx;
 
345,7 → 345,7
fidx = (fat_idx_t *) malloc(sizeof(fat_idx_t));
if (!fidx)
return NULL;
if (!fat_idx_alloc(dev_handle, &fidx->index)) {
if (!fat_index_alloc(dev_handle, &fidx->index)) {
free(fidx);
return NULL;
}
366,7 → 366,7
fat_idx_t *fidx;
 
futex_down(&used_futex);
fidx = fat_idx_get_new_core(dev_handle);
fidx = fat_idx_create(dev_handle);
if (!fidx) {
futex_up(&used_futex);
return NULL;
400,7 → 400,7
if (l) {
fidx = hash_table_get_instance(l, fat_idx_t, uph_link);
} else {
fidx = fat_idx_get_new_core(dev_handle);
fidx = fat_idx_create(dev_handle);
if (!fidx) {
futex_up(&used_futex);
return NULL;
444,6 → 444,33
return fidx;
}
 
/** Destroy the index structure.
*
* @param idx The index structure to be destroyed.
*/
void fat_idx_destroy(fat_idx_t *idx)
{
unsigned long ikey[] = {
[UIH_DH_KEY] = idx->dev_handle,
[UIH_INDEX_KEY] = idx->index,
};
 
assert(idx->pfc == FAT_CLST_RES0);
 
futex_down(&used_futex);
/*
* Since we can only free unlinked nodes, the index structure is not
* present in the position hash (uph). We therefore hash it out from
* the index hash only.
*/
hash_table_remove(&ui_hash, ikey, 2);
futex_up(&used_futex);
/* Release the VFS index. */
fat_index_free(idx->dev_handle, idx->index);
/* Deallocate the structure. */
free(idx);
}
 
int fat_idx_init(void)
{
if (!hash_table_create(&up_hash, UPH_BUCKETS, 3, &uph_ops))
/branches/tracing/uspace/srv/fs/fat/fat.h
203,10 → 203,12
extern void fat_read(ipc_callid_t, ipc_call_t *);
extern void fat_write(ipc_callid_t, ipc_call_t *);
extern void fat_truncate(ipc_callid_t, ipc_call_t *);
extern void fat_destroy(ipc_callid_t, ipc_call_t *);
 
extern fat_idx_t *fat_idx_get_new(dev_handle_t);
extern fat_idx_t *fat_idx_get_by_pos(dev_handle_t, fat_cluster_t, unsigned);
extern fat_idx_t *fat_idx_get_by_index(dev_handle_t, fs_index_t);
extern void fat_idx_destroy(fat_idx_t *);
 
extern int fat_idx_init(void);
extern void fat_idx_fini(void);
/branches/tracing/uspace/srv/fs/fat/fat_ops.c
216,8 → 216,31
return nodep;
}
 
/*
* Forward declarations of FAT libfs operations.
*/
static void *fat_node_get(dev_handle_t, fs_index_t);
static void fat_node_put(void *);
static void *fat_create_node(dev_handle_t, int);
static int fat_destroy_node(void *);
static bool fat_link(void *, void *, const char *);
static int fat_unlink(void *, void *);
static void *fat_match(void *, const char *);
static fs_index_t fat_index_get(void *);
static size_t fat_size_get(void *);
static unsigned fat_lnkcnt_get(void *);
static bool fat_has_children(void *);
static void *fat_root_get(dev_handle_t);
static char fat_plb_get_char(unsigned);
static bool fat_is_directory(void *);
static bool fat_is_file(void *node);
 
/*
* FAT libfs operations.
*/
 
/** Instantiate a FAT in-core node. */
static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
void *node;
fat_idx_t *idxp;
231,7 → 254,7
return node;
}
 
static void fat_node_put(void *node)
void fat_node_put(void *node)
{
fat_node_t *nodep = (fat_node_t *)node;
bool destroy = false;
257,7 → 280,7
free(node);
}
 
static void *fat_create_node(dev_handle_t dev_handle, int flags)
void *fat_create_node(dev_handle_t dev_handle, int flags)
{
fat_idx_t *idxp;
fat_node_t *nodep;
288,22 → 311,47
return nodep;
}
 
static int fat_destroy_node(void *node)
int fat_destroy_node(void *node)
{
return ENOTSUP; /* not supported at the moment */
fat_node_t *nodep = (fat_node_t *)node;
fat_bs_t *bs;
 
/*
* The node is not reachable from the file system. This means that the
* link count should be zero and that the index structure cannot be
* found in the position hash. Obviously, we need to lock neither the
* node nor its index structure.
*/
assert(nodep->lnkcnt == 0);
 
/*
* The node may not have any children.
*/
assert(fat_has_children(node) == false);
 
bs = block_bb_get(nodep->idx->dev_handle);
if (nodep->firstc != FAT_CLST_RES0) {
assert(nodep->size);
/* Free all clusters allocated to the node. */
fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
}
 
fat_idx_destroy(nodep->idx);
free(nodep);
return EOK;
}
 
static bool fat_link(void *prnt, void *chld, const char *name)
bool fat_link(void *prnt, void *chld, const char *name)
{
return false; /* not supported at the moment */
}
 
static int fat_unlink(void *prnt, void *chld)
int fat_unlink(void *prnt, void *chld)
{
return ENOTSUP; /* not supported at the moment */
}
 
static void *fat_match(void *prnt, const char *component)
void *fat_match(void *prnt, const char *component)
{
fat_bs_t *bs;
fat_node_t *parentp = (fat_node_t *)prnt;
370,7 → 418,7
return NULL;
}
 
static fs_index_t fat_index_get(void *node)
fs_index_t fat_index_get(void *node)
{
fat_node_t *fnodep = (fat_node_t *)node;
if (!fnodep)
378,17 → 426,17
return fnodep->idx->index;
}
 
static size_t fat_size_get(void *node)
size_t fat_size_get(void *node)
{
return ((fat_node_t *)node)->size;
}
 
static unsigned fat_lnkcnt_get(void *node)
unsigned fat_lnkcnt_get(void *node)
{
return ((fat_node_t *)node)->lnkcnt;
}
 
static bool fat_has_children(void *node)
bool fat_has_children(void *node)
{
fat_bs_t *bs;
fat_node_t *nodep = (fat_node_t *)node;
438,22 → 486,22
return false;
}
 
static void *fat_root_get(dev_handle_t dev_handle)
void *fat_root_get(dev_handle_t dev_handle)
{
return fat_node_get(dev_handle, 0);
}
 
static char fat_plb_get_char(unsigned pos)
char fat_plb_get_char(unsigned pos)
{
return fat_reg.plb_ro[pos % PLB_SIZE];
}
 
static bool fat_is_directory(void *node)
bool fat_is_directory(void *node)
{
return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}
 
static bool fat_is_file(void *node)
bool fat_is_file(void *node)
{
return ((fat_node_t *)node)->type == FAT_FILE;
}
838,6 → 886,22
return;
}
 
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
int rc;
 
fat_node_t *nodep = fat_node_get(dev_handle, index);
if (!nodep) {
ipc_answer_0(rid, ENOENT);
return;
}
 
rc = fat_destroy_node(nodep);
ipc_answer_0(rid, rc);
}
 
/**
* @}
*/
/branches/tracing/boot/arch/ia64/loader/gefi/HelenOS/hello.c
5,8 → 5,14
 
#define KERNEL_LOAD_ADDRESS 0x4400000
 
#define MEM_MAP_DESCRIPTOR_OFFSET_TYPE 0
#define MEM_MAP_DESCRIPTOR_OFFSET_BASE 8
#define MEM_MAP_DESCRIPTOR_OFFSET_PAGES 24
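/*
 * These are byte offsets of the Type, PhysicalStart and NumberOfPages
 * fields of EFI_MEMORY_DESCRIPTOR. Raw offsets are used because the
 * entries of the map returned by the firmware are descsize bytes apart,
 * which may be larger than sizeof(EFI_MEMORY_DESCRIPTOR).
 */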
 
 
 
// Link image as a data array into hello - useful with network boot
//#define IMAGE_LINKED
#define IMAGE_LINKED
 
bootinfo_t *bootinfo=(bootinfo_t *)BOOTINFO_ADDRESS;
 
177,25 → 183,21
//bootinfo->sapic=sapic;
 
 
int wakeup_intno;
wakeup_intno=0xf0;
UINT64 wakeup_intno;
LibGetSalWakeupVector(&wakeup_intno);
Print (L"WAKEUP INTNO:%X\n", wakeup_intno);
//bootinfo->wakeup_intno=wakeup_intno;
 
 
 
 
 
{
UINTN cookie;
void *p=(void *)KERNEL_LOAD_ADDRESS;
UINTN mapsize,descsize;
UINT32 desver;
EFI_STATUS status;
EFI_MEMORY_DESCRIPTOR emd[1024];
mapsize=1024*sizeof(emd);
status=BS->AllocatePages(AllocateAnyPages,EfiLoaderData,/*(HOSSize>>12)+1*/ 1,p);
if(EFI_ERROR(status)){
206,10 → 208,18
return EFI_SUCCESS;
}
status=BS->GetMemoryMap(&mapsize,emd,&cookie,&descsize,&desver);
if(EFI_ERROR(status)){
Print(L"Error 1\n");
return EFI_SUCCESS;
UINTN no_entryes;
void *mds;
mds = LibMemoryMap(&no_entryes, &cookie, &descsize, &desver);
for (i = 0; i < no_entryes; i++) {
unsigned int type = *((unsigned int *) (mds + i * descsize + MEM_MAP_DESCRIPTOR_OFFSET_TYPE));
unsigned long long base = *((unsigned long long *) (mds + i * descsize + MEM_MAP_DESCRIPTOR_OFFSET_BASE));
unsigned long long pages = *((unsigned long long *) (mds + i * descsize + MEM_MAP_DESCRIPTOR_OFFSET_PAGES));
Print(L"T:%02d %016llX %016llX\n", type, base, pages * EFI_PAGE_SIZE);
}
status=BS->ExitBootServices(image,cookie);
if(EFI_ERROR(status)){
217,7 → 227,7
return EFI_SUCCESS;
}
}
int a;
for(a=0;a<HOSSize;a++){
227,6 → 237,51
bootinfo->wakeup_intno=wakeup_intno;
bootinfo->sys_freq=sys_freq;
bootinfo->freq_scale=freq_scale;
 
 
bootinfo->memmap_items = 0;
for (i = 0; i < no_entryes; i++) {
unsigned int type = *((unsigned int *) (mds + i * descsize + MEM_MAP_DESCRIPTOR_OFFSET_TYPE));
unsigned long long base = *((unsigned long long *) (mds + i * descsize + MEM_MAP_DESCRIPTOR_OFFSET_BASE));
unsigned long long pages = *((unsigned long long *) (mds + i * descsize + MEM_MAP_DESCRIPTOR_OFFSET_PAGES));
switch (type) {
case EfiConventionalMemory:
bootinfo->memmap[bootinfo->memmap_items].type = EFI_MEMMAP_FREE_MEM;
bootinfo->memmap[bootinfo->memmap_items].base = base;
bootinfo->memmap[bootinfo->memmap_items].size = pages * EFI_PAGE_SIZE;
bootinfo->memmap_items++;
break;
case EfiMemoryMappedIO:
bootinfo->memmap[bootinfo->memmap_items].type = EFI_MEMMAP_IO;
bootinfo->memmap[bootinfo->memmap_items].base = base;
bootinfo->memmap[bootinfo->memmap_items].size = pages * EFI_PAGE_SIZE;
bootinfo->memmap_items++;
break;
case EfiMemoryMappedIOPortSpace:
bootinfo->memmap[bootinfo->memmap_items].type = EFI_MEMMAP_IO_PORTS;
bootinfo->memmap[bootinfo->memmap_items].base = base;
bootinfo->memmap[bootinfo->memmap_items].size = pages * EFI_PAGE_SIZE;
bootinfo->memmap_items++;
break;
default:
break;
}
}
 
 
//Run Kernel
asm volatile(