/branches/dynload/kernel/generic/src/lib/elf.c |
---|
101,6 → 101,10 |
if (header->e_type != ET_EXEC) |
return EE_UNSUPPORTED; |
/* Check if the ELF image starts on a page boundary */ |
if (ALIGN_UP((uintptr_t)header, PAGE_SIZE) != (uintptr_t)header) |
return EE_UNSUPPORTED; |
/* Walk through all segment headers and process them. */ |
for (i = 0; i < header->e_phnum; i++) { |
elf_segment_header_t *seghdr; |
193,6 → 197,8 |
as_area_t *a; |
int flags = 0; |
mem_backend_data_t backend_data; |
uintptr_t base; |
size_t mem_sz; |
backend_data.elf = elf; |
backend_data.segment = entry; |
212,13 → 218,14 |
flags |= AS_AREA_READ; |
flags |= AS_AREA_CACHEABLE; |
/* |
* Check if the virtual address starts on page boundary. |
/* |
* Align vaddr down, inserting a little "gap" at the beginning. |
* Adjust area size, so that its end remains in place. |
*/ |
if (ALIGN_UP(entry->p_vaddr, PAGE_SIZE) != entry->p_vaddr) |
return EE_UNSUPPORTED; |
base = ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE); |
mem_sz = entry->p_memsz + (entry->p_vaddr - base); |
a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, |
a = as_area_create(as, flags, mem_sz, base, |
AS_AREA_ATTR_NONE, &elf_backend, &backend_data); |
if (!a) |
return EE_MEMORY; |
/branches/dynload/kernel/generic/src/mm/backend_elf.c |
---|
79,7 → 79,7 |
elf_header_t *elf = area->backend_data.elf; |
elf_segment_header_t *entry = area->backend_data.segment; |
btree_node_t *leaf; |
uintptr_t base, frame; |
uintptr_t base, frame, page, start_anon; |
index_t i; |
bool dirty = false; |
86,12 → 86,18 |
if (!as_area_check_access(area, access)) |
return AS_PF_FAULT; |
ASSERT((addr >= entry->p_vaddr) && |
ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) && |
(addr < entry->p_vaddr + entry->p_memsz)); |
i = (addr - entry->p_vaddr) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; |
base = (uintptr_t) |
(((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); |
/* Virtual address of the faulting page */ |
page = ALIGN_DOWN(addr, PAGE_SIZE); |
/* Virtual address of the end of initialized part of segment */ |
start_anon = entry->p_vaddr + entry->p_filesz; |
if (area->sh_info) { |
bool found = false; |
98,10 → 104,10 |
/* |
* The address space area is shared. |
*/ |
mutex_lock(&area->sh_info->lock); |
frame = (uintptr_t) btree_search(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf); |
page - area->base, &leaf); |
if (!frame) { |
unsigned int i; |
110,8 → 116,7 |
*/ |
for (i = 0; i < leaf->keys; i++) { |
if (leaf->key[i] == |
ALIGN_DOWN(addr, PAGE_SIZE)) { |
if (leaf->key[i] == page) { |
found = true; |
break; |
} |
121,21 → 126,18 |
frame_reference_add(ADDR2PFN(frame)); |
page_mapping_insert(AS, addr, frame, |
as_area_get_flags(area)); |
if (!used_space_insert(area, |
ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
if (!used_space_insert(area, page, 1)) |
panic("Could not insert used space.\n"); |
mutex_unlock(&area->sh_info->lock); |
return AS_PF_OK; |
} |
} |
/* |
* The area is either not shared or the pagemap does not contain the |
* mapping. |
*/ |
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < |
entry->p_vaddr + entry->p_filesz) { |
if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { |
/* |
* Initialized portion of the segment. The memory is backed |
* directly by the content of the ELF image. Pages are |
149,19 → 151,10 |
memcpy((void *) PA2KA(frame), |
(void *) (base + i * FRAME_SIZE), FRAME_SIZE); |
dirty = true; |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
frame = KA2PA(base + i*FRAME_SIZE); |
frame = KA2PA(base + i * FRAME_SIZE); |
} |
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= |
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
} else if (page >= start_anon) { |
/* |
* This is the uninitialized portion of the segment. |
* It is not physically present in the ELF image. |
171,42 → 164,45 |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame), FRAME_SIZE, 0); |
dirty = true; |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
} else { |
size_t size; |
size_t pad_lo, pad_hi; |
/* |
* The mixed case. |
* The lower part is backed by the ELF image and |
* the upper part is anonymous memory. |
* |
* The middle part is backed by the ELF image and |
* the lower and upper parts are anonymous memory. |
* (The segment can be and often is shorter than 1 page). |
*/ |
size = entry->p_filesz - (i<<PAGE_WIDTH); |
if (page < entry->p_vaddr) |
pad_lo = entry->p_vaddr - page; |
else |
pad_lo = 0; |
if (start_anon < page + PAGE_SIZE) |
pad_hi = page + PAGE_SIZE - start_anon; |
else |
pad_hi = 0; |
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0); |
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0); |
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE), |
size); |
memcpy((void *) (PA2KA(frame) + pad_lo), |
(void *) (base + i * FRAME_SIZE + pad_lo), |
FRAME_SIZE - pad_lo - pad_hi); |
memsetb(PA2KA(frame), pad_lo, 0); |
memsetb(PA2KA(frame) + FRAME_SIZE - pad_hi, pad_hi, 0); |
dirty = true; |
} |
if (area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, |
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, |
(void *) frame, leaf); |
} |
if (dirty && area->sh_info) { |
frame_reference_add(ADDR2PFN(frame)); |
btree_insert(&area->sh_info->pagemap, page - area->base, |
(void *) frame, leaf); |
} |
} |
if (area->sh_info) |
mutex_unlock(&area->sh_info->lock); |
page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); |
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) |
if (!used_space_insert(area, page, 1)) |
panic("Could not insert used space.\n"); |
return AS_PF_OK; |
225,17 → 221,17 |
{ |
elf_header_t *elf = area->backend_data.elf; |
elf_segment_header_t *entry = area->backend_data.segment; |
uintptr_t base; |
uintptr_t base, start_anon; |
index_t i; |
ASSERT((page >= entry->p_vaddr) && |
ASSERT((page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) && |
(page < entry->p_vaddr + entry->p_memsz)); |
i = (page - entry->p_vaddr) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + entry->p_offset); |
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base); |
if (page + PAGE_SIZE < |
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) { |
i = (page - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH; |
base = (uintptr_t) (((void *) elf) + |
ALIGN_DOWN(entry->p_offset, FRAME_SIZE)); |
start_anon = entry->p_vaddr + entry->p_filesz; |
if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { |
if (entry->p_flags & PF_W) { |
/* |
* Free the frame with the copy of writable segment |
304,7 → 300,8 |
* by the ELF image. |
*/ |
if (!(area->flags & AS_AREA_WRITE)) |
if (base + count * PAGE_SIZE <= start_anon) |
if (base >= entry->p_vaddr && |
base + count * PAGE_SIZE <= start_anon) |
continue; |
for (j = 0; j < count; j++) { |
315,7 → 312,8 |
* ELF image. |
*/ |
if (!(area->flags & AS_AREA_WRITE)) |
if (base + (j + 1) * PAGE_SIZE <= |
if (base >= entry->p_vaddr && |
base + (j + 1) * PAGE_SIZE <= |
start_anon) |
continue; |
/branches/dynload/uspace/lib/libfs/libfs.c |
---|
155,10 → 155,10 |
if (ops->plb_get_char(next) == '/') |
next++; /* eat slash */ |
while (ops->has_children(cur) && next <= last) { |
while (next <= last && ops->has_children(cur)) { |
/* collect the component */ |
len = 0; |
while ((ops->plb_get_char(next) != '/') && (next <= last)) { |
while ((next <= last) && (ops->plb_get_char(next) != '/')) { |
if (len + 1 == NAME_MAX) { |
/* component length overflow */ |
ipc_answer_0(rid, ENAMETOOLONG); |
197,8 → 197,10 |
index); |
if (nodep) { |
if (!ops->link(cur, nodep, component)) { |
if (lflag & L_CREATE) |
(void)ops->destroy(nodep); |
if (lflag & L_CREATE) { |
(void)ops->destroy( |
nodep); |
} |
ipc_answer_0(rid, ENOSPC); |
} else { |
ipc_answer_5(rid, EOK, |
232,7 → 234,7 |
} |
/* handle miss: excessive components */ |
if (!ops->has_children(cur) && next <= last) { |
if (next <= last && !ops->has_children(cur)) { |
if (lflag & (L_CREATE | L_LINK)) { |
if (!ops->is_directory(cur)) { |
ipc_answer_0(rid, ENOTDIR); |
/branches/dynload/uspace/lib/libc/arch/sparc64/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x4000; |
. = 0x4000 + SIZEOF_HEADERS; |
.init ALIGN(0x4000) : SUBALIGN(0x4000) { |
.init : { |
*(.init); |
} :text |
.text : { |
16,12 → 16,14 |
*(.text); |
*(.rodata*); |
} :text |
.got ALIGN(0x4000) : SUBALIGN(0x4000) { |
. = . + 0x4000; |
.got : { |
_gp = .; |
*(.got*); |
} :data |
.data ALIGN(0x4000) : SUBALIGN(0x4000) { |
.data : { |
*(.data); |
*(.sdata); |
} :data |
/branches/dynload/uspace/lib/libc/arch/ia64/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x4000; |
. = 0x4000 + SIZEOF_HEADERS; |
.init ALIGN(0x4000): SUBALIGN(0x4000) { |
.init : { |
*(.init); |
} : text |
.text : { |
17,7 → 17,9 |
*(.rodata*); |
} :text |
.got ALIGN(0x4000) : SUBALIGN(0x4000) { |
. = . + 0x4000; |
.got : { |
_gp = .; |
*(.got*); |
} :data |
/branches/dynload/uspace/lib/libc/arch/arm32/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x1000; |
. = 0x1000 + SIZEOF_HEADERS; |
.init ALIGN(0x1000): SUBALIGN(0x1000) { |
.init : { |
*(.init); |
} : text |
.text : { |
16,8 → 16,10 |
*(.text); |
*(.rodata*); |
} :text |
.data ALIGN(0x1000) : SUBALIGN(0x1000) { |
. = . + 0x1000; |
.data : { |
*(.opd); |
*(.data .data.*); |
*(.sdata); |
/branches/dynload/uspace/lib/libc/arch/ppc32/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x1000; |
. = 0x1000 + SIZEOF_HEADERS; |
.init ALIGN(0x1000) : SUBALIGN(0x1000) { |
.init : { |
*(.init); |
} :text |
.text : { |
16,8 → 16,10 |
*(.text); |
*(.rodata*); |
} :text |
.data ALIGN(0x1000) : SUBALIGN(0x1000) { |
. = . + 0x1000; |
.data : { |
*(.data); |
*(.sdata); |
} :data |
/branches/dynload/uspace/lib/libc/arch/amd64/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x1000; |
. = 0x1000 + SIZEOF_HEADERS; |
.init ALIGN(0x1000) : SUBALIGN(0x1000) { |
.init : { |
*(.init); |
} :text |
.text : { |
16,8 → 16,10 |
*(.text); |
*(.rodata*); |
} :text |
.data ALIGN(0x1000) : SUBALIGN(0x1000) { |
. = . + 0x1000; |
.data : { |
*(.data); |
} :data |
.tdata : { |
/branches/dynload/uspace/lib/libc/arch/ppc64/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x1000; |
. = 0x1000 + SIZEOF_HEADERS; |
.init ALIGN(0x1000) : SUBALIGN(0x1000) { |
.init : { |
*(.init); |
} :text |
.text : { |
17,8 → 17,10 |
*(.toc); |
*(.rodata*); |
} :text |
.data ALIGN(0x1000) : SUBALIGN(0x1000) { |
. = . + 0x1000; |
.data : { |
*(.opd); |
*(.data*); |
*(.sdata); |
/branches/dynload/uspace/lib/libc/arch/mips32/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x4000; |
. = 0x4000 + SIZEOF_HEADERS; |
.init ALIGN(0x4000) : SUBALIGN(0x4000) { |
.init : { |
*(.init); |
} :text |
.text : { |
17,6 → 17,8 |
*(.rodata*); |
} :text |
. = . + 0x4000; |
.data : { |
*(.data); |
*(.data.rel*); |
/branches/dynload/uspace/lib/libc/arch/ia32/_link.ld.in |
---|
7,9 → 7,9 |
} |
SECTIONS { |
. = 0x1000; |
. = 0x1000 + SIZEOF_HEADERS; |
.init ALIGN(0x1000) : SUBALIGN(0x1000) { |
.init : { |
*(.init); |
} :text |
.text : { |
16,8 → 16,10 |
*(.text); |
*(.rodata*); |
} :text |
.data ALIGN(0x1000) : SUBALIGN(0x1000) { |
. = . + 0x1000; |
.data : { |
*(.data); |
} :data |
.tdata : { |
/branches/dynload/uspace/srv/fs/tmpfs/tmpfs.h |
---|
60,6 → 60,7 |
extern fs_reg_t tmpfs_reg; |
extern void tmpfs_mount(ipc_callid_t, ipc_call_t *); |
extern void tmpfs_lookup(ipc_callid_t, ipc_call_t *); |
extern void tmpfs_read(ipc_callid_t, ipc_call_t *); |
extern void tmpfs_write(ipc_callid_t, ipc_call_t *); |
/branches/dynload/uspace/srv/fs/tmpfs/tmpfs.c |
---|
58,7 → 58,7 |
[IPC_METHOD_TO_VFS_OP(VFS_READ)] = VFS_OP_DEFINED, |
[IPC_METHOD_TO_VFS_OP(VFS_WRITE)] = VFS_OP_DEFINED, |
[IPC_METHOD_TO_VFS_OP(VFS_TRUNCATE)] = VFS_OP_DEFINED, |
[IPC_METHOD_TO_VFS_OP(VFS_MOUNT)] = VFS_OP_NULL, |
[IPC_METHOD_TO_VFS_OP(VFS_MOUNT)] = VFS_OP_DEFINED, |
[IPC_METHOD_TO_VFS_OP(VFS_UNMOUNT)] = VFS_OP_NULL, |
[IPC_METHOD_TO_VFS_OP(VFS_DESTROY)] = VFS_OP_DEFINED, |
} |
103,6 → 103,9 |
callid = async_get_call(&call); |
switch (IPC_GET_METHOD(call)) { |
case VFS_MOUNT: |
tmpfs_mount(callid, &call); |
break; |
case VFS_LOOKUP: |
tmpfs_lookup(callid, &call); |
break; |
/branches/dynload/uspace/srv/fs/tmpfs/tmpfs_ops.c |
---|
393,6 → 393,21 |
return EOK; |
} |
void tmpfs_mount(ipc_callid_t rid, ipc_call_t *request) |
{ |
dev_handle_t mr_dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
fs_index_t mr_index = (fs_index_t)IPC_GET_ARG2(*request); |
fs_handle_t mp_fs_handle = (fs_handle_t)IPC_GET_ARG3(*request); |
dev_handle_t mp_dev_handle = (dev_handle_t)IPC_GET_ARG4(*request); |
fs_index_t mp_index = (fs_index_t)IPC_GET_ARG5(*request); |
if ((mr_index == root->index) && |
(mp_fs_handle == tmpfs_reg.fs_handle) && |
(mp_index == mr_index)) |
ipc_answer_0(rid, EOK); |
else |
ipc_answer_0(rid, ENOTSUP); |
} |
void tmpfs_lookup(ipc_callid_t rid, ipc_call_t *request) |
{ |
/* Initialize TMPFS. */ |
/branches/dynload/uspace/srv/fs/fat/fat_idx.c |
---|
74,15 → 74,7 |
/** List of unused structures. */ |
static LIST_INITIALIZE(unused_head); |
/** Futex protecting the up_hash and ui_hash. |
* |
* The locking strategy assumes that there will be at most one fibril for each |
* dev_handle. Therefore it will be sufficient to hold the futex for shorter |
* times (i.e. only during hash table operations as opposed to holding it the |
* whole time between an unsuccessful find and the following insert). Should the |
* assumption break, the locking strategy for this futex will have to be |
* reconsidered. |
*/ |
/** Futex protecting the up_hash and ui_hash. */ |
static futex_t used_futex = FUTEX_INITIALIZER; |
/** |
352,16 → 344,17 |
futex_down(&used_futex); |
l = hash_table_find(&up_hash, pkey); |
futex_up(&used_futex); |
if (l) { |
fidx = hash_table_get_instance(l, fat_idx_t, uph_link); |
} else { |
fidx = (fat_idx_t *) malloc(sizeof(fat_idx_t)); |
if (!fidx) { |
futex_up(&used_futex); |
return NULL; |
} |
if (!fat_idx_alloc(dev_handle, &fidx->index)) { |
free(fidx); |
futex_up(&used_futex); |
return NULL; |
} |
372,16 → 365,17 |
link_initialize(&fidx->uph_link); |
link_initialize(&fidx->uih_link); |
futex_initialize(&fidx->lock, 1); |
fidx->dev_handle = dev_handle; |
fidx->pfc = pfc; |
fidx->pdi = pdi; |
fidx->nodep = NULL; |
futex_down(&used_futex); |
hash_table_insert(&up_hash, pkey, &fidx->uph_link); |
hash_table_insert(&ui_hash, ikey, &fidx->uih_link); |
futex_up(&used_futex); |
} |
futex_down(&fidx->lock); |
futex_up(&used_futex); |
return fidx; |
} |
398,10 → 392,11 |
futex_down(&used_futex); |
l = hash_table_find(&ui_hash, ikey); |
futex_up(&used_futex); |
if (l) { |
fidx = hash_table_get_instance(l, fat_idx_t, uih_link); |
futex_down(&fidx->lock); |
} |
futex_up(&used_futex); |
return fidx; |
} |
/branches/dynload/uspace/srv/fs/fat/fat.h |
---|
180,6 → 180,7 |
/** Used indices (index) hash table link. */ |
link_t uih_link; |
futex_t lock; |
dev_handle_t dev_handle; |
fs_index_t index; |
/** |
197,6 → 198,7 |
/** FAT in-core node. */ |
typedef struct fat_node { |
futex_t lock; |
fat_node_type_t type; |
fat_idx_t *idx; |
/** |
/branches/dynload/uspace/srv/fs/fat/fat_ops.c |
---|
50,9 → 50,12 |
#define BS_BLOCK 0 |
/** List of free FAT nodes that still contain valid data. */ |
LIST_INITIALIZE(ffn_head); |
/** Futex protecting the list of cached free FAT nodes. */ |
static futex_t ffn_futex = FUTEX_INITIALIZER; |
/** List of cached free FAT nodes. */ |
static LIST_INITIALIZE(ffn_head); |
#define FAT_NAME_LEN 8 |
#define FAT_EXT_LEN 3 |
179,6 → 182,7 |
static void fat_node_initialize(fat_node_t *node) |
{ |
futex_initialize(&node->lock, 1); |
node->idx = NULL; |
node->type = 0; |
link_initialize(&node->ffn_link); |
236,10 → 240,12 |
/* TODO */ |
} |
/** Instantiate a FAT in-core node. */ |
static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
/** Internal version of fat_node_get(). |
* |
* @param idxp Locked index structure. |
*/ |
static void *fat_node_get_core(fat_idx_t *idxp) |
{ |
fat_idx_t *idx; |
block_t *b; |
fat_dentry_t *d; |
fat_node_t *nodep; |
246,18 → 252,16 |
unsigned bps; |
unsigned dps; |
idx = fat_idx_get_by_index(dev_handle, index); |
if (!idx) |
return NULL; |
if (idx->nodep) { |
if (idxp->nodep) { |
/* |
* We are lucky. |
* The node is already instantiated in memory. |
*/ |
if (!idx->nodep->refcnt++) |
futex_down(&idxp->nodep->lock); |
if (!idxp->nodep->refcnt++) |
list_remove(&nodep->ffn_link); |
return idx->nodep; |
futex_up(&idxp->nodep->lock); |
return idxp->nodep; |
} |
/* |
264,17 → 268,31 |
* We must instantiate the node from the file system. |
*/ |
assert(idx->pfc); |
assert(idxp->pfc); |
futex_down(&ffn_futex); |
if (!list_empty(&ffn_head)) { |
/* Try to use a cached unused node structure. */ |
/* Try to use a cached free node structure. */ |
fat_idx_t *idxp_tmp; |
nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link); |
if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK) |
goto skip_cache; |
idxp_tmp = nodep->idx; |
if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) { |
futex_up(&nodep->lock); |
goto skip_cache; |
} |
list_remove(&nodep->ffn_link); |
futex_up(&ffn_futex); |
if (nodep->dirty) |
fat_node_sync(nodep); |
list_remove(&nodep->ffn_link); |
nodep->idx->nodep = NULL; |
idxp_tmp->nodep = NULL; |
futex_up(&nodep->lock); |
futex_up(&idxp_tmp->lock); |
} else { |
skip_cache: |
/* Try to allocate a new node structure. */ |
futex_up(&ffn_futex); |
nodep = (fat_node_t *)malloc(sizeof(fat_node_t)); |
if (!nodep) |
return NULL; |
281,15 → 299,15 |
} |
fat_node_initialize(nodep); |
bps = fat_bps_get(dev_handle); |
bps = fat_bps_get(idxp->dev_handle); |
dps = bps / sizeof(fat_dentry_t); |
/* Read the block that contains the dentry of interest. */ |
b = _fat_block_get(dev_handle, idx->pfc, |
(idx->pdi * sizeof(fat_dentry_t)) / bps); |
b = _fat_block_get(idxp->dev_handle, idxp->pfc, |
(idxp->pdi * sizeof(fat_dentry_t)) / bps); |
assert(b); |
d = ((fat_dentry_t *)b->data) + (idx->pdi % dps); |
d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); |
if (d->attr & FAT_ATTR_SUBDIR) { |
/* |
* The only directory which does not have this bit set is the |
308,19 → 326,38 |
block_put(b); |
/* Link the idx structure with the node structure. */ |
nodep->idx = idx; |
idx->nodep = nodep; |
nodep->idx = idxp; |
idxp->nodep = nodep; |
return nodep; |
} |
/** Instantiate a FAT in-core node. */ |
static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
{ |
void *node; |
fat_idx_t *idxp; |
idxp = fat_idx_get_by_index(dev_handle, index); |
if (!idxp) |
return NULL; |
/* idxp->lock held */ |
node = fat_node_get_core(idxp); |
futex_up(&idxp->lock); |
return node; |
} |
static void fat_node_put(void *node) |
{ |
fat_node_t *nodep = (fat_node_t *)node; |
futex_down(&nodep->lock); |
if (!--nodep->refcnt) { |
futex_down(&ffn_futex); |
list_append(&nodep->ffn_link, &ffn_head); |
futex_up(&ffn_futex); |
} |
futex_up(&nodep->lock); |
} |
static void *fat_create(int flags) |
354,6 → 391,7 |
fat_dentry_t *d; |
block_t *b; |
futex_down(&parentp->idx->lock); |
bps = fat_bps_get(parentp->idx->dev_handle); |
dps = bps / sizeof(fat_dentry_t); |
blocks = parentp->size / bps + (parentp->size % bps != 0); |
371,6 → 409,7 |
continue; |
case FAT_DENTRY_LAST: |
block_put(b); |
futex_up(&parentp->idx->lock); |
return NULL; |
default: |
case FAT_DENTRY_VALID: |
379,9 → 418,17 |
} |
if (strcmp(name, component) == 0) { |
/* hit */ |
void *node; |
/* |
* Assume tree hierarchy for locking. We |
* already have the parent and now we are going |
* to lock the child. Never lock in the opposite |
* order. |
*/ |
fat_idx_t *idx = fat_idx_get_by_pos( |
parentp->idx->dev_handle, parentp->firstc, |
i * dps + j); |
futex_up(&parentp->idx->lock); |
if (!idx) { |
/* |
* Can happen if memory is low or if we |
390,8 → 437,8 |
block_put(b); |
return NULL; |
} |
void *node = fat_node_get(idx->dev_handle, |
idx->index); |
node = fat_node_get_core(idx); |
futex_up(&idx->lock); |
block_put(b); |
return node; |
} |
398,7 → 445,7 |
} |
block_put(b); |
} |
futex_up(&parentp->idx->lock); |
return NULL; |
} |
432,6 → 479,7 |
if (nodep->type != FAT_DIRECTORY) |
return false; |
futex_down(&nodep->idx->lock); |
bps = fat_bps_get(nodep->idx->dev_handle); |
dps = bps / sizeof(fat_dentry_t); |
452,18 → 500,22 |
continue; |
case FAT_DENTRY_LAST: |
block_put(b); |
futex_up(&nodep->idx->lock); |
return false; |
default: |
case FAT_DENTRY_VALID: |
block_put(b); |
futex_up(&nodep->idx->lock); |
return true; |
} |
block_put(b); |
futex_up(&nodep->idx->lock); |
return true; |
} |
block_put(b); |
} |
futex_up(&nodep->idx->lock); |
return false; |
} |
/branches/dynload/uspace/srv/vfs/vfs_ops.c |
---|
84,6 → 84,8 |
{ |
dev_handle_t dev_handle; |
vfs_node_t *mp_node = NULL; |
int rc; |
int phone; |
/* |
* We expect the library to do the device-name to device-handle |
167,7 → 169,6 |
* node cannot be removed. However, we do take a reference to it so |
* that we can track how many times it has been mounted. |
*/ |
int rc; |
vfs_lookup_res_t mr_res; |
rc = lookup_root(fs_handle, dev_handle, &mr_res); |
if (rc != EOK) { |
225,11 → 226,29 |
} else { |
/* We still don't have the root file system mounted. */ |
if ((size == 1) && (buf[0] == '/')) { |
/* For this simple, but important case, we are done. */ |
rootfs = mr_res.triplet; |
/* |
* For this simple, but important case, |
* we are almost done. |
*/ |
free(buf); |
/* Inform the mount point about the root mount. */ |
phone = vfs_grab_phone(mr_res.triplet.fs_handle); |
rc = async_req_5_0(phone, VFS_MOUNT, |
(ipcarg_t) mr_res.triplet.dev_handle, |
(ipcarg_t) mr_res.triplet.index, |
(ipcarg_t) mr_res.triplet.fs_handle, |
(ipcarg_t) mr_res.triplet.dev_handle, |
(ipcarg_t) mr_res.triplet.index); |
vfs_release_phone(phone); |
if (rc == EOK) |
rootfs = mr_res.triplet; |
else |
vfs_node_put(mr_node); |
futex_up(&rootfs_futex); |
free(buf); |
ipc_answer_0(rid, EOK); |
ipc_answer_0(rid, rc); |
return; |
} else { |
/* |
253,24 → 272,20 |
* of the file system being mounted. |
*/ |
int phone = vfs_grab_phone(mp_res.triplet.fs_handle); |
/* Later we can use ARG3 to pass mode/flags. */ |
aid_t req1 = async_send_3(phone, VFS_MOUNT, |
/** |
* @todo |
* Add more IPC parameters so that we can send mount mode/flags. |
*/ |
phone = vfs_grab_phone(mp_res.triplet.fs_handle); |
rc = async_req_5_0(phone, VFS_MOUNT, |
(ipcarg_t) mp_res.triplet.dev_handle, |
(ipcarg_t) mp_res.triplet.index, 0, NULL); |
/* The second call uses the same method. */ |
aid_t req2 = async_send_3(phone, VFS_MOUNT, |
(ipcarg_t) mp_res.triplet.index, |
(ipcarg_t) mr_res.triplet.fs_handle, |
(ipcarg_t) mr_res.triplet.dev_handle, |
(ipcarg_t) mr_res.triplet.index, NULL); |
(ipcarg_t) mr_res.triplet.index); |
vfs_release_phone(phone); |
ipcarg_t rc1; |
ipcarg_t rc2; |
async_wait_for(req1, &rc1); |
async_wait_for(req2, &rc2); |
if ((rc1 != EOK) || (rc2 != EOK)) { |
if (rc != EOK) { |
/* Mount failed, drop references to mr_node and mp_node. */ |
vfs_node_put(mr_node); |
if (mp_node) |
277,12 → 292,7 |
vfs_node_put(mp_node); |
} |
if (rc2 == EOK) |
ipc_answer_0(rid, rc1); |
else if (rc1 == EOK) |
ipc_answer_0(rid, rc2); |
else |
ipc_answer_0(rid, rc1); |
ipc_answer_0(rid, rc); |
} |
void vfs_open(ipc_callid_t rid, ipc_call_t *request) |