Subversion Repositories HelenOS

Compare Revisions

Rev 3596 → Rev 3597

/branches/tracing/uspace/srv/fs/fat/fat_ops.c
89,7 → 89,7
/* Read the block that contains the dentry of interest. */
b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
- (node->idx->pdi * sizeof(fat_dentry_t)) / bps);
+ (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
 
d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
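This revision threads a new flags argument through every block-getter call: fat_block_get() and _fat_block_get() now take BLOCK_FLAGS_NONE at plain call sites, and the write path further down passes BLOCK_FLAGS_NOREAD when it is about to replace a whole block. A minimal sketch of the intent; example_get() is a hypothetical helper, not part of this revision:

    /* Hypothetical helper illustrating the new flags argument. */
    static block_t *example_get(fat_bs_t *bs, fat_node_t *nodep,
        off_t blkidx, size_t bps, size_t to_write)
    {
        int flags = BLOCK_FLAGS_NONE;

        /*
         * When the caller will overwrite the whole block, reading its
         * current contents from the device first is wasted I/O.
         */
        if (to_write == bps)
            flags |= BLOCK_FLAGS_NOREAD;
        return fat_block_get(bs, nodep, blkidx, flags);
    }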
 
102,6 → 102,42
block_put(b);
}
 
+ static fat_node_t *fat_node_get_new(void)
+ {
+ fat_node_t *nodep;
+
+ futex_down(&ffn_futex);
+ if (!list_empty(&ffn_head)) {
+ /* Try to use a cached free node structure. */
+ fat_idx_t *idxp_tmp;
+ nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
+ if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
+ goto skip_cache;
+ idxp_tmp = nodep->idx;
+ if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
+ futex_up(&nodep->lock);
+ goto skip_cache;
+ }
+ list_remove(&nodep->ffn_link);
+ futex_up(&ffn_futex);
+ if (nodep->dirty)
+ fat_node_sync(nodep);
+ idxp_tmp->nodep = NULL;
+ futex_up(&nodep->lock);
+ futex_up(&idxp_tmp->lock);
+ } else {
+ skip_cache:
+ /* Try to allocate a new node structure. */
+ futex_up(&ffn_futex);
+ nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
+ if (!nodep)
+ return NULL;
+ }
+ fat_node_initialize(nodep);
+ return nodep;
+ }
 
/** Internal version of fat_node_get().
*
* @param idxp Locked index structure.
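The fat_node_get_new() helper added above factors the free-node reclamation out of fat_node_get_core(); the identical inline block is deleted in the next hunk. The futex_trydown() calls are what keep the cache path deadlock-free: the code never blocks on a node or index lock while it holds ffn_futex, and bails out to plain allocation if either lock is contended. A schematic restatement of the idiom (the index-lock handshake and dirty write-back are omitted):

    static fat_node_t *reclaim_or_alloc(void)
    {
        fat_node_t *nodep;

        futex_down(&ffn_futex);
        if (!list_empty(&ffn_head)) {
            nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
            /*
             * Never sleep on a node lock while holding ffn_futex; a
             * contended lock means the node is being revived elsewhere,
             * so fall back to allocation instead of risking a deadlock.
             */
            if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
                goto fallback;
            list_remove(&nodep->ffn_link);
            futex_up(&ffn_futex);
            futex_up(&nodep->lock);
            return nodep;
        }
    fallback:
        futex_up(&ffn_futex);
        return (fat_node_t *)malloc(sizeof(fat_node_t));
    }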
113,6 → 149,7
fat_dentry_t *d;
fat_node_t *nodep = NULL;
unsigned bps;
unsigned spc;
unsigned dps;
 
if (idxp->nodep) {
133,42 → 170,18
assert(idxp->pfc);
 
- futex_down(&ffn_futex);
- if (!list_empty(&ffn_head)) {
- /* Try to use a cached free node structure. */
- fat_idx_t *idxp_tmp;
- nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
- if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
- goto skip_cache;
- idxp_tmp = nodep->idx;
- if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
- futex_up(&nodep->lock);
- goto skip_cache;
- }
- list_remove(&nodep->ffn_link);
- futex_up(&ffn_futex);
- if (nodep->dirty)
- fat_node_sync(nodep);
- idxp_tmp->nodep = NULL;
- futex_up(&nodep->lock);
- futex_up(&idxp_tmp->lock);
- } else {
- skip_cache:
- /* Try to allocate a new node structure. */
- futex_up(&ffn_futex);
- nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
- if (!nodep)
- return NULL;
- }
- fat_node_initialize(nodep);
+ nodep = fat_node_get_new();
+ if (!nodep)
+ return NULL;
 
bs = block_bb_get(idxp->dev_handle);
bps = uint16_t_le2host(bs->bps);
+ spc = bs->spc;
dps = bps / sizeof(fat_dentry_t);
 
/* Read the block that contains the dentry of interest. */
b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
- (idxp->pdi * sizeof(fat_dentry_t)) / bps);
+ (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
assert(b);
 
d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
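The dentry-locating arithmetic above deserves a worked example. A FAT directory entry is 32 bytes, so with the common 512-byte sector (example values, not taken from this revision):

    unsigned bps = 512;                                 /* bytes per sector */
    unsigned dps = bps / sizeof(fat_dentry_t);          /* 512 / 32 = 16 dentries per block */
    unsigned pdi = 37;                                  /* dentry index within the directory */
    unsigned blk = (pdi * sizeof(fat_dentry_t)) / bps;  /* 1184 / 512 = block 2 */
    unsigned slot = pdi % dps;                          /* 37 % 16 = slot 5 in that block */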
184,8 → 197,8
* defined for the directory entry type. We must determine the
* size of the directory by walking the FAT.
*/
- nodep->size = bps * _fat_blcks_get(bs, idxp->dev_handle,
- uint16_t_le2host(d->firstc), NULL);
+ nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
+ uint16_t_le2host(d->firstc));
} else {
nodep->type = FAT_FILE;
nodep->size = uint32_t_le2host(d->size);
231,7 → 244,7
futex_up(&nodep->lock);
}
 
- static void *fat_create(int flags)
+ static void *fat_create(dev_handle_t dev_handle, int flags)
{
return NULL; /* not supported at the moment */
}
269,7 → 282,7
dps = bps / sizeof(fat_dentry_t);
blocks = parentp->size / bps;
for (i = 0; i < blocks; i++) {
- b = fat_block_get(bs, parentp, i);
+ b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
for (j = 0; j < dps; j++) {
d = ((fat_dentry_t *)b->data) + j;
switch (fat_classify_dentry(d)) {
359,7 → 372,7
for (i = 0; i < blocks; i++) {
fat_dentry_t *d;
- b = fat_block_get(bs, nodep, i);
+ b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
for (j = 0; j < dps; j++) {
d = ((fat_dentry_t *)b->data) + j;
switch (fat_classify_dentry(d)) {
434,12 → 447,20
int rc;
 
/* initialize libblock */
- rc = block_init(dev_handle, BS_SIZE, BS_BLOCK * BS_SIZE, BS_SIZE);
+ rc = block_init(dev_handle, BS_SIZE);
if (rc != EOK) {
- ipc_answer_0(rid, 0);
+ ipc_answer_0(rid, rc);
return;
}
 
+ /* prepare the boot block */
+ rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
+ if (rc != EOK) {
+ block_fini(dev_handle);
+ ipc_answer_0(rid, rc);
+ return;
+ }
 
/* get the buffer with the boot sector */
bs = block_bb_get(dev_handle);
453,6 → 474,14
return;
}
 
+ /* Initialize the block cache */
+ rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
+ if (rc != EOK) {
+ block_fini(dev_handle);
+ ipc_answer_0(rid, rc);
+ return;
+ }
 
rc = fat_idx_init_by_dev_handle(dev_handle);
if (rc != EOK) {
block_fini(dev_handle);
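Taken together, the mount-time changes replace the old all-in-one block_init() call with explicit stages: attach to the device, read and stash the boot block, and only once the real block size is known from the boot sector initialize the block cache. Condensed (error unwinding through block_fini() and ipc_answer_0() omitted):

    rc = block_init(dev_handle, BS_SIZE);
    rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);    /* bytes per sector, from the boot sector */
    rc = block_cache_init(dev_handle, bps, 0 /* XXX */);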
545,7 → 574,8
} else {
bytes = min(len, bps - pos % bps);
bytes = min(bytes, nodep->size - pos);
- b = fat_block_get(bs, nodep, pos / bps);
+ b = fat_block_get(bs, nodep, pos / bps,
+ BLOCK_FLAGS_NONE);
(void) ipc_data_read_finalize(callid, b->data + pos % bps,
bytes);
block_put(b);
570,7 → 600,7
while (bnum < nodep->size / bps) {
off_t o;
 
- b = fat_block_get(bs, nodep, bnum);
+ b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
for (o = pos % (bps / sizeof(fat_dentry_t));
o < bps / sizeof(fat_dentry_t);
o++, pos++) {
616,7 → 646,9
block_t *b;
uint16_t bps;
unsigned spc;
+ unsigned bpc; /* bytes per cluster */
off_t boundary;
+ int flags = BLOCK_FLAGS_NONE;
if (!nodep) {
ipc_answer_0(rid, ENOENT);
623,13 → 655,6
return;
}
- /* XXX remove me when you are ready */
- {
- ipc_answer_0(rid, ENOTSUP);
- fat_node_put(nodep);
- return;
- }
 
ipc_callid_t callid;
size_t len;
if (!ipc_data_write_receive(&callid, &len)) {
639,6 → 664,11
return;
}
 
+ bs = block_bb_get(dev_handle);
+ bps = uint16_t_le2host(bs->bps);
+ spc = bs->spc;
+ bpc = bps * spc;
 
/*
* In all scenarios, we will attempt to write out only one block worth
* of data at maximum. There might be some more efficient approaches,
647,12 → 677,10
* value signalizing a smaller number of bytes written.
*/
bytes = min(len, bps - pos % bps);
 
- bs = block_bb_get(dev_handle);
- bps = uint16_t_le2host(bs->bps);
- spc = bs->spc;
+ if (bytes == bps)
+ flags |= BLOCK_FLAGS_NOREAD;
- boundary = ROUND_UP(nodep->size, bps * spc);
+ boundary = ROUND_UP(nodep->size, bpc);
if (pos < boundary) {
/*
* This is the easier case - we are either overwriting already
661,7 → 689,7
* next block size boundary.
*/
fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
- b = fat_block_get(bs, nodep, pos / bps);
+ b = fat_block_get(bs, nodep, pos / bps, flags);
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
bytes);
b->dirty = true; /* need to sync block */
670,8 → 698,8
nodep->size = pos + bytes;
nodep->dirty = true; /* need to sync node */
}
+ ipc_answer_2(rid, EOK, bytes, nodep->size);
fat_node_put(nodep);
- ipc_answer_1(rid, EOK, bytes);
return;
} else {
/*
680,13 → 708,11
*/
int status;
unsigned nclsts;
- fat_cluster_t mcl, lcl;
- nclsts = (ROUND_UP(pos + bytes, bps * spc) - boundary) /
- bps * spc;
+ fat_cluster_t mcl, lcl;
+ nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
/* create an independent chain of nclsts clusters in all FATs */
- status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl,
- &lcl);
+ status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
if (status != EOK) {
/* could not allocate a chain of nclsts clusters */
fat_node_put(nodep);
696,8 → 722,8
}
/* zero fill any gaps */
fat_fill_gap(bs, nodep, mcl, pos);
- b = _fat_block_get(bs, dev_handle, lcl,
- (pos / bps) % spc);
+ b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
+ flags);
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
bytes);
b->dirty = true; /* need to sync block */
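The switch to bpc in this hunk is more than cosmetic. The old expression ended in "/ bps * spc", which C evaluates left to right: it divides by bps and then multiplies by spc, instead of dividing by the cluster size. Precomputing bpc = bps * spc removes the bug along with the line wrap. With example values:

    unsigned bps = 512, spc = 8;
    unsigned bpc = bps * spc;                 /* 4096 bytes per cluster */
    unsigned span = 8192;                     /* two clusters' worth of bytes */
    unsigned old_count = span / bps * spc;    /* (8192 / 512) * 8 = 128, wrong */
    unsigned new_count = span / bpc;          /* 8192 / 4096 = 2, correct */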
709,12 → 735,70
fat_append_clusters(bs, nodep, mcl);
nodep->size = pos + bytes;
nodep->dirty = true; /* need to sync node */
+ ipc_answer_2(rid, EOK, bytes, nodep->size);
fat_node_put(nodep);
- ipc_answer_1(rid, EOK, bytes);
return;
}
}
 
+ void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
+ {
+ dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
+ fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
+ size_t size = (off_t)IPC_GET_ARG3(*request);
+ fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
+ fat_bs_t *bs;
+ uint16_t bps;
+ uint8_t spc;
+ unsigned bpc; /* bytes per cluster */
+ int rc;
+
+ if (!nodep) {
+ ipc_answer_0(rid, ENOENT);
+ return;
+ }
+
+ bs = block_bb_get(dev_handle);
+ bps = uint16_t_le2host(bs->bps);
+ spc = bs->spc;
+ bpc = bps * spc;
+
+ if (nodep->size == size) {
+ rc = EOK;
+ } else if (nodep->size < size) {
+ /*
+ * The standard says we have the freedom to grow the node.
+ * For now, we simply return an error.
+ */
+ rc = EINVAL;
+ } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
+ /*
+ * The node will be shrunk, but no clusters will be deallocated.
+ */
+ nodep->size = size;
+ nodep->dirty = true; /* need to sync node */
+ rc = EOK;
+ } else {
+ /*
+ * The node will be shrunk, clusters will be deallocated.
+ */
+ if (size == 0) {
+ fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
+ } else {
+ fat_cluster_t lastc;
+ (void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
+ &lastc, (size - 1) / bpc);
+ fat_chop_clusters(bs, nodep, lastc);
+ }
+ nodep->size = size;
+ nodep->dirty = true; /* need to sync node */
+ rc = EOK;
+ }
+ fat_node_put(nodep);
+ ipc_answer_0(rid, rc);
+ return;
+ }
 
/**
* @}
*/
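The new fat_truncate() decides among its three shrink outcomes purely by cluster-boundary arithmetic. A worked example with bpc = 4096 and a 10000-byte file, which occupies ROUND_UP(10000, 4096) = 12288 bytes, i.e. three clusters:

    /* Truncate to 9000: ROUND_UP(9000, 4096) is still 12288, so only
     * nodep->size changes and no clusters are freed. */

    /* Truncate to 3000: ROUND_UP(3000, 4096) = 4096, so clusters must go.
     * (3000 - 1) / 4096 == 0, so fat_cluster_walk() stops at the first
     * cluster and fat_chop_clusters() frees everything after it. */

    /* Truncate to 0: no cluster survives; the chain is chopped at
     * FAT_CLST_RES0, releasing it entirely. */

Growing a file (size larger than nodep->size) is deliberately rejected with EINVAL for now, as the comment in the function notes.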