Subversion Repositories — HelenOS
Compare Revisions: Rev 3515 → Rev 3516

/trunk/uspace/srv/fs/fat/fat.h
50,7 → 50,7
#define BS_BLOCK 0
#define BS_SIZE 512
 
typedef struct {
typedef struct fat_bs {
uint8_t ji[3]; /**< Jump instruction. */
uint8_t oem_name[8];
/* BIOS Parameter Block */
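The only change to fat.h shown here is that the boot-sector structure gains a tag, fat_bs, alongside its existing typedef. Judging from the fat_fat.h changes further down, the point is to let other headers pass the type around by pointer without pulling in fat.h. A minimal sketch of that pattern (only the two fields visible in this hunk are spelled out; the rest of the BIOS Parameter Block is elided):

    /* fat.h: full definition, now carrying a struct tag */
    typedef struct fat_bs {
            uint8_t ji[3];          /* jump instruction */
            uint8_t oem_name[8];
            /* BIOS Parameter Block ... */
    } fat_bs_t;

    /* fat_fat.h: a forward declaration now suffices for the prototypes */
    struct fat_bs;
    extern struct block *_fat_block_get(struct fat_bs *, dev_handle_t,
        fat_cluster_t, off_t);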
/trunk/uspace/srv/fs/fat/fat_fat.c
46,9 → 46,9
#include <assert.h>
 
block_t *
_fat_block_get(dev_handle_t dev_handle, fat_cluster_t firstc, off_t offset)
_fat_block_get(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t firstc,
off_t offset)
{
block_t *bb;
block_t *b;
unsigned bps;
unsigned spc;
62,14 → 62,12
fat_cluster_t clst = firstc;
unsigned i;
 
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
spc = FAT_BS(bb)->spc;
rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
fatcnt = FAT_BS(bb)->fatcnt;
rde = uint16_t_le2host(FAT_BS(bb)->root_ent_max);
sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
block_put(bb);
bps = uint16_t_le2host(bs->bps);
spc = bs->spc;
rscnt = uint16_t_le2host(bs->rscnt);
fatcnt = bs->fatcnt;
rde = uint16_t_le2host(bs->root_ent_max);
sf = uint16_t_le2host(bs->sec_per_fat);
 
rds = (sizeof(fat_dentry_t) * rde) / bps;
rds += ((sizeof(fat_dentry_t) * rde) % bps != 0);
106,6 → 104,7
 
/** Return number of blocks allocated to a file.
*
* @param bs Buffer holding the boot sector for the file.
* @param dev_handle Device handle of the device with the file.
* @param firstc First cluster of the file.
* @param lastc If non-NULL, output argument holding the
114,10 → 113,9
* @return Number of blocks allocated to the file.
*/
uint16_t
_fat_blcks_get(dev_handle_t dev_handle, fat_cluster_t firstc,
_fat_blcks_get(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t firstc,
fat_cluster_t *lastc)
{
block_t *bb;
block_t *b;
unsigned bps;
unsigned spc;
125,11 → 123,9
unsigned clusters = 0;
fat_cluster_t clst = firstc;
 
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
spc = FAT_BS(bb)->spc;
rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
block_put(bb);
bps = uint16_t_le2host(bs->bps);
spc = bs->spc;
rscnt = uint16_t_le2host(bs->rscnt);
 
if (firstc == FAT_CLST_RES0) {
/* No space allocated to the file. */
160,21 → 156,9
return clusters * spc;
}
 
uint16_t fat_bps_get(dev_handle_t dev_handle)
{
block_t *bb;
uint16_t bps;
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
assert(bb != NULL);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
block_put(bb);
 
return bps;
}
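fat_bps_get() existed only to fetch the bytes-per-sector value, and it did so by taking and releasing the boot sector block on every call. With the boot sector now passed around explicitly, the helper is dropped and its callers read the value themselves, as the fat_ops.c hunks below show. A condensed before/after sketch (dev_handle stands for whatever device handle the caller already holds):

    /* rev 3515: one block cache round-trip per query */
    uint16_t bps = fat_bps_get(dev_handle);

    /* rev 3516: the caller pins the boot sector once and keeps it */
    block_t *bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    uint16_t bps = uint16_t_le2host(FAT_BS(bb)->bps);
    /* ... use bps, pass bb->data to the fat_fat.c helpers ... */
    block_put(bb);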
 
/** Fill the gap between EOF and a new file position.
*
* @param bs Buffer holding the boot sector for nodep.
* @param nodep FAT node with the gap.
* @param mcl First cluster in an independent cluster chain that will
* be later appended to the end of the node's own cluster
182,17 → 166,15
* this argument is ignored.
* @param pos Position in the last node block.
*/
void fat_fill_gap(fat_node_t *nodep, fat_cluster_t mcl, off_t pos)
void fat_fill_gap(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl, off_t pos)
{
uint16_t bps;
unsigned spc;
block_t *bb, *b;
block_t *b;
off_t o, boundary;
 
bb = block_get(nodep->idx->dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
spc = FAT_BS(bb)->spc;
block_put(bb);
bps = uint16_t_le2host(bs->bps);
spc = bs->spc;
boundary = ROUND_UP(nodep->size, bps * spc);
 
199,7 → 181,7
/* zero out already allocated space */
for (o = nodep->size - 1; o < pos && o < boundary;
o = ALIGN_DOWN(o + bps, bps)) {
b = fat_block_get(nodep, o / bps);
b = fat_block_get(bs, nodep, o / bps);
memset(b->data + o % bps, 0, bps - o % bps);
b->dirty = true; /* need to sync node */
block_put(b);
210,7 → 192,7
/* zero out the initial part of the new cluster chain */
for (o = boundary; o < pos; o += bps) {
b = _fat_block_get(nodep->idx->dev_handle, mcl,
b = _fat_block_get(bs, nodep->idx->dev_handle, mcl,
(o - boundary) / bps);
memset(b->data, 0, min(bps, pos - o));
b->dirty = true; /* need to sync node */
219,47 → 201,37
}
 
void
fat_mark_cluster(dev_handle_t dev_handle, unsigned fatno, fat_cluster_t clst,
fat_cluster_t value)
fat_mark_cluster(fat_bs_t *bs, dev_handle_t dev_handle, unsigned fatno,
fat_cluster_t clst, fat_cluster_t value)
{
block_t *bb, *blk;
block_t *b;
uint16_t bps;
uint16_t rscnt;
uint16_t sf;
uint8_t fatcnt;
fat_cluster_t *cp;
 
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
fatcnt = FAT_BS(bb)->fatcnt;
block_put(bb);
bps = uint16_t_le2host(bs->bps);
rscnt = uint16_t_le2host(bs->rscnt);
sf = uint16_t_le2host(bs->sec_per_fat);
 
assert(fatno < fatcnt);
blk = block_get(dev_handle, rscnt + sf * fatno +
assert(fatno < bs->fatcnt);
b = block_get(dev_handle, rscnt + sf * fatno +
(clst * sizeof(fat_cluster_t)) / bps, bps);
cp = (fat_cluster_t *)blk->data + clst % (bps / sizeof(fat_cluster_t));
cp = (fat_cluster_t *)b->data + clst % (bps / sizeof(fat_cluster_t));
*cp = host2uint16_t_le(value);
blk->dirty = true; /* need to sync block */
block_put(blk);
b->dirty = true; /* need to sync block */
block_put(b);
}
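The sector arithmetic inside fat_mark_cluster() is untouched by this revision; only the source of bps, rscnt and sf changes. For orientation, a worked example under assumed FAT16 geometry (bps = 512, rscnt = 1 reserved sector, sf = 9 sectors per FAT, 2-byte fat_cluster_t entries, with fatno = FAT1 taken as the first FAT):

    /* entry for cluster 300 in the first FAT:                             */
    /* block = rscnt + sf * fatno + (clst * sizeof(fat_cluster_t)) / bps   */
    /*       = 1 + 9 * 0 + (300 * 2) / 512 = 2                             */
    /* index = clst % (bps / sizeof(fat_cluster_t)) = 300 % 256 = 44       */
    b = block_get(dev_handle, 2, bps);
    cp = (fat_cluster_t *)b->data + 44;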
 
void fat_alloc_shadow_clusters(dev_handle_t dev_handle, fat_cluster_t *lifo,
unsigned nclsts)
void fat_alloc_shadow_clusters(fat_bs_t *bs, dev_handle_t dev_handle,
fat_cluster_t *lifo, unsigned nclsts)
{
uint8_t fatcnt;
uint8_t fatno;
unsigned c;
block_t *bb;
 
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
fatcnt = FAT_BS(bb)->fatcnt;
block_put(bb);
for (fatno = FAT1 + 1; fatno < fatcnt; fatno++) {
for (fatno = FAT1 + 1; fatno < bs->fatcnt; fatno++) {
for (c = 0; c < nclsts; c++) {
fat_mark_cluster(dev_handle, fatno, lifo[c],
fat_mark_cluster(bs, dev_handle, fatno, lifo[c],
c == 0 ? FAT_CLST_LAST1 : lifo[c - 1]);
}
}
266,13 → 238,13
}
 
int
fat_alloc_clusters(dev_handle_t dev_handle, unsigned nclsts, fat_cluster_t *mcl,
fat_cluster_t *lcl)
fat_alloc_clusters(fat_bs_t *bs, dev_handle_t dev_handle, unsigned nclsts,
fat_cluster_t *mcl, fat_cluster_t *lcl)
{
uint16_t bps;
uint16_t rscnt;
uint16_t sf;
block_t *bb, *blk;
block_t *blk;
fat_cluster_t *lifo; /* stack for storing free cluster numbers */
unsigned found = 0; /* top of the free cluster number stack */
unsigned b, c, cl;
281,11 → 253,9
lifo = (fat_cluster_t *) malloc(nclsts * sizeof(fat_cluster_t));
if (!lifo)
return ENOMEM;
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
block_put(bb);
bps = uint16_t_le2host(bs->bps);
rscnt = uint16_t_le2host(bs->rscnt);
sf = uint16_t_le2host(bs->sec_per_fat);
/*
* Search FAT1 for unused clusters.
308,8 → 278,8
/* we are almost done */
block_put(blk);
/* update the shadow copies of FAT */
fat_alloc_shadow_clusters(dev_handle,
lifo, nclsts);
fat_alloc_shadow_clusters(bs,
dev_handle, lifo, nclsts);
*mcl = lifo[found - 1];
*lcl = lifo[0];
free(lifo);
324,31 → 294,29
* We could not find enough clusters. Now we need to free the clusters
* we have allocated so far.
*/
while (found--)
fat_mark_cluster(dev_handle, FAT1, lifo[found], FAT_CLST_RES0);
while (found--) {
fat_mark_cluster(bs, dev_handle, FAT1, lifo[found],
FAT_CLST_RES0);
}
free(lifo);
return ENOSPC;
}
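fat_alloc_clusters() keeps its contract: on success it hands back the first (mcl) and last (lcl) cluster of a freshly linked chain, and on failure it rolls back every cluster it had provisionally marked. The fat_ops.c write path further down uses it roughly like this (a sketch with error handling and IPC replies stripped; nclsts and pos are the caller's values):

    fat_cluster_t mcl, lcl;
    int rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
    if (rc != EOK)
            return rc;                   /* typically ENOSPC or ENOMEM */
    fat_fill_gap(bs, nodep, mcl, pos);   /* zero the gap up to pos */
    fat_append_clusters(bs, nodep, mcl); /* link the new chain to the node */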
 
void fat_append_clusters(fat_node_t *nodep, fat_cluster_t mcl)
void fat_append_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl)
{
block_t *bb;
dev_handle_t dev_handle = nodep->idx->dev_handle;
fat_cluster_t lcl;
uint8_t fatcnt, fatno;
uint8_t fatno;
 
if (_fat_blcks_get(nodep->idx->dev_handle, nodep->firstc, &lcl) == 0) {
if (_fat_blcks_get(bs, dev_handle, nodep->firstc, &lcl) == 0) {
nodep->firstc = host2uint16_t_le(mcl);
nodep->dirty = true; /* need to sync node */
return;
}
 
bb = block_get(nodep->idx->dev_handle, BS_BLOCK, BS_SIZE);
fatcnt = FAT_BS(bb)->fatcnt;
block_put(bb);
 
for (fatno = FAT1; fatno < fatcnt; fatno++)
fat_mark_cluster(nodep->idx->dev_handle, fatno, lcl, mcl);
for (fatno = FAT1; fatno < bs->fatcnt; fatno++)
fat_mark_cluster(bs, nodep->idx->dev_handle, fatno, lcl, mcl);
}
 
/**
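Taken together, the fat_fat.c changes follow one pattern: every helper that used to do its own block_get(dev_handle, BS_BLOCK, BS_SIZE) / block_put() pair around the boot sector now receives a fat_bs_t * as its first parameter. The caller is expected to pin the boot sector once and hold it across the whole operation, roughly:

    block_t *bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);

    block_t *b = _fat_block_get(bb->data, dev_handle, firstc, offset);
    /* ... read or modify b->data ... */
    block_put(b);

    block_put(bb);   /* released only once the whole operation is done */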
/trunk/uspace/srv/fs/fat/fat_fat.h
50,27 → 50,31
/* internally used to mark root directory */
#define FAT_CLST_ROOT FAT_CLST_RES1
 
 
/* forward declarations */
struct block;
struct fat_node;
struct fat_bs;
 
typedef uint16_t fat_cluster_t;
 
#define fat_block_get(np, off) \
_fat_block_get((np)->idx->dev_handle, (np)->firstc, (off))
#define fat_block_get(bs, np, off) \
_fat_block_get((bs), (np)->idx->dev_handle, (np)->firstc, (off))
extern struct block *_fat_block_get(dev_handle_t, fat_cluster_t, off_t);
extern uint16_t _fat_blcks_get(dev_handle_t, fat_cluster_t, fat_cluster_t *);
extern uint16_t fat_bps_get(dev_handle_t);
extern struct block *_fat_block_get(struct fat_bs *, dev_handle_t,
fat_cluster_t, off_t);
extern uint16_t _fat_blcks_get(struct fat_bs *, dev_handle_t, fat_cluster_t,
fat_cluster_t *);
extern void fat_append_clusters(struct fat_node *, fat_cluster_t);
extern int fat_alloc_clusters(dev_handle_t, unsigned, fat_cluster_t *,
fat_cluster_t *);
extern void fat_alloc_shadow_clusters(dev_handle_t, fat_cluster_t *, unsigned);
extern void fat_mark_cluster(dev_handle_t, unsigned, fat_cluster_t,
extern void fat_append_clusters(struct fat_bs *, struct fat_node *,
fat_cluster_t);
extern void fat_fill_gap(struct fat_node *, fat_cluster_t, off_t);
extern int fat_alloc_clusters(struct fat_bs *, dev_handle_t, unsigned,
fat_cluster_t *, fat_cluster_t *);
extern void fat_alloc_shadow_clusters(struct fat_bs *, dev_handle_t,
fat_cluster_t *, unsigned);
extern void fat_mark_cluster(struct fat_bs *, dev_handle_t, unsigned,
fat_cluster_t, fat_cluster_t);
extern void fat_fill_gap(struct fat_bs *, struct fat_node *, fat_cluster_t,
off_t);
 
#endif
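The fat_block_get() convenience macro keeps its job of deriving the device handle and first cluster from a node; it merely forwards the new bs argument. Expanded by hand (nodep and blkoff are placeholder names):

    /* fat_block_get(bs, nodep, blkoff) expands to */
    _fat_block_get((bs), (nodep)->idx->dev_handle, (nodep)->firstc, (blkoff));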
 
/trunk/uspace/srv/fs/fat/fat_ops.c
125,7 → 125,7
*/
static void *fat_node_get_core(fat_idx_t *idxp)
{
block_t *b;
block_t *bb, *b;
fat_dentry_t *d;
fat_node_t *nodep = NULL;
unsigned bps;
178,11 → 178,12
}
fat_node_initialize(nodep);
 
bps = fat_bps_get(idxp->dev_handle);
bb = block_get(idxp->dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
dps = bps / sizeof(fat_dentry_t);
 
/* Read the block that contains the dentry of interest. */
b = _fat_block_get(idxp->dev_handle, idxp->pfc,
b = _fat_block_get(bb->data, idxp->dev_handle, idxp->pfc,
(idxp->pdi * sizeof(fat_dentry_t)) / bps);
assert(b);
 
199,7 → 200,7
* defined for the directory entry type. We must determine the
* size of the directory by walking the FAT.
*/
nodep->size = bps * _fat_blcks_get(idxp->dev_handle,
nodep->size = bps * _fat_blcks_get(bb->data, idxp->dev_handle,
uint16_t_le2host(d->firstc), NULL);
} else {
nodep->type = FAT_FILE;
210,6 → 211,7
nodep->refcnt = 1;
 
block_put(b);
block_put(bb);
 
/* Link the idx structure with the node structure. */
nodep->idx = idxp;
275,16 → 277,17
unsigned dps; /* dentries per sector */
unsigned blocks;
fat_dentry_t *d;
block_t *b;
block_t *bb, *b;
 
futex_down(&parentp->idx->lock);
bps = fat_bps_get(parentp->idx->dev_handle);
bb = block_get(parentp->idx->dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
dps = bps / sizeof(fat_dentry_t);
blocks = parentp->size / bps + (parentp->size % bps != 0);
for (i = 0; i < blocks; i++) {
unsigned dentries;
b = fat_block_get(parentp, i);
b = fat_block_get(bb->data, parentp, i);
dentries = (i == blocks - 1) ?
parentp->size % sizeof(fat_dentry_t) :
dps;
295,6 → 298,7
continue;
case FAT_DENTRY_LAST:
block_put(b);
block_put(bb);
futex_up(&parentp->idx->lock);
return NULL;
default:
321,16 → 325,20
* run out of 32-bit indices.
*/
block_put(b);
block_put(bb);
return NULL;
}
node = fat_node_get_core(idx);
futex_up(&idx->lock);
block_put(b);
block_put(bb);
return node;
}
}
block_put(b);
}
block_put(bb);
 
futex_up(&parentp->idx->lock);
return NULL;
}
359,7 → 367,7
unsigned bps;
unsigned dps;
unsigned blocks;
block_t *b;
block_t *bb, *b;
unsigned i, j;
 
if (nodep->type != FAT_DIRECTORY)
366,7 → 374,8
return false;
 
futex_down(&nodep->idx->lock);
bps = fat_bps_get(nodep->idx->dev_handle);
bb = block_get(nodep->idx->dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
dps = bps / sizeof(fat_dentry_t);
 
blocks = nodep->size / bps + (nodep->size % bps != 0);
375,7 → 384,7
unsigned dentries;
fat_dentry_t *d;
b = fat_block_get(nodep, i);
b = fat_block_get(bb->data, nodep, i);
dentries = (i == blocks - 1) ?
nodep->size % sizeof(fat_dentry_t) :
dps;
386,20 → 395,24
continue;
case FAT_DENTRY_LAST:
block_put(b);
block_put(bb);
futex_up(&nodep->idx->lock);
return false;
default:
case FAT_DENTRY_VALID:
block_put(b);
block_put(bb);
futex_up(&nodep->idx->lock);
return true;
}
block_put(b);
block_put(bb);
futex_up(&nodep->idx->lock);
return true;
}
block_put(b);
}
block_put(bb);
 
futex_up(&nodep->idx->lock);
return false;
552,9 → 565,9
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
off_t pos = (off_t)IPC_GET_ARG3(*request);
fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
uint16_t bps = fat_bps_get(dev_handle);
uint16_t bps;
size_t bytes;
block_t *b;
block_t *bb, *b;
 
if (!nodep) {
ipc_answer_0(rid, ENOENT);
570,6 → 583,9
return;
}
 
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
 
if (nodep->type == FAT_FILE) {
/*
* Our strategy for regular file reads is to read one block at
577,7 → 593,7
* requested. This keeps the code very simple.
*/
bytes = min(len, bps - pos % bps);
b = fat_block_get(nodep, pos / bps);
b = fat_block_get(bb->data, nodep, pos / bps);
(void) ipc_data_read_finalize(callid, b->data + pos % bps,
bytes);
block_put(b);
601,7 → 617,7
while (bnum < nodep->size / bps) {
off_t o;
 
b = fat_block_get(nodep, bnum);
b = fat_block_get(bb->data, nodep, bnum);
for (o = pos % (bps / sizeof(fat_dentry_t));
o < bps / sizeof(fat_dentry_t);
o++, pos++) {
624,6 → 640,7
}
miss:
fat_node_put(nodep);
block_put(bb);
ipc_answer_0(callid, ENOENT);
ipc_answer_1(rid, ENOENT, 0);
return;
633,6 → 650,7
}
 
fat_node_put(nodep);
block_put(bb);
ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
 
681,7 → 699,6
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
spc = FAT_BS(bb)->spc;
block_put(bb);
boundary = ROUND_UP(nodep->size, bps * spc);
if (pos < boundary) {
691,8 → 708,8
* the limits of the last cluster. The node size may grow to the
* next block size boundary.
*/
fat_fill_gap(nodep, FAT_CLST_RES0, pos);
b = fat_block_get(nodep, pos / bps);
fat_fill_gap(bb->data, nodep, FAT_CLST_RES0, pos);
b = fat_block_get(bb->data, nodep, pos / bps);
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
bytes);
b->dirty = true; /* need to sync block */
702,6 → 719,7
nodep->dirty = true; /* need to sync node */
}
fat_node_put(nodep);
block_put(bb);
ipc_answer_1(rid, EOK, bytes);
return;
} else {
716,17 → 734,20
nclsts = (ROUND_UP(pos + bytes, bps * spc) - boundary) /
bps * spc;
/* create an independent chain of nclsts clusters in all FATs */
status = fat_alloc_clusters(dev_handle, nclsts, &mcl, &lcl);
status = fat_alloc_clusters(bb->data, dev_handle, nclsts, &mcl,
&lcl);
if (status != EOK) {
/* could not allocate a chain of nclsts clusters */
fat_node_put(nodep);
block_put(bb);
ipc_answer_0(callid, status);
ipc_answer_0(rid, status);
return;
}
/* zero fill any gaps */
fat_fill_gap(nodep, mcl, pos);
b = _fat_block_get(dev_handle, lcl, (pos / bps) % spc);
fat_fill_gap(bb->data, nodep, mcl, pos);
b = _fat_block_get(bb->data, dev_handle, lcl,
(pos / bps) % spc);
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
bytes);
b->dirty = true; /* need to sync block */
735,10 → 756,11
* Append the cluster chain starting in mcl to the end of the
* node's cluster chain.
*/
fat_append_clusters(nodep, mcl);
fat_append_clusters(bb->data, nodep, mcl);
nodep->size = pos + bytes;
nodep->dirty = true; /* need to sync node */
fat_node_put(nodep);
block_put(bb);
ipc_answer_1(rid, EOK, bytes);
return;
}
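With these changes, fat_write() pins the boot sector once near the top and must release it on every exit path, which is why a block_put(bb) is added before each return above. Condensed, with IPC plumbing and error handling omitted, the new control flow reads roughly as follows (variable names as in the diff):

    bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    spc = FAT_BS(bb)->spc;
    boundary = ROUND_UP(nodep->size, bps * spc);

    if (pos < boundary) {
            /* writing inside already allocated space */
            fat_fill_gap(bb->data, nodep, FAT_CLST_RES0, pos);
            b = fat_block_get(bb->data, nodep, pos / bps);
            /* copy user data into b->data, mark b dirty */
            block_put(b);
    } else {
            /* growing the file: allocate an independent chain first */
            fat_alloc_clusters(bb->data, dev_handle, nclsts, &mcl, &lcl);
            fat_fill_gap(bb->data, nodep, mcl, pos);
            b = _fat_block_get(bb->data, dev_handle, lcl, (pos / bps) % spc);
            /* copy user data into b->data, mark b dirty */
            block_put(b);
            /* only then link the new chain to the node's own chain */
            fat_append_clusters(bb->data, nodep, mcl);
    }
    block_put(bb);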