Subversion Repositories HelenOS

Compare Revisions

Rev 4152 → Rev 4153

/branches/network/uspace/srv/fs/fat/fat_ops.c
36,8 → 36,11
*/
 
#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <libblock.h>
#include <ipc/ipc.h>
#include <ipc/services.h>
#include <ipc/devmap.h>
50,10 → 53,8
#include <assert.h>
#include <futex.h>
#include <sys/mman.h>
#include <align.h>
 
#define BS_BLOCK 0
#define BS_SIZE 512
 
/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;
 
60,216 → 61,6
/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);
 
#define FAT_NAME_LEN 8
#define FAT_EXT_LEN 3
 
#define FAT_PAD ' '
 
#define FAT_DENTRY_UNUSED 0x00
#define FAT_DENTRY_E5_ESC 0x05
#define FAT_DENTRY_DOT 0x2e
#define FAT_DENTRY_ERASED 0xe5
 
#define min(a, b) ((a) < (b) ? (a) : (b))
 
static void dentry_name_canonify(fat_dentry_t *d, char *buf)
{
int i;
 
for (i = 0; i < FAT_NAME_LEN; i++) {
if (d->name[i] == FAT_PAD)
break;
if (d->name[i] == FAT_DENTRY_E5_ESC)
*buf++ = 0xe5;
else
*buf++ = d->name[i];
}
if (d->ext[0] != FAT_PAD)
*buf++ = '.';
for (i = 0; i < FAT_EXT_LEN; i++) {
if (d->ext[i] == FAT_PAD) {
*buf = '\0';
return;
}
if (d->ext[i] == FAT_DENTRY_E5_ESC)
*buf++ = 0xe5;
else
*buf++ = d->ext[i];
}
*buf = '\0';
}
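
The routine above expands a stored 8.3 entry into a NUL-terminated "NAME.EXT" string: both fields are space-padded, and a stored 0x05 in the first byte stands in for a literal 0xE5, which would otherwise mark the entry as erased. The new code later in this diff relies on a fat_dentry_name_set() helper in fat_dentry.c for the opposite direction; a rough standalone sketch of such an encoder, reusing the FAT_NAME_LEN/FAT_EXT_LEN/FAT_PAD constants defined above, might look like the following (the demo_name_set name and the omitted 0xE5 escaping are simplifications, not the driver's real code):

#include <string.h>
#include <ctype.h>

/*
 * Illustrative inverse of dentry_name_canonify(): split "NAME.EXT" into
 * space-padded name and extension fields. A real encoder (such as
 * fat_dentry_name_set() used later in this diff) must also escape a
 * leading 0xe5 byte as FAT_DENTRY_E5_ESC; that detail is omitted here.
 */
static void demo_name_set(char *name, char *ext, const char *canon)
{
    int i;
    const char *dot = strchr(canon, '.');

    memset(name, FAT_PAD, FAT_NAME_LEN);
    memset(ext, FAT_PAD, FAT_EXT_LEN);

    for (i = 0; i < FAT_NAME_LEN && canon[i] && &canon[i] != dot; i++)
        name[i] = toupper((unsigned char) canon[i]);
    if (dot) {
        for (i = 0; i < FAT_EXT_LEN && dot[1 + i]; i++)
            ext[i] = toupper((unsigned char) dot[1 + i]);
    }
}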
 
static int dev_phone = -1; /* FIXME */
static void *dev_buffer = NULL; /* FIXME */
 
/* TODO move somewhere else */
typedef struct {
void *data;
size_t size;
} block_t;
 
static block_t *block_get(dev_handle_t dev_handle, off_t offset, size_t bs)
{
/* FIXME */
block_t *b;
off_t bufpos = 0;
size_t buflen = 0;
off_t pos = offset * bs;
 
assert(dev_phone != -1);
assert(dev_buffer);
 
b = malloc(sizeof(block_t));
if (!b)
return NULL;
b->data = malloc(bs);
if (!b->data) {
free(b);
return NULL;
}
b->size = bs;
 
if (!libfs_blockread(dev_phone, dev_buffer, &bufpos, &buflen, &pos,
b->data, bs, bs)) {
free(b->data);
free(b);
return NULL;
}
 
return b;
}
 
static void block_put(block_t *block)
{
/* FIXME */
free(block->data);
free(block);
}
 
#define FAT_BS(b) ((fat_bs_t *)((b)->data))
 
#define FAT_CLST_RES0 0x0000
#define FAT_CLST_RES1 0x0001
#define FAT_CLST_FIRST 0x0002
#define FAT_CLST_BAD 0xfff7
#define FAT_CLST_LAST1 0xfff8
#define FAT_CLST_LAST8 0xffff
 
/* internally used to mark root directory's parent */
#define FAT_CLST_ROOTPAR FAT_CLST_RES0
/* internally used to mark root directory */
#define FAT_CLST_ROOT FAT_CLST_RES1
 
#define fat_block_get(np, off) \
_fat_block_get((np)->idx->dev_handle, (np)->firstc, (off))
 
static block_t *
_fat_block_get(dev_handle_t dev_handle, fat_cluster_t firstc, off_t offset)
{
block_t *bb;
block_t *b;
unsigned bps;
unsigned spc;
unsigned rscnt; /* block address of the first FAT */
unsigned fatcnt;
unsigned rde;
unsigned rds; /* root directory size */
unsigned sf;
unsigned ssa; /* size of the system area */
unsigned clusters;
fat_cluster_t clst = firstc;
unsigned i;
 
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
spc = FAT_BS(bb)->spc;
rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
fatcnt = FAT_BS(bb)->fatcnt;
rde = uint16_t_le2host(FAT_BS(bb)->root_ent_max);
sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
block_put(bb);
 
rds = (sizeof(fat_dentry_t) * rde) / bps;
rds += ((sizeof(fat_dentry_t) * rde) % bps != 0);
ssa = rscnt + fatcnt * sf + rds;
 
if (firstc == FAT_CLST_ROOT) {
/* root directory special case */
assert(offset < rds);
b = block_get(dev_handle, rscnt + fatcnt * sf + offset, bps);
return b;
}
 
clusters = offset / spc;
for (i = 0; i < clusters; i++) {
unsigned fsec; /* sector offset relative to FAT1 */
unsigned fidx; /* FAT1 entry index */
 
assert(clst >= FAT_CLST_FIRST && clst < FAT_CLST_BAD);
fsec = (clst * sizeof(fat_cluster_t)) / bps;
fidx = clst % (bps / sizeof(fat_cluster_t));
/* read FAT1 */
b = block_get(dev_handle, rscnt + fsec, bps);
clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
assert(clst != FAT_CLST_BAD);
assert(clst < FAT_CLST_LAST1);
block_put(b);
}
 
b = block_get(dev_handle, ssa + (clst - FAT_CLST_FIRST) * spc +
offset % spc, bps);
 
return b;
}
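
_fat_block_get() maps a (first cluster, block offset) pair to an absolute device block: reserved sectors, all FAT copies and the root directory form a system area of ssa blocks, the FAT chain is walked offset / spc hops, and the data block is then ssa + (cluster - 2) * spc + offset % spc, since data cluster numbering starts at FAT_CLST_FIRST (2). A standalone worked example of that arithmetic, with an invented FAT16 geometry:

#include <stdio.h>

int main(void)
{
    /* Invented geometry: 512 B sectors, 4 sectors per cluster,
       1 reserved sector, 2 FATs of 16 sectors, 512 root entries. */
    unsigned bps = 512, spc = 4, rscnt = 1, fatcnt = 2, sf = 16, rde = 512;
    unsigned rds = (32 * rde + bps - 1) / bps;      /* 32-byte dentries -> 32 blocks */
    unsigned ssa = rscnt + fatcnt * sf + rds;       /* 1 + 32 + 32 = 65 */

    /* Pretend the FAT walk for offset / spc = 1 hop ended on cluster 5. */
    unsigned clst = 5;
    unsigned offset = 6;                            /* 7th block of the file */
    unsigned block = ssa + (clst - 2) * spc + offset % spc;

    printf("cluster %u, file block %u -> device block %u\n",
        clst, offset, block);                       /* 65 + 12 + 2 = 79 */
    return 0;
}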
 
/** Return number of blocks allocated to a file.
*
* @param dev_handle Device handle of the device with the file.
* @param firstc First cluster of the file.
*
* @return Number of blocks allocated to the file.
*/
static uint16_t
_fat_blcks_get(dev_handle_t dev_handle, fat_cluster_t firstc)
{
block_t *bb;
block_t *b;
unsigned bps;
unsigned spc;
unsigned rscnt; /* block address of the first FAT */
unsigned clusters = 0;
fat_cluster_t clst = firstc;
 
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
spc = FAT_BS(bb)->spc;
rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
block_put(bb);
 
if (firstc == FAT_CLST_RES0) {
/* No space allocated to the file. */
return 0;
}
 
while (clst < FAT_CLST_LAST1) {
unsigned fsec; /* sector offset relative to FAT1 */
unsigned fidx; /* FAT1 entry index */
 
assert(clst >= FAT_CLST_FIRST);
fsec = (clst * sizeof(fat_cluster_t)) / bps;
fidx = clst % (bps / sizeof(fat_cluster_t));
/* read FAT1 */
b = block_get(dev_handle, rscnt + fsec, bps);
clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
assert(clst != FAT_CLST_BAD);
block_put(b);
clusters++;
}
 
return clusters * spc;
}
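
Both helpers above locate the FAT16 entry of a cluster purely by arithmetic: the entry for cluster C sits in FAT sector rscnt + (C * sizeof(fat_cluster_t)) / bps at index C % (bps / sizeof(fat_cluster_t)). A quick standalone check with 512-byte sectors:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    unsigned bps = 512, rscnt = 1;
    unsigned epb = bps / sizeof(uint16_t);              /* 256 FAT16 entries per sector */
    unsigned clst = 1000;

    unsigned fsec = (clst * sizeof(uint16_t)) / bps;    /* 2000 / 512 = 3 */
    unsigned fidx = clst % epb;                         /* 1000 % 256 = 232 */

    assert(rscnt + fsec == 4);      /* the entry lives in device block 4 */
    assert(fidx == 232);
    return 0;
}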
 
static void fat_node_initialize(fat_node_t *node)
{
futex_initialize(&node->lock, 1);
282,52 → 73,73
node->dirty = false;
}
 
static uint16_t fat_bps_get(dev_handle_t dev_handle)
static void fat_node_sync(fat_node_t *node)
{
block_t *bb;
block_t *b;
fat_bs_t *bs;
fat_dentry_t *d;
uint16_t bps;
unsigned dps;
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
assert(bb != NULL);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
block_put(bb);
assert(node->dirty);
 
return bps;
}
bs = block_bb_get(node->idx->dev_handle);
bps = uint16_t_le2host(bs->bps);
dps = bps / sizeof(fat_dentry_t);
/* Read the block that contains the dentry of interest. */
b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
(node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
 
typedef enum {
FAT_DENTRY_SKIP,
FAT_DENTRY_LAST,
FAT_DENTRY_VALID
} fat_dentry_clsf_t;
d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);
 
static fat_dentry_clsf_t fat_classify_dentry(fat_dentry_t *d)
{
if (d->attr & FAT_ATTR_VOLLABEL) {
/* volume label entry */
return FAT_DENTRY_SKIP;
d->firstc = host2uint16_t_le(node->firstc);
if (node->type == FAT_FILE) {
d->size = host2uint32_t_le(node->size);
} else if (node->type == FAT_DIRECTORY) {
d->attr = FAT_ATTR_SUBDIR;
}
if (d->name[0] == FAT_DENTRY_ERASED) {
/* not-currently-used entry */
return FAT_DENTRY_SKIP;
}
if (d->name[0] == FAT_DENTRY_UNUSED) {
/* never used entry */
return FAT_DENTRY_LAST;
}
if (d->name[0] == FAT_DENTRY_DOT) {
/*
* Most likely '.' or '..'.
* It cannot occur in a regular file name.
*/
return FAT_DENTRY_SKIP;
}
return FAT_DENTRY_VALID;
/* TODO: update other fields? (e.g time fields) */
b->dirty = true; /* need to sync block */
block_put(b);
}
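
Later hunks in this diff switch on a FAT_DENTRY_FREE classification that the removed fat_classify_dentry() above never produced, so the classifier has evidently moved to fat_dentry.c and now tells reusable (erased) slots apart from entries that are merely skipped. What it plausibly looks like is sketched below with a simplified dentry model; treat the demo_ names and the exact enum as assumptions, only the 0x00/0x2e/0xe5 first-byte markers and the volume-label attribute are taken from the code above.

#include <stdint.h>

typedef enum {
    DEMO_DENTRY_SKIP,       /* volume label, '.' or '..' */
    DEMO_DENTRY_LAST,       /* never used: no valid entries follow */
    DEMO_DENTRY_FREE,       /* erased: the slot can be reused */
    DEMO_DENTRY_VALID       /* live file or directory entry */
} demo_dentry_clsf_t;

typedef struct {
    uint8_t name[8];
    uint8_t ext[3];
    uint8_t attr;
} demo_dentry_t;

/* Sketch only; the real classifier lives in fat_dentry.c. */
static demo_dentry_clsf_t demo_classify_dentry(const demo_dentry_t *d)
{
    if (d->attr & FAT_ATTR_VOLLABEL)
        return DEMO_DENTRY_SKIP;
    if (d->name[0] == FAT_DENTRY_ERASED)
        return DEMO_DENTRY_FREE;        /* 0xe5: not currently used */
    if (d->name[0] == FAT_DENTRY_UNUSED)
        return DEMO_DENTRY_LAST;        /* 0x00: never used */
    if (d->name[0] == FAT_DENTRY_DOT)
        return DEMO_DENTRY_SKIP;        /* most likely '.' or '..' */
    return DEMO_DENTRY_VALID;
}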
 
static void fat_node_sync(fat_node_t *node)
static fat_node_t *fat_node_get_new(void)
{
/* TODO */
fat_node_t *nodep;
 
futex_down(&ffn_futex);
if (!list_empty(&ffn_head)) {
/* Try to use a cached free node structure. */
fat_idx_t *idxp_tmp;
nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
goto skip_cache;
idxp_tmp = nodep->idx;
if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
futex_up(&nodep->lock);
goto skip_cache;
}
list_remove(&nodep->ffn_link);
futex_up(&ffn_futex);
if (nodep->dirty)
fat_node_sync(nodep);
idxp_tmp->nodep = NULL;
futex_up(&nodep->lock);
futex_up(&idxp_tmp->lock);
} else {
skip_cache:
/* Try to allocate a new node structure. */
futex_up(&ffn_futex);
nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
if (!nodep)
return NULL;
}
fat_node_initialize(nodep);
return nodep;
}
 
/** Internal version of fat_node_get().
337,9 → 149,11
static void *fat_node_get_core(fat_idx_t *idxp)
{
block_t *b;
fat_bs_t *bs;
fat_dentry_t *d;
fat_node_t *nodep = NULL;
unsigned bps;
unsigned spc;
unsigned dps;
 
if (idxp->nodep) {
360,41 → 174,18
assert(idxp->pfc);
 
futex_down(&ffn_futex);
if (!list_empty(&ffn_head)) {
/* Try to use a cached free node structure. */
fat_idx_t *idxp_tmp;
nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
goto skip_cache;
idxp_tmp = nodep->idx;
if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
futex_up(&nodep->lock);
goto skip_cache;
}
list_remove(&nodep->ffn_link);
futex_up(&ffn_futex);
if (nodep->dirty)
fat_node_sync(nodep);
idxp_tmp->nodep = NULL;
futex_up(&nodep->lock);
futex_up(&idxp_tmp->lock);
} else {
skip_cache:
/* Try to allocate a new node structure. */
futex_up(&ffn_futex);
nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
if (!nodep)
return NULL;
}
fat_node_initialize(nodep);
nodep = fat_node_get_new();
if (!nodep)
return NULL;
 
bps = fat_bps_get(idxp->dev_handle);
bs = block_bb_get(idxp->dev_handle);
bps = uint16_t_le2host(bs->bps);
spc = bs->spc;
dps = bps / sizeof(fat_dentry_t);
 
/* Read the block that contains the dentry of interest. */
b = _fat_block_get(idxp->dev_handle, idxp->pfc,
(idxp->pdi * sizeof(fat_dentry_t)) / bps);
b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
(idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
assert(b);
 
d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
410,7 → 201,7
* defined for the directory entry type. We must determine the
* size of the directory by walking the FAT.
*/
nodep->size = bps * _fat_blcks_get(idxp->dev_handle,
nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
uint16_t_le2host(d->firstc));
} else {
nodep->type = FAT_FILE;
429,8 → 220,31
return nodep;
}
 
/*
* Forward declarations of FAT libfs operations.
*/
static void *fat_node_get(dev_handle_t, fs_index_t);
static void fat_node_put(void *);
static void *fat_create_node(dev_handle_t, int);
static int fat_destroy_node(void *);
static int fat_link(void *, void *, const char *);
static int fat_unlink(void *, void *);
static void *fat_match(void *, const char *);
static fs_index_t fat_index_get(void *);
static size_t fat_size_get(void *);
static unsigned fat_lnkcnt_get(void *);
static bool fat_has_children(void *);
static void *fat_root_get(dev_handle_t);
static char fat_plb_get_char(unsigned);
static bool fat_is_directory(void *);
static bool fat_is_file(void *node);
 
/*
* FAT libfs operations.
*/
 
/** Instantiate a FAT in-core node. */
static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
void *node;
fat_idx_t *idxp;
444,41 → 258,318
return node;
}
 
static void fat_node_put(void *node)
void fat_node_put(void *node)
{
fat_node_t *nodep = (fat_node_t *)node;
bool destroy = false;
 
futex_down(&nodep->lock);
if (!--nodep->refcnt) {
futex_down(&ffn_futex);
list_append(&nodep->ffn_link, &ffn_head);
futex_up(&ffn_futex);
if (nodep->idx) {
futex_down(&ffn_futex);
list_append(&nodep->ffn_link, &ffn_head);
futex_up(&ffn_futex);
} else {
/*
* The node does not have any index structure associated
* with itself. This can only mean that we are releasing
* the node after a failed attempt to allocate the index
* structure for it.
*/
destroy = true;
}
}
futex_up(&nodep->lock);
if (destroy)
free(node);
}
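
fat_node_get() and fat_node_put() form a get/put pair around a reference count: dropping the last reference either parks the node on the cached free list or, if the node never received an index structure, frees it outright. A hypothetical caller, using only functions from this file, would pair them like this (dev_handle and index are placeholders, error handling is minimal):

static size_t demo_query_size(dev_handle_t dev_handle, fs_index_t index)
{
    void *node = fat_node_get(dev_handle, index);
    if (!node)
        return 0;
    size_t size = fat_size_get(node);
    fat_node_put(node);     /* drop the reference taken by fat_node_get() */
    return size;
}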
 
static void *fat_create(int flags)
void *fat_create_node(dev_handle_t dev_handle, int flags)
{
return NULL; /* not supported at the moment */
fat_idx_t *idxp;
fat_node_t *nodep;
fat_bs_t *bs;
fat_cluster_t mcl, lcl;
uint16_t bps;
int rc;
 
bs = block_bb_get(dev_handle);
bps = uint16_t_le2host(bs->bps);
if (flags & L_DIRECTORY) {
/* allocate a cluster */
rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
if (rc != EOK)
return NULL;
}
 
nodep = fat_node_get_new();
if (!nodep) {
fat_free_clusters(bs, dev_handle, mcl);
return NULL;
}
idxp = fat_idx_get_new(dev_handle);
if (!idxp) {
fat_free_clusters(bs, dev_handle, mcl);
fat_node_put(nodep);
return NULL;
}
/* idxp->lock held */
if (flags & L_DIRECTORY) {
int i;
block_t *b;
 
/*
* Populate the new cluster with unused dentries.
*/
for (i = 0; i < bs->spc; i++) {
b = _fat_block_get(bs, dev_handle, mcl, i,
BLOCK_FLAGS_NOREAD);
/* mark all dentries as never-used */
memset(b->data, 0, bps);
b->dirty = false;
block_put(b);
}
nodep->type = FAT_DIRECTORY;
nodep->firstc = mcl;
nodep->size = bps * bs->spc;
} else {
nodep->type = FAT_FILE;
nodep->firstc = FAT_CLST_RES0;
nodep->size = 0;
}
nodep->lnkcnt = 0; /* not linked anywhere */
nodep->refcnt = 1;
nodep->dirty = true;
 
nodep->idx = idxp;
idxp->nodep = nodep;
 
futex_up(&idxp->lock);
return nodep;
}
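
Note that fat_create_node() returns an unlinked node (lnkcnt == 0); it only becomes visible in the tree once fat_link() writes a dentry for it. The libfs layer drives that sequence on behalf of VFS; a hypothetical condensed version, with error handling trimmed and demo_mkdir purely illustrative, could read:

static int demo_mkdir(dev_handle_t dev_handle, void *parentp, const char *name)
{
    void *nodep = fat_create_node(dev_handle, L_DIRECTORY);
    if (!nodep)
        return ENOMEM;
    int rc = fat_link(parentp, nodep, name);
    if (rc != EOK) {
        (void) fat_destroy_node(nodep);     /* never linked: reclaim it */
        return rc;
    }
    fat_node_put(nodep);    /* linked into the tree: drop our reference */
    return EOK;
}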
 
static int fat_destroy(void *node)
int fat_destroy_node(void *node)
{
return ENOTSUP; /* not supported at the moment */
fat_node_t *nodep = (fat_node_t *)node;
fat_bs_t *bs;
 
/*
* The node is not reachable from the file system. This means that the
* link count should be zero and that the index structure cannot be
* found in the position hash. Obviously, we don't need to lock the node
* nor its index structure.
*/
assert(nodep->lnkcnt == 0);
 
/*
* The node may not have any children.
*/
assert(fat_has_children(node) == false);
 
bs = block_bb_get(nodep->idx->dev_handle);
if (nodep->firstc != FAT_CLST_RES0) {
assert(nodep->size);
/* Free all clusters allocated to the node. */
fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
}
 
fat_idx_destroy(nodep->idx);
free(nodep);
return EOK;
}
 
static bool fat_link(void *prnt, void *chld, const char *name)
int fat_link(void *prnt, void *chld, const char *name)
{
return false; /* not supported at the moment */
fat_node_t *parentp = (fat_node_t *)prnt;
fat_node_t *childp = (fat_node_t *)chld;
fat_dentry_t *d;
fat_bs_t *bs;
block_t *b;
int i, j;
uint16_t bps;
unsigned dps;
unsigned blocks;
fat_cluster_t mcl, lcl;
int rc;
 
futex_down(&childp->lock);
if (childp->lnkcnt == 1) {
/*
* On FAT, we don't support multiple hard links.
*/
futex_up(&childp->lock);
return EMLINK;
}
assert(childp->lnkcnt == 0);
futex_up(&childp->lock);
 
if (!fat_dentry_name_verify(name)) {
/*
* Attempt to create unsupported name.
*/
return ENOTSUP;
}
 
/*
* Get us an unused parent node's dentry or grow the parent and allocate
* a new one.
*/
futex_down(&parentp->idx->lock);
bs = block_bb_get(parentp->idx->dev_handle);
bps = uint16_t_le2host(bs->bps);
dps = bps / sizeof(fat_dentry_t);
 
blocks = parentp->size / bps;
 
for (i = 0; i < blocks; i++) {
b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
for (j = 0; j < dps; j++) {
d = ((fat_dentry_t *)b->data) + j;
switch (fat_classify_dentry(d)) {
case FAT_DENTRY_SKIP:
case FAT_DENTRY_VALID:
/* skipping used and meta entries */
continue;
case FAT_DENTRY_FREE:
case FAT_DENTRY_LAST:
/* found an empty slot */
goto hit;
}
}
block_put(b);
}
j = 0;
/*
* We need to grow the parent in order to create a new unused dentry.
*/
if (parentp->idx->pfc == FAT_CLST_ROOT) {
/* Can't grow the root directory. */
futex_up(&parentp->idx->lock);
return ENOSPC;
}
rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
if (rc != EOK) {
futex_up(&parentp->idx->lock);
return rc;
}
fat_append_clusters(bs, parentp, mcl);
b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NOREAD);
d = (fat_dentry_t *)b->data;
/*
* Clear all dentries in the block except for the first one (the first
* dentry will be cleared in the next step).
*/
memset(d + 1, 0, bps - sizeof(fat_dentry_t));
 
hit:
/*
* At this point we only establish the link between the parent and the
* child. The dentry, except for the name and the extension, will remain
* uninitialized until the corresponding node is synced. Thus the valid
* dentry data is kept in the child node structure.
*/
memset(d, 0, sizeof(fat_dentry_t));
fat_dentry_name_set(d, name);
b->dirty = true; /* need to sync block */
block_put(b);
futex_up(&parentp->idx->lock);
 
futex_down(&childp->idx->lock);
/*
* If possible, create the Sub-directory Identifier Entry and the
* Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
* are not mandatory according to Standard ECMA-107 and HelenOS VFS does
* not use them anyway, so this is rather a sign of our good will.
*/
b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE);
d = (fat_dentry_t *)b->data;
if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
strcmp(d->name, FAT_NAME_DOT) == 0) {
memset(d, 0, sizeof(fat_dentry_t));
strcpy(d->name, FAT_NAME_DOT);
strcpy(d->ext, FAT_EXT_PAD);
d->attr = FAT_ATTR_SUBDIR;
d->firstc = host2uint16_t_le(childp->firstc);
/* TODO: initialize also the date/time members. */
}
d++;
if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
strcmp(d->name, FAT_NAME_DOT_DOT) == 0) {
memset(d, 0, sizeof(fat_dentry_t));
strcpy(d->name, FAT_NAME_DOT_DOT);
strcpy(d->ext, FAT_EXT_PAD);
d->attr = FAT_ATTR_SUBDIR;
d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
host2uint16_t_le(FAT_CLST_RES0) :
host2uint16_t_le(parentp->firstc);
/* TODO: initialize also the date/time members. */
}
b->dirty = true; /* need to sync block */
block_put(b);
 
childp->idx->pfc = parentp->firstc;
childp->idx->pdi = i * dps + j;
futex_up(&childp->idx->lock);
 
futex_down(&childp->lock);
childp->lnkcnt = 1;
childp->dirty = true; /* need to sync node */
futex_up(&childp->lock);
 
/*
* Hash in the index structure into the position hash.
*/
fat_idx_hashin(childp->idx);
 
return EOK;
}
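
The index structure now records where the child's dentry lives: pfc is the parent's first cluster and pdi is the dentry's ordinal position within the parent, i * dps + j. fat_node_sync() and fat_unlink() later invert that, using pdi * sizeof(fat_dentry_t) / bps for the block and pdi % dps for the slot. A quick standalone check of that round trip with 512-byte blocks:

#include <assert.h>

int main(void)
{
    unsigned bps = 512;
    unsigned dps = bps / 32;        /* 32-byte dentries -> 16 per block */
    unsigned i = 3, j = 5;          /* 6th slot of the 4th directory block */

    unsigned pdi = i * dps + j;     /* 53 */
    assert((pdi * 32) / bps == i);  /* block index recovered */
    assert(pdi % dps == j);         /* slot within the block recovered */
    return 0;
}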
 
static int fat_unlink(void *prnt, void *chld)
int fat_unlink(void *prnt, void *chld)
{
return ENOTSUP; /* not supported at the moment */
fat_node_t *parentp = (fat_node_t *)prnt;
fat_node_t *childp = (fat_node_t *)chld;
fat_bs_t *bs;
fat_dentry_t *d;
uint16_t bps;
block_t *b;
 
futex_down(&parentp->lock);
futex_down(&childp->lock);
assert(childp->lnkcnt == 1);
futex_down(&childp->idx->lock);
bs = block_bb_get(childp->idx->dev_handle);
bps = uint16_t_le2host(bs->bps);
 
b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc,
(childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
BLOCK_FLAGS_NONE);
d = (fat_dentry_t *)b->data +
(childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
/* mark the dentry as not-currently-used */
d->name[0] = FAT_DENTRY_ERASED;
b->dirty = true; /* need to sync block */
block_put(b);
 
/* remove the index structure from the position hash */
fat_idx_hashout(childp->idx);
/* clear position information */
childp->idx->pfc = FAT_CLST_RES0;
childp->idx->pdi = 0;
futex_up(&childp->idx->lock);
childp->lnkcnt = 0;
childp->dirty = true;
futex_up(&childp->lock);
futex_up(&parentp->lock);
 
return EOK;
}
 
static void *fat_match(void *prnt, const char *component)
void *fat_match(void *prnt, const char *component)
{
fat_bs_t *bs;
fat_node_t *parentp = (fat_node_t *)prnt;
char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
unsigned i, j;
489,20 → 580,17
block_t *b;
 
futex_down(&parentp->idx->lock);
bps = fat_bps_get(parentp->idx->dev_handle);
bs = block_bb_get(parentp->idx->dev_handle);
bps = uint16_t_le2host(bs->bps);
dps = bps / sizeof(fat_dentry_t);
blocks = parentp->size / bps + (parentp->size % bps != 0);
blocks = parentp->size / bps;
for (i = 0; i < blocks; i++) {
unsigned dentries;
b = fat_block_get(parentp, i);
dentries = (i == blocks - 1) ?
parentp->size % sizeof(fat_dentry_t) :
dps;
for (j = 0; j < dentries; j++) {
b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
for (j = 0; j < dps; j++) {
d = ((fat_dentry_t *)b->data) + j;
switch (fat_classify_dentry(d)) {
case FAT_DENTRY_SKIP:
case FAT_DENTRY_FREE:
continue;
case FAT_DENTRY_LAST:
block_put(b);
510,10 → 598,10
return NULL;
default:
case FAT_DENTRY_VALID:
dentry_name_canonify(d, name);
fat_dentry_name_get(d, name);
break;
}
if (stricmp(name, component) == 0) {
if (fat_dentry_namecmp(name, component) == 0) {
/* hit */
void *node;
/*
542,11 → 630,12
}
block_put(b);
}
 
futex_up(&parentp->idx->lock);
return NULL;
}
 
static fs_index_t fat_index_get(void *node)
fs_index_t fat_index_get(void *node)
{
fat_node_t *fnodep = (fat_node_t *)node;
if (!fnodep)
554,18 → 643,19
return fnodep->idx->index;
}
 
static size_t fat_size_get(void *node)
size_t fat_size_get(void *node)
{
return ((fat_node_t *)node)->size;
}
 
static unsigned fat_lnkcnt_get(void *node)
unsigned fat_lnkcnt_get(void *node)
{
return ((fat_node_t *)node)->lnkcnt;
}
 
static bool fat_has_children(void *node)
bool fat_has_children(void *node)
{
fat_bs_t *bs;
fat_node_t *nodep = (fat_node_t *)node;
unsigned bps;
unsigned dps;
575,25 → 665,23
 
if (nodep->type != FAT_DIRECTORY)
return false;
 
futex_down(&nodep->idx->lock);
bps = fat_bps_get(nodep->idx->dev_handle);
bs = block_bb_get(nodep->idx->dev_handle);
bps = uint16_t_le2host(bs->bps);
dps = bps / sizeof(fat_dentry_t);
 
blocks = nodep->size / bps + (nodep->size % bps != 0);
blocks = nodep->size / bps;
 
for (i = 0; i < blocks; i++) {
unsigned dentries;
fat_dentry_t *d;
b = fat_block_get(nodep, i);
dentries = (i == blocks - 1) ?
nodep->size % sizeof(fat_dentry_t) :
dps;
for (j = 0; j < dentries; j++) {
b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
for (j = 0; j < dps; j++) {
d = ((fat_dentry_t *)b->data) + j;
switch (fat_classify_dentry(d)) {
case FAT_DENTRY_SKIP:
case FAT_DENTRY_FREE:
continue;
case FAT_DENTRY_LAST:
block_put(b);
616,22 → 704,22
return false;
}
 
static void *fat_root_get(dev_handle_t dev_handle)
void *fat_root_get(dev_handle_t dev_handle)
{
return fat_node_get(dev_handle, 0);
}
 
static char fat_plb_get_char(unsigned pos)
char fat_plb_get_char(unsigned pos)
{
return fat_reg.plb_ro[pos % PLB_SIZE];
}
 
static bool fat_is_directory(void *node)
bool fat_is_directory(void *node)
{
return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}
 
static bool fat_is_file(void *node)
bool fat_is_file(void *node)
{
return ((fat_node_t *)node)->type == FAT_FILE;
}
641,8 → 729,8
.match = fat_match,
.node_get = fat_node_get,
.node_put = fat_node_put,
.create = fat_create,
.destroy = fat_destroy,
.create = fat_create_node,
.destroy = fat_destroy_node,
.link = fat_link,
.unlink = fat_unlink,
.index_get = fat_index_get,
655,60 → 743,57
.is_file = fat_is_file
};
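
fat_libfs_ops is the callback table handed over to libfs, which implements the generic part of pathname lookup. The FAT server's lookup entry point then presumably just forwards VFS_LOOKUP requests to libfs together with this table; the forwarding stub is outside the hunks shown here, so take the exact call below as an assumption based on how libfs is used elsewhere in HelenOS:

void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}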
 
/*
* VFS operations.
*/
 
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
block_t *bb;
fat_bs_t *bs;
uint16_t bps;
uint16_t rde;
int rc;
 
/*
* For now, we don't bother to remember dev_handle, dev_phone or
* dev_buffer in some data structure. We use global variables because we
* know there will be at most one mount on this file system.
* Of course, this is a huge TODO item.
*/
dev_buffer = mmap(NULL, BS_SIZE, PROTO_READ | PROTO_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
if (!dev_buffer) {
ipc_answer_0(rid, ENOMEM);
/* initialize libblock */
rc = block_init(dev_handle, BS_SIZE);
if (rc != EOK) {
ipc_answer_0(rid, rc);
return;
}
 
dev_phone = ipc_connect_me_to(PHONE_NS, SERVICE_DEVMAP,
DEVMAP_CONNECT_TO_DEVICE, dev_handle);
 
if (dev_phone < 0) {
munmap(dev_buffer, BS_SIZE);
ipc_answer_0(rid, dev_phone);
return;
}
 
rc = ipc_share_out_start(dev_phone, dev_buffer,
AS_AREA_READ | AS_AREA_WRITE);
/* prepare the boot block */
rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
if (rc != EOK) {
munmap(dev_buffer, BS_SIZE);
block_fini(dev_handle);
ipc_answer_0(rid, rc);
return;
}
 
/* get the buffer with the boot sector */
bs = block_bb_get(dev_handle);
/* Read the number of root directory entries. */
bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
bps = uint16_t_le2host(FAT_BS(bb)->bps);
rde = uint16_t_le2host(FAT_BS(bb)->root_ent_max);
block_put(bb);
bps = uint16_t_le2host(bs->bps);
rde = uint16_t_le2host(bs->root_ent_max);
 
if (bps != BS_SIZE) {
munmap(dev_buffer, BS_SIZE);
block_fini(dev_handle);
ipc_answer_0(rid, ENOTSUP);
return;
}
 
/* Initialize the block cache */
rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
if (rc != EOK) {
block_fini(dev_handle);
ipc_answer_0(rid, rc);
return;
}
 
rc = fat_idx_init_by_dev_handle(dev_handle);
if (rc != EOK) {
munmap(dev_buffer, BS_SIZE);
block_fini(dev_handle);
ipc_answer_0(rid, rc);
return;
}
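
The mount path now goes through libblock instead of the removed mmap-plus-IPC-share scheme: block_init() attaches the device, block_bb_read() and block_bb_get() load and expose the boot block, block_cache_init() sets up the per-device block cache, and block_fini() tears it all down on any error. Condensed into one helper, with the signatures inferred from their use in this diff rather than quoted from libblock.h:

/* Skeleton of the libblock bring-up performed by fat_mounted(). */
static int demo_fat_bring_up(dev_handle_t dev_handle, uint16_t *bps)
{
    int rc = block_init(dev_handle, BS_SIZE);
    if (rc != EOK)
        return rc;
    rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
    if (rc != EOK) {
        block_fini(dev_handle);
        return rc;
    }
    fat_bs_t *bs = block_bb_get(dev_handle);    /* boot sector kept in memory */
    *bps = uint16_t_le2host(bs->bps);
    rc = block_cache_init(dev_handle, *bps, 0 /* XXX */);
    if (rc != EOK)
        block_fini(dev_handle);
    return rc;
}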
716,7 → 801,7
/* Initialize the root node. */
fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
if (!rootp) {
munmap(dev_buffer, BS_SIZE);
block_fini(dev_handle);
fat_idx_fini_by_dev_handle(dev_handle);
ipc_answer_0(rid, ENOMEM);
return;
725,7 → 810,7
 
fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
if (!ridxp) {
munmap(dev_buffer, BS_SIZE);
block_fini(dev_handle);
free(rootp);
fat_idx_fini_by_dev_handle(dev_handle);
ipc_answer_0(rid, ENOMEM);
763,7 → 848,8
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
off_t pos = (off_t)IPC_GET_ARG3(*request);
fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
uint16_t bps = fat_bps_get(dev_handle);
fat_bs_t *bs;
uint16_t bps;
size_t bytes;
block_t *b;
 
781,6 → 867,9
return;
}
 
bs = block_bb_get(dev_handle);
bps = uint16_t_le2host(bs->bps);
 
if (nodep->type == FAT_FILE) {
/*
* Our strategy for regular file reads is to read one block at
787,11 → 876,19
* most and make use of the possibility to return less data than
* requested. This keeps the code very simple.
*/
bytes = min(len, bps - pos % bps);
b = fat_block_get(nodep, pos / bps);
(void) ipc_data_read_finalize(callid, b->data + pos % bps,
bytes);
block_put(b);
if (pos >= nodep->size) {
/* reading beyond the EOF */
bytes = 0;
(void) ipc_data_read_finalize(callid, NULL, 0);
} else {
bytes = min(len, bps - pos % bps);
bytes = min(bytes, nodep->size - pos);
b = fat_block_get(bs, nodep, pos / bps,
BLOCK_FLAGS_NONE);
(void) ipc_data_read_finalize(callid, b->data + pos % bps,
bytes);
block_put(b);
}
} else {
unsigned bnum;
off_t spos = pos;
812,7 → 909,7
while (bnum < nodep->size / bps) {
off_t o;
 
b = fat_block_get(nodep, bnum);
b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
for (o = pos % (bps / sizeof(fat_dentry_t));
o < bps / sizeof(fat_dentry_t);
o++, pos++) {
819,6 → 916,7
d = ((fat_dentry_t *)b->data) + o;
switch (fat_classify_dentry(d)) {
case FAT_DENTRY_SKIP:
case FAT_DENTRY_FREE:
continue;
case FAT_DENTRY_LAST:
block_put(b);
825,7 → 923,7
goto miss;
default:
case FAT_DENTRY_VALID:
dentry_name_canonify(d, name);
fat_dentry_name_get(d, name);
block_put(b);
goto hit;
}
847,6 → 945,186
ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
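
For regular files the read path serves at most one block per request and clamps to the end of file, so the answer is bytes = min(len, bps - pos % bps) further limited to size - pos, and a read at or past the EOF returns 0 bytes. A standalone worked example with 512-byte blocks:

#include <stdio.h>

#define min(a, b)   ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned bps = 512;
    unsigned size = 1000;               /* file size */
    unsigned pos = 300, len = 4096;     /* request */

    unsigned bytes = min(len, bps - pos % bps);     /* 212: stop at the block end */
    bytes = min(bytes, size - pos);                 /* still 212 (700 bytes remain) */
    printf("%u bytes served from file block %u\n", bytes, pos / bps);
    return 0;
}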
 
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
off_t pos = (off_t)IPC_GET_ARG3(*request);
fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
fat_bs_t *bs;
size_t bytes;
block_t *b;
uint16_t bps;
unsigned spc;
unsigned bpc; /* bytes per cluster */
off_t boundary;
int flags = BLOCK_FLAGS_NONE;
if (!nodep) {
ipc_answer_0(rid, ENOENT);
return;
}
ipc_callid_t callid;
size_t len;
if (!ipc_data_write_receive(&callid, &len)) {
fat_node_put(nodep);
ipc_answer_0(callid, EINVAL);
ipc_answer_0(rid, EINVAL);
return;
}
 
bs = block_bb_get(dev_handle);
bps = uint16_t_le2host(bs->bps);
spc = bs->spc;
bpc = bps * spc;
 
/*
* In all scenarios, we will attempt to write out only one block worth
* of data at maximum. There might be some more efficient approaches,
* but this one greatly simplifies fat_write(). Note that we can afford
* to do this because the client must be ready to handle the return
* value signaling a smaller number of bytes written.
*/
bytes = min(len, bps - pos % bps);
if (bytes == bps)
flags |= BLOCK_FLAGS_NOREAD;
boundary = ROUND_UP(nodep->size, bpc);
if (pos < boundary) {
/*
* This is the easier case - we are either overwriting already
* existing contents or writing behind the EOF, but still within
* the limits of the last cluster. The node size may grow to the
* next block size boundary.
*/
fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
b = fat_block_get(bs, nodep, pos / bps, flags);
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
bytes);
b->dirty = true; /* need to sync block */
block_put(b);
if (pos + bytes > nodep->size) {
nodep->size = pos + bytes;
nodep->dirty = true; /* need to sync node */
}
ipc_answer_2(rid, EOK, bytes, nodep->size);
fat_node_put(nodep);
return;
} else {
/*
* This is the more difficult case. We must allocate new
* clusters for the node and zero them out.
*/
int status;
unsigned nclsts;
fat_cluster_t mcl, lcl;
nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
/* create an independent chain of nclsts clusters in all FATs */
status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
if (status != EOK) {
/* could not allocate a chain of nclsts clusters */
fat_node_put(nodep);
ipc_answer_0(callid, status);
ipc_answer_0(rid, status);
return;
}
/* zero fill any gaps */
fat_fill_gap(bs, nodep, mcl, pos);
b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
flags);
(void) ipc_data_write_finalize(callid, b->data + pos % bps,
bytes);
b->dirty = true; /* need to sync block */
block_put(b);
/*
* Append the cluster chain starting in mcl to the end of the
* node's cluster chain.
*/
fat_append_clusters(bs, nodep, mcl);
nodep->size = pos + bytes;
nodep->dirty = true; /* need to sync node */
ipc_answer_2(rid, EOK, bytes, nodep->size);
fat_node_put(nodep);
return;
}
}
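
Writes are likewise capped at one block. The harder branch handles a position beyond boundary = ROUND_UP(size, bpc), i.e. beyond what the existing cluster chain covers: (ROUND_UP(pos + bytes, bpc) - boundary) / bpc new clusters are allocated as a detached chain, the target block inside it is written, and only then is the chain appended, so a mid-way failure never leaves a half-grown file. A quick standalone check of the cluster count with 4 KiB clusters (ROUND_UP redefined locally just for the example):

#include <stdio.h>

#define ROUND_UP(n, b)  (((n) + (b) - 1) / (b) * (b))

int main(void)
{
    unsigned bpc = 4096;                /* bytes per cluster */
    unsigned size = 5000;               /* current file size */
    unsigned pos = 9000, bytes = 512;   /* write request */

    unsigned boundary = ROUND_UP(size, bpc);                            /* 8192 */
    unsigned nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;    /* 1 */
    printf("boundary %u, %u new cluster(s) needed\n", boundary, nclsts);
    return 0;
}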
 
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
size_t size = (off_t)IPC_GET_ARG3(*request);
fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
fat_bs_t *bs;
uint16_t bps;
uint8_t spc;
unsigned bpc; /* bytes per cluster */
int rc;
 
if (!nodep) {
ipc_answer_0(rid, ENOENT);
return;
}
 
bs = block_bb_get(dev_handle);
bps = uint16_t_le2host(bs->bps);
spc = bs->spc;
bpc = bps * spc;
 
if (nodep->size == size) {
rc = EOK;
} else if (nodep->size < size) {
/*
* The standard says we have the freedom to grow the node.
* For now, we simply return an error.
*/
rc = EINVAL;
} else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
/*
* The node will be shrunk, but no clusters will be deallocated.
*/
nodep->size = size;
nodep->dirty = true; /* need to sync node */
rc = EOK;
} else {
/*
* The node will be shrunk, clusters will be deallocated.
*/
if (size == 0) {
fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
} else {
fat_cluster_t lastc;
(void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
&lastc, (size - 1) / bpc);
fat_chop_clusters(bs, nodep, lastc);
}
nodep->size = size;
nodep->dirty = true; /* need to sync node */
rc = EOK;
}
fat_node_put(nodep);
ipc_answer_0(rid, rc);
return;
}
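
Truncation only releases storage when whole clusters become unused: if ROUND_UP(old size, bpc) equals ROUND_UP(new size, bpc) just the size field shrinks, otherwise the chain is chopped after cluster index (new size - 1) / bpc, or dropped entirely for size 0. A small standalone check with 4 KiB clusters:

#include <stdio.h>

#define ROUND_UP(n, b)  (((n) + (b) - 1) / (b) * (b))

int main(void)
{
    unsigned bpc = 4096;
    unsigned old_size = 10000, new_size = 5000;

    if (ROUND_UP(old_size, bpc) == ROUND_UP(new_size, bpc)) {
        printf("shrink in place, no clusters freed\n");
    } else {
        /* keep clusters 0 .. (new_size - 1) / bpc, chop the rest */
        printf("chop the chain after cluster index %u\n",
            (new_size - 1) / bpc);      /* 4999 / 4096 = 1 */
    }
    return 0;
}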
 
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
int rc;
 
fat_node_t *nodep = fat_node_get(dev_handle, index);
if (!nodep) {
ipc_answer_0(rid, ENOENT);
return;
}
 
rc = fat_destroy_node(nodep);
ipc_answer_0(rid, rc);
}
 
/**
* @}
*/