/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file    fat_ops.c
 * @brief   Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <libblock.h>
#include <ipc/ipc.h>
#include <ipc/services.h>
#include <ipc/devmap.h>
#include <async.h>
#include <errno.h>
#include <string.h>
#include <byteorder.h>
#include <libadt/hash_table.h>
#include <libadt/list.h>
#include <assert.h>
#include <futex.h>
#include <sys/mman.h>
#include <align.h>

/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);

static void fat_node_initialize(fat_node_t *node)
{
    futex_initialize(&node->lock, 1);
    node->idx = NULL;
    node->type = 0;
    link_initialize(&node->ffn_link);
    node->size = 0;
    node->lnkcnt = 0;
    node->refcnt = 0;
    node->dirty = false;
}

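/** Write the in-core node's metadata back to its on-disk dentry. */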
static void fat_node_sync(fat_node_t *node)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    uint16_t bps;
    unsigned dps;

    assert(node->dirty);

    bs = block_bb_get(node->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
        (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);

    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

    d->firstc = host2uint16_t_le(node->firstc);
    if (node->type == FAT_FILE)
        d->size = host2uint32_t_le(node->size);
    /* TODO: update other fields? (e.g. time fields, attr field) */

    b->dirty = true;        /* need to sync block */
    block_put(b);
}

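/** Obtain a free in-core FAT node, reusing a cached one if possible. */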
static fat_node_t *fat_node_get_new(void)
{
    fat_node_t *nodep;

    futex_down(&ffn_futex);
    if (!list_empty(&ffn_head)) {
        /* Try to use a cached free node structure. */
        fat_idx_t *idxp_tmp;
        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
        if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
            goto skip_cache;
        idxp_tmp = nodep->idx;
        if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
            futex_up(&nodep->lock);
            goto skip_cache;
        }
        list_remove(&nodep->ffn_link);
        futex_up(&ffn_futex);
        if (nodep->dirty)
            fat_node_sync(nodep);
        idxp_tmp->nodep = NULL;
        futex_up(&nodep->lock);
        futex_up(&idxp_tmp->lock);
    } else {
skip_cache:
        /* Try to allocate a new node structure. */
        futex_up(&ffn_futex);
        nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
        if (!nodep)
            return NULL;
    }
    fat_node_initialize(nodep);

    return nodep;
}

/** Internal version of fat_node_get().
 *
 * @param idxp      Locked index structure.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    fat_node_t *nodep = NULL;
    unsigned bps;
    unsigned spc;
    unsigned dps;

    if (idxp->nodep) {
        /*
         * We are lucky.
         * The node is already instantiated in memory.
         */
        futex_down(&idxp->nodep->lock);
        if (!idxp->nodep->refcnt++)
            list_remove(&idxp->nodep->ffn_link);
        futex_up(&idxp->nodep->lock);
        return idxp->nodep;
    }

    /*
     * We must instantiate the node from the file system.
     */

    assert(idxp->pfc);

    nodep = fat_node_get_new();
    if (!nodep)
        return NULL;

    bs = block_bb_get(idxp->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
        (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
    assert(b);

    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
    if (d->attr & FAT_ATTR_SUBDIR) {
        /*
         * The only directory which does not have this bit set is the
         * root directory itself. The root directory node is handled
         * and initialized elsewhere.
         */
        nodep->type = FAT_DIRECTORY;
        /*
         * Unfortunately, the 'size' field of the FAT dentry is not
         * defined for the directory entry type. We must determine the
         * size of the directory by walking the FAT.
         */
        nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
            uint16_t_le2host(d->firstc));
    } else {
        nodep->type = FAT_FILE;
        nodep->size = uint32_t_le2host(d->size);
    }
    nodep->firstc = uint16_t_le2host(d->firstc);
    nodep->lnkcnt = 1;
    nodep->refcnt = 1;

    block_put(b);

    /* Link the idx structure with the node structure. */
    nodep->idx = idxp;
    idxp->nodep = nodep;

    return nodep;
}

/*
 * Forward declarations of FAT libfs operations.
 */
static void *fat_node_get(dev_handle_t, fs_index_t);
static void fat_node_put(void *);
static void *fat_create_node(dev_handle_t, int);
static int fat_destroy_node(void *);
static int fat_link(void *, void *, const char *);
static int fat_unlink(void *, void *);
static void *fat_match(void *, const char *);
static fs_index_t fat_index_get(void *);
static size_t fat_size_get(void *);
static unsigned fat_lnkcnt_get(void *);
static bool fat_has_children(void *);
static void *fat_root_get(dev_handle_t);
static char fat_plb_get_char(unsigned);
static bool fat_is_directory(void *);
static bool fat_is_file(void *node);

/*
 * FAT libfs operations.
 */

/** Instantiate a FAT in-core node. */
void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
    void *node;
    fat_idx_t *idxp;

    idxp = fat_idx_get_by_index(dev_handle, index);
    if (!idxp)
        return NULL;
    /* idxp->lock held */
    node = fat_node_get_core(idxp);
    futex_up(&idxp->lock);
    return node;
}

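/** Drop a reference to a FAT in-core node.
 *
 * When the last reference is dropped, the node is either put on the list of
 * cached free nodes or, if it has no index structure, freed outright.
 */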
void fat_node_put(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;
    bool destroy = false;

    futex_down(&nodep->lock);
    if (!--nodep->refcnt) {
        if (nodep->idx) {
            futex_down(&ffn_futex);
            list_append(&nodep->ffn_link, &ffn_head);
            futex_up(&ffn_futex);
        } else {
            /*
             * The node does not have any index structure associated
             * with it. This can only mean that we are releasing the
             * node after a failed attempt to allocate the index
             * structure for it.
             */
            destroy = true;
        }
    }
    futex_up(&nodep->lock);
    if (destroy)
        free(node);
}

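/** Create a new FAT in-core node that is not yet linked anywhere. */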
void *fat_create_node(dev_handle_t dev_handle, int flags)
{
    fat_idx_t *idxp;
    fat_node_t *nodep;

    nodep = fat_node_get_new();
    if (!nodep)
        return NULL;
    idxp = fat_idx_get_new(dev_handle);
    if (!idxp) {
        fat_node_put(nodep);
        return NULL;
    }
    /* idxp->lock held */
    if (flags & L_DIRECTORY) {
        nodep->type = FAT_DIRECTORY;
    } else {
        nodep->type = FAT_FILE;
    }
    nodep->size = 0;
    nodep->firstc = FAT_CLST_RES0;
    nodep->lnkcnt = 0;  /* not linked anywhere */
    nodep->refcnt = 1;

    nodep->idx = idxp;
    idxp->nodep = nodep;

    futex_up(&idxp->lock);
    return nodep;
}

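/** Destroy an unlinked FAT node and free any clusters allocated to it. */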
int fat_destroy_node(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;
    fat_bs_t *bs;

    /*
     * The node is not reachable from the file system. This means that the
     * link count should be zero and that the index structure cannot be
     * found in the position hash. Obviously, we don't need to lock the node
     * or its index structure.
     */
    assert(nodep->lnkcnt == 0);

    /*
     * The node must not have any children.
     */
    assert(fat_has_children(node) == false);

    bs = block_bb_get(nodep->idx->dev_handle);
    if (nodep->firstc != FAT_CLST_RES0) {
        assert(nodep->size);
        /* Free all clusters allocated to the node. */
        fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
    }

    fat_idx_destroy(nodep->idx);
    free(nodep);
    return EOK;
}

int fat_link(void *prnt, void *chld, const char *name)
{
    return ENOTSUP; /* not supported at the moment */
}

int fat_unlink(void *prnt, void *chld)
{
    return ENOTSUP; /* not supported at the moment */
}

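/** Look up a directory entry by name.
 *
 * The name comparison is case-insensitive.
 *
 * @param prnt      Parent directory node.
 * @param component Name of the directory entry to look for.
 *
 * @return Matching child node or NULL.
 */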
void *fat_match(void *prnt, const char *component)
{
    fat_bs_t *bs;
    fat_node_t *parentp = (fat_node_t *)prnt;
    char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
    unsigned i, j;
    unsigned bps;       /* bytes per sector */
    unsigned dps;       /* dentries per sector */
    unsigned blocks;
    fat_dentry_t *d;
    block_t *b;

    futex_down(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);
    blocks = parentp->size / bps;
    for (i = 0; i < blocks; i++) {
        b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&parentp->idx->lock);
                return NULL;
            default:
            case FAT_DENTRY_VALID:
                dentry_name_canonify(d, name);
                break;
            }
            if (stricmp(name, component) == 0) {
                /* hit */
                void *node;
                /*
                 * Assume tree hierarchy for locking.  We
                 * already have the parent and now we are going
                 * to lock the child.  Never lock in the opposite
                 * order.
                 */
                fat_idx_t *idx = fat_idx_get_by_pos(
                    parentp->idx->dev_handle, parentp->firstc,
                    i * dps + j);
                futex_up(&parentp->idx->lock);
                if (!idx) {
                    /*
                     * Can happen if memory is low or if we
                     * run out of 32-bit indices.
                     */
                    block_put(b);
                    return NULL;
                }
                node = fat_node_get_core(idx);
                futex_up(&idx->lock);
                block_put(b);
                return node;
            }
        }
        block_put(b);
    }

    futex_up(&parentp->idx->lock);
    return NULL;
}

fs_index_t fat_index_get(void *node)
{
    fat_node_t *fnodep = (fat_node_t *)node;
    if (!fnodep)
        return 0;
    return fnodep->idx->index;
}

size_t fat_size_get(void *node)
{
    return ((fat_node_t *)node)->size;
}

unsigned fat_lnkcnt_get(void *node)
{
    return ((fat_node_t *)node)->lnkcnt;
}

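/** Tell whether the node is a directory containing at least one valid dentry. */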
bool fat_has_children(void *node)
{
    fat_bs_t *bs;
    fat_node_t *nodep = (fat_node_t *)node;
    unsigned bps;
    unsigned dps;
    unsigned blocks;
    block_t *b;
    unsigned i, j;

    if (nodep->type != FAT_DIRECTORY)
        return false;

    futex_down(&nodep->idx->lock);
    bs = block_bb_get(nodep->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    blocks = nodep->size / bps;

    for (i = 0; i < blocks; i++) {
        fat_dentry_t *d;

        b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return false;
            default:
            case FAT_DENTRY_VALID:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return true;
            }
            block_put(b);
            futex_up(&nodep->idx->lock);
            return true;
        }
        block_put(b);
    }

    futex_up(&nodep->idx->lock);
    return false;
}

void *fat_root_get(dev_handle_t dev_handle)
{
    return fat_node_get(dev_handle, 0);
}

char fat_plb_get_char(unsigned pos)
{
    return fat_reg.plb_ro[pos % PLB_SIZE];
}

bool fat_is_directory(void *node)
{
    return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}

bool fat_is_file(void *node)
{
    return ((fat_node_t *)node)->type == FAT_FILE;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
    .match = fat_match,
    .node_get = fat_node_get,
    .node_put = fat_node_put,
    .create = fat_create_node,
    .destroy = fat_destroy_node,
    .link = fat_link,
    .unlink = fat_unlink,
    .index_get = fat_index_get,
    .size_get = fat_size_get,
    .lnkcnt_get = fat_lnkcnt_get,
    .has_children = fat_has_children,
    .root_get = fat_root_get,
    .plb_get_char = fat_plb_get_char,
    .is_directory = fat_is_directory,
    .is_file = fat_is_file
};

/*
 * VFS operations.
 */

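/** Mount a FAT file system instance on a block device.
 *
 * Initializes libblock, the per-device block cache and the FAT index
 * structures, reads the boot sector and sets up the in-core root node.
 */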
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
    fat_bs_t *bs;
    uint16_t bps;
    uint16_t rde;
    int rc;

    /* initialize libblock */
    rc = block_init(dev_handle, BS_SIZE);
    if (rc != EOK) {
        ipc_answer_0(rid, rc);
        return;
    }

    /* prepare the boot block */
    rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* get the buffer with the boot sector */
    bs = block_bb_get(dev_handle);

    /* Read the number of root directory entries. */
    bps = uint16_t_le2host(bs->bps);
    rde = uint16_t_le2host(bs->root_ent_max);

    if (bps != BS_SIZE) {
        block_fini(dev_handle);
        ipc_answer_0(rid, ENOTSUP);
        return;
    }

    /* Initialize the block cache */
    rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    rc = fat_idx_init_by_dev_handle(dev_handle);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* Initialize the root node. */
    fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
    if (!rootp) {
        block_fini(dev_handle);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    fat_node_initialize(rootp);

    fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
    if (!ridxp) {
        block_fini(dev_handle);
        free(rootp);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    assert(ridxp->index == 0);
    /* ridxp->lock held */

    rootp->type = FAT_DIRECTORY;
    rootp->firstc = FAT_CLST_ROOT;
    rootp->refcnt = 1;
    rootp->lnkcnt = 0;  /* FS root is not linked */
    rootp->size = rde * sizeof(fat_dentry_t);
    rootp->idx = ridxp;
    ridxp->nodep = rootp;

    futex_up(&ridxp->lock);

    ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}

void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
    ipc_answer_0(rid, ENOTSUP);
}

void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

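/** Read bytes from a file or entries from a directory.
 *
 * For regular files, at most one block worth of data is returned per call.
 * For directories, the position is interpreted as a dentry index and the
 * name of the next valid dentry is returned.
 */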
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    size_t bytes;
    block_t *b;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_read_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);

    if (nodep->type == FAT_FILE) {
        /*
         * Our strategy for regular file reads is to read one block at
         * most and make use of the possibility to return less data than
         * requested. This keeps the code very simple.
         */
        if (pos >= nodep->size) {
            /* reading beyond the EOF */
            bytes = 0;
            (void) ipc_data_read_finalize(callid, NULL, 0);
        } else {
            bytes = min(len, bps - pos % bps);
            bytes = min(bytes, nodep->size - pos);
            b = fat_block_get(bs, nodep, pos / bps,
                BLOCK_FLAGS_NONE);
            (void) ipc_data_read_finalize(callid, b->data + pos % bps,
                bytes);
            block_put(b);
        }
    } else {
        unsigned bnum;
        off_t spos = pos;
        char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
        fat_dentry_t *d;

        assert(nodep->type == FAT_DIRECTORY);
        assert(nodep->size % bps == 0);
        assert(bps % sizeof(fat_dentry_t) == 0);

        /*
         * Our strategy for readdir() is to use the position pointer as
         * an index into the array of all dentries. On entry, it points
         * to the first unread dentry. If we skip any dentries, we bump
         * the position pointer accordingly.
         */
        bnum = (pos * sizeof(fat_dentry_t)) / bps;
        while (bnum < nodep->size / bps) {
            off_t o;

            b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
            for (o = pos % (bps / sizeof(fat_dentry_t));
                o < bps / sizeof(fat_dentry_t);
                o++, pos++) {
                d = ((fat_dentry_t *)b->data) + o;
                switch (fat_classify_dentry(d)) {
                case FAT_DENTRY_SKIP:
                    continue;
                case FAT_DENTRY_LAST:
                    block_put(b);
                    goto miss;
                default:
                case FAT_DENTRY_VALID:
                    dentry_name_canonify(d, name);
                    block_put(b);
                    goto hit;
                }
            }
            block_put(b);
            bnum++;
        }
miss:
        fat_node_put(nodep);
        ipc_answer_0(callid, ENOENT);
        ipc_answer_1(rid, ENOENT, 0);
        return;
hit:
        (void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
        bytes = (pos - spos) + 1;
    }

    fat_node_put(nodep);
    ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}

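/** Write bytes to a file.
 *
 * At most one block worth of data is written per call. New clusters are
 * allocated and zero-filled as needed when the write reaches past the space
 * covered by the node's last cluster.
 */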
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    size_t bytes;
    block_t *b;
    uint16_t bps;
    unsigned spc;
    unsigned bpc;       /* bytes per cluster */
    off_t boundary;
    int flags = BLOCK_FLAGS_NONE;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_write_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    /*
     * In all scenarios, we will attempt to write out only one block worth
     * of data at maximum. There might be some more efficient approaches,
     * but this one greatly simplifies fat_write(). Note that we can afford
     * to do this because the client must be ready to handle the return
     * value signaling a smaller number of bytes written.
     */
    bytes = min(len, bps - pos % bps);
    if (bytes == bps)
        flags |= BLOCK_FLAGS_NOREAD;

    boundary = ROUND_UP(nodep->size, bpc);
    if (pos < boundary) {
        /*
         * This is the easier case - we are either overwriting already
         * existing contents or writing beyond the EOF, but still within
         * the limits of the last cluster. The node size may grow to the
         * next block size boundary.
         */
        fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
        b = fat_block_get(bs, nodep, pos / bps, flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        if (pos + bytes > nodep->size) {
            nodep->size = pos + bytes;
            nodep->dirty = true;    /* need to sync node */
        }
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    } else {
        /*
         * This is the more difficult case. We must allocate new
         * clusters for the node and zero them out.
         */
        int status;
        unsigned nclsts;
        fat_cluster_t mcl, lcl;

        nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
        /* create an independent chain of nclsts clusters in all FATs */
        status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
        if (status != EOK) {
            /* could not allocate a chain of nclsts clusters */
            fat_node_put(nodep);
            ipc_answer_0(callid, status);
            ipc_answer_0(rid, status);
            return;
        }
        /* zero fill any gaps */
        fat_fill_gap(bs, nodep, mcl, pos);
        b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
            flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        /*
         * Append the cluster chain starting in mcl to the end of the
         * node's cluster chain.
         */
        fat_append_clusters(bs, nodep, mcl);
        nodep->size = pos + bytes;
        nodep->dirty = true;        /* need to sync node */
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    }
}

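/** Truncate a file to the requested size.
 *
 * Only shrinking is supported; an attempt to grow the node this way is
 * answered with EINVAL. Clusters past the new size are deallocated.
 */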
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    size_t size = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    uint8_t spc;
    unsigned bpc;   /* bytes per cluster */
    int rc;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    if (nodep->size == size) {
        rc = EOK;
    } else if (nodep->size < size) {
        /*
         * The standard says we have the freedom to grow the node.
         * For now, we simply return an error.
         */
        rc = EINVAL;
    } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
        /*
         * The node will be shrunk, but no clusters will be deallocated.
         */
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    } else {
        /*
         * The node will be shrunk and clusters will be deallocated.
         */
        if (size == 0) {
            fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
        } else {
            fat_cluster_t lastc;
            (void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
                &lastc, (size - 1) / bpc);
            fat_chop_clusters(bs, nodep, lastc);
        }
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    }
    fat_node_put(nodep);
    ipc_answer_0(rid, rc);
    return;
}

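/** Destroy the node identified by the request's device handle and index. */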
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    int rc;

    fat_node_t *nodep = fat_node_get(dev_handle, index);
    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    rc = fat_destroy_node(nodep);
    ipc_answer_0(rid, rc);
}

/**
 * @}
 */