/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file    fat_ops.c
 * @brief   Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
3593 rimsky 39
#include "fat_dentry.h"
40
#include "fat_fat.h"
2638 jermar 41
#include "../../vfs/vfs.h"
2793 jermar 42
#include <libfs.h>
3593 rimsky 43
#include <libblock.h>
2627 jermar 44
#include <ipc/ipc.h>
3257 jermar 45
#include <ipc/services.h>
46
#include <ipc/devmap.h>
2627 jermar 47
#include <async.h>
48
#include <errno.h>
2793 jermar 49
#include <string.h>
2798 jermar 50
#include <byteorder.h>
2831 jermar 51
#include <libadt/hash_table.h>
52
#include <libadt/list.h>
53
#include <assert.h>
2856 jermar 54
#include <futex.h>
3257 jermar 55
#include <sys/mman.h>
3593 rimsky 56
#include <align.h>
2627 jermar 57
 
2951 jermar 58
/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);

static void fat_node_initialize(fat_node_t *node)
{
    futex_initialize(&node->lock, 1);
    node->idx = NULL;
    node->type = 0;
    link_initialize(&node->ffn_link);
    node->size = 0;
    node->lnkcnt = 0;
    node->refcnt = 0;
    node->dirty = false;
}

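/** Write changes recorded in a dirty in-core node back to its on-disk dentry. */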
static void fat_node_sync(fat_node_t *node)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    uint16_t bps;
    unsigned dps;

    assert(node->dirty);

    bs = block_bb_get(node->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
        (node->idx->pdi * sizeof(fat_dentry_t)) / bps);

    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

    d->firstc = host2uint16_t_le(node->firstc);
    if (node->type == FAT_FILE)
        d->size = host2uint32_t_le(node->size);
    /* TODO: update other fields? (e.g. time fields, attr field) */

    b->dirty = true;        /* need to sync block */
    block_put(b);
}

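/** Obtain an empty in-core node.
 *
 * A cached free node is reused when available; otherwise a new node structure
 * is allocated. Returns NULL if the allocation fails.
 */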
static fat_node_t *fat_node_get_new(void)
{
    fat_node_t *nodep;

    futex_down(&ffn_futex);
    if (!list_empty(&ffn_head)) {
        /* Try to use a cached free node structure. */
        fat_idx_t *idxp_tmp;
        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
        if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
            goto skip_cache;
        idxp_tmp = nodep->idx;
        if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
            futex_up(&nodep->lock);
            goto skip_cache;
        }
        list_remove(&nodep->ffn_link);
        futex_up(&ffn_futex);
        if (nodep->dirty)
            fat_node_sync(nodep);
        idxp_tmp->nodep = NULL;
        futex_up(&nodep->lock);
        futex_up(&idxp_tmp->lock);
    } else {
skip_cache:
        /* Try to allocate a new node structure. */
        futex_up(&ffn_futex);
        nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
        if (!nodep)
            return NULL;
    }
    fat_node_initialize(nodep);

    return nodep;
}

/** Internal version of fat_node_get().
 *
 * @param idxp      Locked index structure.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    fat_node_t *nodep = NULL;
    unsigned bps;
    unsigned spc;
    unsigned dps;

    if (idxp->nodep) {
        /*
         * We are lucky.
         * The node is already instantiated in memory.
         */
        futex_down(&idxp->nodep->lock);
        if (!idxp->nodep->refcnt++)
            list_remove(&idxp->nodep->ffn_link);
        futex_up(&idxp->nodep->lock);
        return idxp->nodep;
    }

    /*
     * We must instantiate the node from the file system.
     */

    assert(idxp->pfc);

    nodep = fat_node_get_new();
    if (!nodep)
        return NULL;

    bs = block_bb_get(idxp->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
        (idxp->pdi * sizeof(fat_dentry_t)) / bps);
    assert(b);

    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
    if (d->attr & FAT_ATTR_SUBDIR) {
        /*
         * The only directory which does not have this bit set is the
         * root directory itself. The root directory node is handled
         * and initialized elsewhere.
         */
        nodep->type = FAT_DIRECTORY;
        /*
         * Unfortunately, the 'size' field of the FAT dentry is not
         * defined for the directory entry type. We must determine the
         * size of the directory by walking the FAT.
         */
        nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
            uint16_t_le2host(d->firstc));
    } else {
        nodep->type = FAT_FILE;
        nodep->size = uint32_t_le2host(d->size);
    }
    nodep->firstc = uint16_t_le2host(d->firstc);
    nodep->lnkcnt = 1;
    nodep->refcnt = 1;

    block_put(b);

    /* Link the idx structure with the node structure. */
    nodep->idx = idxp;
    idxp->nodep = nodep;

    return nodep;
}

/** Instantiate a FAT in-core node. */
static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
    void *node;
    fat_idx_t *idxp;

    idxp = fat_idx_get_by_index(dev_handle, index);
    if (!idxp)
        return NULL;
    /* idxp->lock held */
    node = fat_node_get_core(idxp);
    futex_up(&idxp->lock);
    return node;
}

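/** Drop a reference to an in-core node.
 *
 * When the last reference is dropped, the node is put on the list of cached
 * free nodes so that it can be reclaimed or reused later.
 */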
static void fat_node_put(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;

    futex_down(&nodep->lock);
    if (!--nodep->refcnt) {
        futex_down(&ffn_futex);
        list_append(&nodep->ffn_link, &ffn_head);
        futex_up(&ffn_futex);
    }
    futex_up(&nodep->lock);
}

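/*
 * The following operations (create, destroy, link and unlink) are not
 * supported at the moment and merely answer with a failure code.
 */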
static void *fat_create(dev_handle_t dev_handle, int flags)
{
    return NULL;    /* not supported at the moment */
}

static int fat_destroy(void *node)
{
    return ENOTSUP; /* not supported at the moment */
}

static bool fat_link(void *prnt, void *chld, const char *name)
{
    return false;   /* not supported at the moment */
}

static int fat_unlink(void *prnt, void *chld)
{
    return ENOTSUP; /* not supported at the moment */
}

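/** Look up a directory entry by name.
 *
 * The parent directory is scanned dentry by dentry; on a case-insensitive
 * match of the canonified name, the corresponding in-core node is
 * instantiated and returned.
 */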
static void *fat_match(void *prnt, const char *component)
{
    fat_bs_t *bs;
    fat_node_t *parentp = (fat_node_t *)prnt;
    char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
    unsigned i, j;
    unsigned bps;       /* bytes per sector */
    unsigned dps;       /* dentries per sector */
    unsigned blocks;
    fat_dentry_t *d;
    block_t *b;

    futex_down(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);
    blocks = parentp->size / bps;
    for (i = 0; i < blocks; i++) {
        b = fat_block_get(bs, parentp, i);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&parentp->idx->lock);
                return NULL;
            default:
            case FAT_DENTRY_VALID:
                dentry_name_canonify(d, name);
                break;
            }
            if (stricmp(name, component) == 0) {
                /* hit */
                void *node;
                /*
                 * Assume tree hierarchy for locking.  We
                 * already have the parent and now we are going
                 * to lock the child.  Never lock in the opposite
                 * order.
                 */
                fat_idx_t *idx = fat_idx_get_by_pos(
                    parentp->idx->dev_handle, parentp->firstc,
                    i * dps + j);
                futex_up(&parentp->idx->lock);
                if (!idx) {
                    /*
                     * Can happen if memory is low or if we
                     * run out of 32-bit indices.
                     */
                    block_put(b);
                    return NULL;
                }
                node = fat_node_get_core(idx);
                futex_up(&idx->lock);
                block_put(b);
                return node;
            }
        }
        block_put(b);
    }

    futex_up(&parentp->idx->lock);
    return NULL;
}

static fs_index_t fat_index_get(void *node)
{
    fat_node_t *fnodep = (fat_node_t *)node;
    if (!fnodep)
        return 0;
    return fnodep->idx->index;
}

static size_t fat_size_get(void *node)
{
    return ((fat_node_t *)node)->size;
}

static unsigned fat_lnkcnt_get(void *node)
{
    return ((fat_node_t *)node)->lnkcnt;
}

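/** Tell whether a directory node contains at least one valid dentry. */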
static bool fat_has_children(void *node)
{
    fat_bs_t *bs;
    fat_node_t *nodep = (fat_node_t *)node;
    unsigned bps;
    unsigned dps;
    unsigned blocks;
    block_t *b;
    unsigned i, j;

    if (nodep->type != FAT_DIRECTORY)
        return false;

    futex_down(&nodep->idx->lock);
    bs = block_bb_get(nodep->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    blocks = nodep->size / bps;

    for (i = 0; i < blocks; i++) {
        fat_dentry_t *d;

        b = fat_block_get(bs, nodep, i);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return false;
            default:
            case FAT_DENTRY_VALID:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return true;
            }
            block_put(b);
            futex_up(&nodep->idx->lock);
            return true;
        }
        block_put(b);
    }

    futex_up(&nodep->idx->lock);
    return false;
}

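/** Return the in-core node of the file system root (index 0 on the device). */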
static void *fat_root_get(dev_handle_t dev_handle)
{
    return fat_node_get(dev_handle, 0);
}

static char fat_plb_get_char(unsigned pos)
{
    return fat_reg.plb_ro[pos % PLB_SIZE];
}

static bool fat_is_directory(void *node)
{
    return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}

static bool fat_is_file(void *node)
{
    return ((fat_node_t *)node)->type == FAT_FILE;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
    .match = fat_match,
    .node_get = fat_node_get,
    .node_put = fat_node_put,
    .create = fat_create,
    .destroy = fat_destroy,
    .link = fat_link,
    .unlink = fat_unlink,
    .index_get = fat_index_get,
    .size_get = fat_size_get,
    .lnkcnt_get = fat_lnkcnt_get,
    .has_children = fat_has_children,
    .root_get = fat_root_get,
    .plb_get_char = fat_plb_get_char,
    .is_directory = fat_is_directory,
    .is_file = fat_is_file
};

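/** Handle the mount notification for a FAT device.
 *
 * Initializes libblock for the device, reads and sanity-checks the boot
 * sector, sets up the block cache and the per-device index structures, and
 * creates the in-core node of the root directory.
 */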
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
    fat_bs_t *bs;
    uint16_t bps;
    uint16_t rde;
    int rc;

    /* initialize libblock */
    rc = block_init(dev_handle, BS_SIZE);
    if (rc != EOK) {
        ipc_answer_0(rid, rc);
        return;
    }

    /* prepare the boot block */
    rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* get the buffer with the boot sector */
    bs = block_bb_get(dev_handle);

    /* Read the number of root directory entries. */
    bps = uint16_t_le2host(bs->bps);
    rde = uint16_t_le2host(bs->root_ent_max);

    if (bps != BS_SIZE) {
        block_fini(dev_handle);
        ipc_answer_0(rid, ENOTSUP);
        return;
    }

    /* Initialize the block cache */
    rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    rc = fat_idx_init_by_dev_handle(dev_handle);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* Initialize the root node. */
    fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
    if (!rootp) {
        block_fini(dev_handle);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    fat_node_initialize(rootp);

    fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
    if (!ridxp) {
        block_fini(dev_handle);
        free(rootp);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    assert(ridxp->index == 0);
    /* ridxp->lock held */

    rootp->type = FAT_DIRECTORY;
    rootp->firstc = FAT_CLST_ROOT;
    rootp->refcnt = 1;
    rootp->lnkcnt = 0;  /* FS root is not linked */
    rootp->size = rde * sizeof(fat_dentry_t);
    rootp->idx = ridxp;
    ridxp->nodep = rootp;

    futex_up(&ridxp->lock);

    ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}

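/** Handle a mount request; not supported at the moment. */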
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
    ipc_answer_0(rid, ENOTSUP);
}

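/** Handle a path lookup request by delegating to libfs_lookup() with the FAT
 * operations table.
 */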
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

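/** Handle a read request.
 *
 * For regular files, at most one block worth of data is returned per call.
 * For directories, the position is treated as a dentry index and a single
 * canonified entry name is returned.
 */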
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    size_t bytes;
    block_t *b;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_read_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);

    if (nodep->type == FAT_FILE) {
        /*
         * Our strategy for regular file reads is to read one block at
         * most and make use of the possibility to return less data than
         * requested. This keeps the code very simple.
         */
        if (pos >= nodep->size) {
            /* reading beyond the EOF */
            bytes = 0;
            (void) ipc_data_read_finalize(callid, NULL, 0);
        } else {
            bytes = min(len, bps - pos % bps);
            bytes = min(bytes, nodep->size - pos);
            b = fat_block_get(bs, nodep, pos / bps);
            (void) ipc_data_read_finalize(callid, b->data + pos % bps,
                bytes);
            block_put(b);
        }
    } else {
        unsigned bnum;
        off_t spos = pos;
        char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
        fat_dentry_t *d;

        assert(nodep->type == FAT_DIRECTORY);
        assert(nodep->size % bps == 0);
        assert(bps % sizeof(fat_dentry_t) == 0);

        /*
         * Our strategy for readdir() is to use the position pointer as
         * an index into the array of all dentries. On entry, it points
         * to the first unread dentry. If we skip any dentries, we bump
         * the position pointer accordingly.
         */
        bnum = (pos * sizeof(fat_dentry_t)) / bps;
        while (bnum < nodep->size / bps) {
            off_t o;

            b = fat_block_get(bs, nodep, bnum);
            for (o = pos % (bps / sizeof(fat_dentry_t));
                o < bps / sizeof(fat_dentry_t);
                o++, pos++) {
                d = ((fat_dentry_t *)b->data) + o;
                switch (fat_classify_dentry(d)) {
                case FAT_DENTRY_SKIP:
                    continue;
                case FAT_DENTRY_LAST:
                    block_put(b);
                    goto miss;
                default:
                case FAT_DENTRY_VALID:
                    dentry_name_canonify(d, name);
                    block_put(b);
                    goto hit;
                }
            }
            block_put(b);
            bnum++;
        }
miss:
        fat_node_put(nodep);
        ipc_answer_0(callid, ENOENT);
        ipc_answer_1(rid, ENOENT, 0);
        return;
hit:
        (void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
        bytes = (pos - spos) + 1;
    }

    fat_node_put(nodep);
    ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}

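/** Handle a write request.
 *
 * At most one block worth of data is written per call. Writes beyond the
 * currently allocated clusters cause a new cluster chain to be allocated,
 * zero-filled and appended to the node.
 */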
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    size_t bytes;
    block_t *b;
    uint16_t bps;
    unsigned spc;
    unsigned bpc;       /* bytes per cluster */
    off_t boundary;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_write_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    /*
     * In all scenarios, we will attempt to write out only one block worth
     * of data at maximum. There might be some more efficient approaches,
     * but this one greatly simplifies fat_write(). Note that we can afford
     * to do this because the client must be ready to handle the return
     * value signaling a smaller number of bytes written.
     */
    bytes = min(len, bps - pos % bps);

    boundary = ROUND_UP(nodep->size, bpc);
    if (pos < boundary) {
        /*
         * This is the easier case - we are either overwriting already
         * existing contents or writing behind the EOF, but still within
         * the limits of the last cluster. The node size may grow to the
         * next block size boundary.
         */
        fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
        b = fat_block_get(bs, nodep, pos / bps);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        if (pos + bytes > nodep->size) {
            nodep->size = pos + bytes;
            nodep->dirty = true;    /* need to sync node */
        }
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    } else {
        /*
         * This is the more difficult case. We must allocate new
         * clusters for the node and zero them out.
         */
        int status;
        unsigned nclsts;
        fat_cluster_t mcl, lcl;

        nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
        /* create an independent chain of nclsts clusters in all FATs */
        status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
        if (status != EOK) {
            /* could not allocate a chain of nclsts clusters */
            fat_node_put(nodep);
            ipc_answer_0(callid, status);
            ipc_answer_0(rid, status);
            return;
        }
        /* zero fill any gaps */
        fat_fill_gap(bs, nodep, mcl, pos);
        b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        /*
         * Append the cluster chain starting in mcl to the end of the
         * node's cluster chain.
         */
        fat_append_clusters(bs, nodep, mcl);
        nodep->size = pos + bytes;
        nodep->dirty = true;        /* need to sync node */
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    }
}

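/** Handle a truncate request.
 *
 * Only shrinking is supported; an attempt to grow the node is answered with
 * EINVAL. Clusters beyond the new size are deallocated as needed.
 */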
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    size_t size = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    uint8_t spc;
    unsigned bpc;   /* bytes per cluster */
    int rc;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    if (nodep->size == size) {
        rc = EOK;
    } else if (nodep->size < size) {
        /*
         * The standard says we have the freedom to grow the node.
         * For now, we simply return an error.
         */
        rc = EINVAL;
    } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
        /*
         * The node will be shrunk, but no clusters will be deallocated.
         */
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    } else {
        /*
         * The node will be shrunk, clusters will be deallocated.
         */
        if (size == 0) {
            fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
        } else {
            fat_cluster_t lastc;
            (void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
                &lastc, (size - 1) / bpc);
            fat_chop_clusters(bs, nodep, lastc);
        }
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    }
    fat_node_put(nodep);
    ipc_answer_0(rid, rc);
    return;
}

/**
 * @}
 */