Subversion Repositories HelenOS

Rev

Rev 3593 | Rev 3665 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2627 jermar 1
/*
2793 jermar 2
 * Copyright (c) 2008 Jakub Jermar
2627 jermar 3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 *
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
28
 
29
/** @addtogroup fs
30
 * @{
31
 */
32
 
33
/**
34
 * @file    fat_ops.c
35
 * @brief   Implementation of VFS operations for the FAT file system server.
36
 */
37
 
38
#include "fat.h"
3593 rimsky 39
#include "fat_dentry.h"
40
#include "fat_fat.h"
2638 jermar 41
#include "../../vfs/vfs.h"
2793 jermar 42
#include <libfs.h>
3593 rimsky 43
#include <libblock.h>
2627 jermar 44
#include <ipc/ipc.h>
3257 jermar 45
#include <ipc/services.h>
46
#include <ipc/devmap.h>
2627 jermar 47
#include <async.h>
48
#include <errno.h>
2793 jermar 49
#include <string.h>
2798 jermar 50
#include <byteorder.h>
2831 jermar 51
#include <libadt/hash_table.h>
52
#include <libadt/list.h>
53
#include <assert.h>
2856 jermar 54
#include <futex.h>
3257 jermar 55
#include <sys/mman.h>
3593 rimsky 56
#include <align.h>
2627 jermar 57
 
2951 jermar 58
/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/** List of cached free FAT nodes (refcnt == 0, reclaimable by fat_node_get_new()). */
static LIST_INITIALIZE(ffn_head);
63
 
2831 jermar 64
/** Bring a FAT in-core node into a well-defined, unattached state.
 *
 * @param node      Node structure to initialize.
 */
static void fat_node_initialize(fat_node_t *node)
{
    /* Synchronization primitive and free-list linkage. */
    futex_initialize(&node->lock, 1);
    link_initialize(&node->ffn_link);

    /* Not attached to any index structure, no type, no references. */
    node->idx = NULL;
    node->type = 0;
    node->lnkcnt = 0;
    node->refcnt = 0;

    /* Empty and clean. */
    node->size = 0;
    node->dirty = false;
}
75
 
3593 rimsky 76
/** Write the cached metadata of a dirty in-core node back to its on-disk
 *  dentry.
 *
 * NOTE(review): callers appear to hold node->idx->lock when calling this
 * (see fat_node_get_new()) — confirm before relying on it.
 *
 * @param node      Dirty FAT in-core node to synchronize.
 */
static void fat_node_sync(fat_node_t *node)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    uint16_t bps;       /* bytes per sector */
    unsigned dps;       /* dentries per sector */

    assert(node->dirty);

    bs = block_bb_get(node->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
        (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);

    /* Locate the dentry inside the block by its position within parent. */
    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

    /* Propagate the cached first cluster and, for regular files, the size. */
    d->firstc = host2uint16_t_le(node->firstc);
    if (node->type == FAT_FILE)
        d->size = host2uint32_t_le(node->size);
    /* TODO: update other fields? (e.g time fields, attr field) */

    b->dirty = true;        /* need to sync block */
    block_put(b);
}
104
 
3593 rimsky 105
/** Obtain an initialized FAT in-core node structure.
 *
 * Prefers reclaiming a node from the cache of free nodes; falls back to
 * heap allocation if the cache is empty or its head cannot be locked
 * without blocking.
 *
 * @return      Fresh node structure, or NULL if out of memory.
 */
static fat_node_t *fat_node_get_new(void)
{
    fat_node_t *nodep;

    futex_down(&ffn_futex);
    if (!list_empty(&ffn_head)) {
        /* Try to use a cached free node structure. */
        fat_idx_t *idxp_tmp;
        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
        /*
         * Use trylocks only: we already hold ffn_futex and must not
         * block on the node or index locks, or we risk deadlock.
         */
        if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
            goto skip_cache;
        idxp_tmp = nodep->idx;
        if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
            futex_up(&nodep->lock);
            goto skip_cache;
        }
        list_remove(&nodep->ffn_link);
        futex_up(&ffn_futex);
        /* Flush pending metadata before the structure is reused. */
        if (nodep->dirty)
            fat_node_sync(nodep);
        /* Detach the old index structure from the recycled node. */
        idxp_tmp->nodep = NULL;
        futex_up(&nodep->lock);
        futex_up(&idxp_tmp->lock);
    } else {
skip_cache:
        /*
         * Try to allocate a new node structure.
         * Note the goto jumps into the else branch; ffn_futex is held
         * on every path reaching this label.
         */
        futex_up(&ffn_futex);
        nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
        if (!nodep)
            return NULL;
    }
    fat_node_initialize(nodep);

    return nodep;
}
140
 
2951 jermar 141
/** Internal version of fat_node_get().
 *
 * Returns the in-core node for the given index structure, instantiating
 * it from the on-disk dentry if necessary.
 *
 * @param idxp      Locked index structure.
 *
 * @return      In-core node, or NULL if out of memory.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    fat_node_t *nodep = NULL;
    unsigned bps;       /* bytes per sector */
    unsigned spc;       /* sectors per cluster */
    unsigned dps;       /* dentries per sector */

    if (idxp->nodep) {
        /*
         * We are lucky.
         * The node is already instantiated in memory.
         */
        futex_down(&idxp->nodep->lock);
        /* First reference pulls the node off the free-node cache. */
        if (!idxp->nodep->refcnt++)
            list_remove(&idxp->nodep->ffn_link);
        futex_up(&idxp->nodep->lock);
        return idxp->nodep;
    }

    /*
     * We must instantiate the node from the file system.
     */

    assert(idxp->pfc);

    nodep = fat_node_get_new();
    if (!nodep)
        return NULL;

    bs = block_bb_get(idxp->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
        (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
    assert(b);

    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
    if (d->attr & FAT_ATTR_SUBDIR) {
        /*
         * The only directory which does not have this bit set is the
         * root directory itself. The root directory node is handled
         * and initialized elsewhere.
         */
        nodep->type = FAT_DIRECTORY;
        /*
         * Unfortunately, the 'size' field of the FAT dentry is not
         * defined for the directory entry type. We must determine the
         * size of the directory by walking the FAT.
         */
        nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
            uint16_t_le2host(d->firstc));
    } else {
        nodep->type = FAT_FILE;
        nodep->size = uint32_t_le2host(d->size);
    }
    nodep->firstc = uint16_t_le2host(d->firstc);
    nodep->lnkcnt = 1;
    nodep->refcnt = 1;

    block_put(b);

    /* Link the idx structure with the node structure. */
    nodep->idx = idxp;
    idxp->nodep = nodep;

    return nodep;
}
218
 
2951 jermar 219
/** Instantiate a FAT in-core node. */
220
static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
221
{
222
    void *node;
223
    fat_idx_t *idxp;
224
 
225
    idxp = fat_idx_get_by_index(dev_handle, index);
226
    if (!idxp)
227
        return NULL;
228
    /* idxp->lock held */
229
    node = fat_node_get_core(idxp);
230
    futex_up(&idxp->lock);
231
    return node;
232
}
233
 
2852 jermar 234
static void fat_node_put(void *node)
235
{
2910 jermar 236
    fat_node_t *nodep = (fat_node_t *)node;
237
 
2951 jermar 238
    futex_down(&nodep->lock);
2910 jermar 239
    if (!--nodep->refcnt) {
2951 jermar 240
        futex_down(&ffn_futex);
2910 jermar 241
        list_append(&nodep->ffn_link, &ffn_head);
2951 jermar 242
        futex_up(&ffn_futex);
2910 jermar 243
    }
2951 jermar 244
    futex_up(&nodep->lock);
2852 jermar 245
}
246
 
3593 rimsky 247
/** Create a new FAT node.
 *
 * Node creation is not implemented yet; always fails.
 *
 * @return      Always NULL.
 */
static void *fat_create(dev_handle_t dev_handle, int flags)
{
    return NULL;    /* not supported at the moment */
}
251
 
2858 jermar 252
/** Destroy a FAT node.
 *
 * Node destruction is not implemented yet; always fails.
 *
 * @return      Always ENOTSUP.
 */
static int fat_destroy(void *node)
{
    return ENOTSUP; /* not supported at the moment */
}
256
 
257
static bool fat_link(void *prnt, void *chld, const char *name)
258
{
259
    return false;   /* not supported at the moment */
260
}
261
 
262
/** Unlink a child node from its parent directory.
 *
 * Unlinking is not implemented yet; always fails.
 *
 * @return      Always ENOTSUP.
 */
static int fat_unlink(void *prnt, void *chld)
{
    return ENOTSUP; /* not supported at the moment */
}
266
 
2793 jermar 267
/** Look up a directory entry by name within a parent directory.
 *
 * Walks the parent's dentry blocks, canonifying each valid name and
 * comparing case-insensitively against the requested component.
 *
 * @param prnt      Parent directory node (fat_node_t *).
 * @param component Name to look up.
 *
 * @return      Matching in-core node, or NULL if not found or on error.
 */
static void *fat_match(void *prnt, const char *component)
{
    fat_bs_t *bs;
    fat_node_t *parentp = (fat_node_t *)prnt;
    char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
    unsigned i, j;
    unsigned bps;       /* bytes per sector */
    unsigned dps;       /* dentries per sector */
    unsigned blocks;
    fat_dentry_t *d;
    block_t *b;

    futex_down(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);
    blocks = parentp->size / bps;
    for (i = 0; i < blocks; i++) {
        b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
                continue;
            case FAT_DENTRY_LAST:
                /* Terminator dentry: no more entries to scan. */
                block_put(b);
                futex_up(&parentp->idx->lock);
                return NULL;
            default:
            case FAT_DENTRY_VALID:
                dentry_name_canonify(d, name);
                break;
            }
            if (stricmp(name, component) == 0) {
                /* hit */
                void *node;
                /*
                 * Assume tree hierarchy for locking.  We
                 * already have the parent and now we are going
                 * to lock the child.  Never lock in the opposite
                 * order.
                 */
                fat_idx_t *idx = fat_idx_get_by_pos(
                    parentp->idx->dev_handle, parentp->firstc,
                    i * dps + j);
                futex_up(&parentp->idx->lock);
                if (!idx) {
                    /*
                     * Can happen if memory is low or if we
                     * run out of 32-bit indices.
                     */
                    block_put(b);
                    return NULL;
                }
                node = fat_node_get_core(idx);
                futex_up(&idx->lock);
                block_put(b);
                return node;
            }
        }
        block_put(b);
    }

    futex_up(&parentp->idx->lock);
    return NULL;
}
333
 
2831 jermar 334
static fs_index_t fat_index_get(void *node)
335
{
336
    fat_node_t *fnodep = (fat_node_t *)node;
337
    if (!fnodep)
338
        return 0;
2864 jermar 339
    return fnodep->idx->index;
2831 jermar 340
}
341
 
342
static size_t fat_size_get(void *node)
343
{
344
    return ((fat_node_t *)node)->size;
345
}
346
 
347
static unsigned fat_lnkcnt_get(void *node)
348
{
349
    return ((fat_node_t *)node)->lnkcnt;
350
}
351
 
2845 jermar 352
static bool fat_has_children(void *node)
353
{
3593 rimsky 354
    fat_bs_t *bs;
2845 jermar 355
    fat_node_t *nodep = (fat_node_t *)node;
356
    unsigned bps;
357
    unsigned dps;
358
    unsigned blocks;
359
    block_t *b;
360
    unsigned i, j;
361
 
362
    if (nodep->type != FAT_DIRECTORY)
363
        return false;
3593 rimsky 364
 
2951 jermar 365
    futex_down(&nodep->idx->lock);
3593 rimsky 366
    bs = block_bb_get(nodep->idx->dev_handle);
367
    bps = uint16_t_le2host(bs->bps);
2845 jermar 368
    dps = bps / sizeof(fat_dentry_t);
369
 
3593 rimsky 370
    blocks = nodep->size / bps;
2845 jermar 371
 
372
    for (i = 0; i < blocks; i++) {
373
        fat_dentry_t *d;
374
 
3602 rimsky 375
        b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
3593 rimsky 376
        for (j = 0; j < dps; j++) {
2845 jermar 377
            d = ((fat_dentry_t *)b->data) + j;
378
            switch (fat_classify_dentry(d)) {
379
            case FAT_DENTRY_SKIP:
380
                continue;
381
            case FAT_DENTRY_LAST:
382
                block_put(b);
2951 jermar 383
                futex_up(&nodep->idx->lock);
2845 jermar 384
                return false;
385
            default:
386
            case FAT_DENTRY_VALID:
387
                block_put(b);
2951 jermar 388
                futex_up(&nodep->idx->lock);
2845 jermar 389
                return true;
390
            }
391
            block_put(b);
2951 jermar 392
            futex_up(&nodep->idx->lock);
2845 jermar 393
            return true;
394
        }
395
        block_put(b);
396
    }
397
 
2951 jermar 398
    futex_up(&nodep->idx->lock);
2845 jermar 399
    return false;
400
}
401
 
2844 jermar 402
/** Return the root directory node of the file system on a device.
 *
 * The root node is registered under index 0 (see fat_mounted()).
 */
static void *fat_root_get(dev_handle_t dev_handle)
{
    return fat_node_get(dev_handle, 0);
}
406
 
407
static char fat_plb_get_char(unsigned pos)
408
{
409
    return fat_reg.plb_ro[pos % PLB_SIZE];
410
}
411
 
2831 jermar 412
static bool fat_is_directory(void *node)
413
{
414
    return ((fat_node_t *)node)->type == FAT_DIRECTORY;
415
}
416
 
417
static bool fat_is_file(void *node)
418
{
419
    return ((fat_node_t *)node)->type == FAT_FILE;
420
}
421
 
2793 jermar 422
/** libfs operations.
 *
 * Table of FAT implementations of the generic libfs file system
 * operations; handed to libfs_lookup() by fat_lookup().
 */
libfs_ops_t fat_libfs_ops = {
    .match = fat_match,
    .node_get = fat_node_get,
    .node_put = fat_node_put,
    .create = fat_create,
    .destroy = fat_destroy,
    .link = fat_link,
    .unlink = fat_unlink,
    .index_get = fat_index_get,
    .size_get = fat_size_get,
    .lnkcnt_get = fat_lnkcnt_get,
    .has_children = fat_has_children,
    .root_get = fat_root_get,
    .plb_get_char = fat_plb_get_char,
    .is_directory = fat_is_directory,
    .is_file = fat_is_file
};
440
 
3110 jermar 441
/** Handle the VFS_MOUNTED request: prepare a device for use as a FAT
 *  file system.
 *
 * Initializes libblock and the block cache for the device, reads and
 * validates the boot sector, sets up the per-device index structures and
 * instantiates the root directory node under index 0. On any failure the
 * already-acquired resources are released in reverse order and an error
 * is answered to the caller.
 *
 * @param rid       Hash of the IPC request to answer.
 * @param request   IPC request; ARG1 carries the device handle.
 */
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
    fat_bs_t *bs;
    uint16_t bps;
    uint16_t rde;
    int rc;

    /* initialize libblock */
    rc = block_init(dev_handle, BS_SIZE);
    if (rc != EOK) {
        ipc_answer_0(rid, rc);
        return;
    }

    /* prepare the boot block */
    rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* get the buffer with the boot sector */
    bs = block_bb_get(dev_handle);

    /* Read the number of root directory entries. */
    bps = uint16_t_le2host(bs->bps);
    rde = uint16_t_le2host(bs->root_ent_max);

    /* Only sector sizes equal to BS_SIZE are supported for now. */
    if (bps != BS_SIZE) {
        block_fini(dev_handle);
        ipc_answer_0(rid, ENOTSUP);
        return;
    }

    /* Initialize the block cache */
    rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    rc = fat_idx_init_by_dev_handle(dev_handle);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* Initialize the root node. */
    fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
    if (!rootp) {
        block_fini(dev_handle);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    fat_node_initialize(rootp);

    /* The root directory has no parent; FAT_CLST_ROOTPAR marks that. */
    fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
    if (!ridxp) {
        block_fini(dev_handle);
        free(rootp);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    assert(ridxp->index == 0);
    /* ridxp->lock held */

    rootp->type = FAT_DIRECTORY;
    rootp->firstc = FAT_CLST_ROOT;
    rootp->refcnt = 1;
    rootp->lnkcnt = 0;  /* FS root is not linked */
    rootp->size = rde * sizeof(fat_dentry_t);
    rootp->idx = ridxp;
    ridxp->nodep = rootp;

    futex_up(&ridxp->lock);

    /* Answer with the root index, size and link count. */
    ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}
525
 
526
/** Handle the VFS_MOUNT request.
 *
 * Mounting other file systems inside FAT is not implemented yet.
 */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
    ipc_answer_0(rid, ENOTSUP);
}
530
 
2627 jermar 531
/** Handle the VFS_LOOKUP request.
 *
 * All path lookup logic is delegated to libfs, parameterized with the
 * FAT operation table.
 */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
535
 
3307 jermar 536
/** Handle the VFS_READ request: read from a file or list a directory.
 *
 * For regular files, at most one block worth of data is returned per
 * call. For directories, the position is interpreted as a dentry index
 * and the name of the next valid dentry is returned.
 *
 * @param rid       Hash of the IPC request to answer.
 * @param request   IPC request; ARG1 = device, ARG2 = index, ARG3 = pos.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    size_t bytes;
    block_t *b;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    /* Receive the data transfer phase of the read request. */
    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_read_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);

    if (nodep->type == FAT_FILE) {
        /*
         * Our strategy for regular file reads is to read one block at
         * most and make use of the possibility to return less data than
         * requested. This keeps the code very simple.
         */
        if (pos >= nodep->size) {
            /* reading beyond the EOF */
            bytes = 0;
            (void) ipc_data_read_finalize(callid, NULL, 0);
        } else {
            /* Clamp to the block boundary and to the file size. */
            bytes = min(len, bps - pos % bps);
            bytes = min(bytes, nodep->size - pos);
            b = fat_block_get(bs, nodep, pos / bps,
                BLOCK_FLAGS_NONE);
            (void) ipc_data_read_finalize(callid, b->data + pos % bps,
                bytes);
            block_put(b);
        }
    } else {
        unsigned bnum;
        off_t spos = pos;       /* starting position, for bytes count */
        char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
        fat_dentry_t *d;

        assert(nodep->type == FAT_DIRECTORY);
        assert(nodep->size % bps == 0);
        assert(bps % sizeof(fat_dentry_t) == 0);

        /*
         * Our strategy for readdir() is to use the position pointer as
         * an index into the array of all dentries. On entry, it points
         * to the first unread dentry. If we skip any dentries, we bump
         * the position pointer accordingly.
         */
        bnum = (pos * sizeof(fat_dentry_t)) / bps;
        while (bnum < nodep->size / bps) {
            off_t o;

            b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
            for (o = pos % (bps / sizeof(fat_dentry_t));
                o < bps / sizeof(fat_dentry_t);
                o++, pos++) {
                d = ((fat_dentry_t *)b->data) + o;
                switch (fat_classify_dentry(d)) {
                case FAT_DENTRY_SKIP:
                    continue;
                case FAT_DENTRY_LAST:
                    block_put(b);
                    goto miss;
                default:
                case FAT_DENTRY_VALID:
                    dentry_name_canonify(d, name);
                    block_put(b);
                    goto hit;
                }
            }
            block_put(b);
            bnum++;
        }
miss:
        /* No more valid dentries in this directory. */
        fat_node_put(nodep);
        ipc_answer_0(callid, ENOENT);
        ipc_answer_1(rid, ENOENT, 0);
        return;
hit:
        /* Return the canonified name including the terminating NUL. */
        (void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
        bytes = (pos - spos) + 1;
    }

    fat_node_put(nodep);
    ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
637
 
3593 rimsky 638
/** Handle the VFS_WRITE request: write data to a file.
 *
 * At most one block worth of data is written per call. When the write
 * position lies beyond the cluster-aligned end of the node, new clusters
 * are allocated, zero-filled and appended to the node's cluster chain.
 *
 * @param rid       Hash of the IPC request to answer.
 * @param request   IPC request; ARG1 = device, ARG2 = index, ARG3 = pos.
 */
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    size_t bytes;
    block_t *b;
    uint16_t bps;
    unsigned spc;
    unsigned bpc;       /* bytes per cluster */
    off_t boundary;
    int flags = BLOCK_FLAGS_NONE;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    /* Receive the data transfer phase of the write request. */
    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_write_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    /*
     * In all scenarios, we will attempt to write out only one block worth
     * of data at maximum. There might be some more efficient approaches,
     * but this one greatly simplifies fat_write(). Note that we can afford
     * to do this because the client must be ready to handle the return
     * value signalizing a smaller number of bytes written.
     */
    bytes = min(len, bps - pos % bps);
    /* A whole-block write need not read the old block contents first. */
    if (bytes == bps)
        flags |= BLOCK_FLAGS_NOREAD;

    boundary = ROUND_UP(nodep->size, bpc);
    if (pos < boundary) {
        /*
         * This is the easier case - we are either overwriting already
         * existing contents or writing behind the EOF, but still within
         * the limits of the last cluster. The node size may grow to the
         * next block size boundary.
         */
        fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
        b = fat_block_get(bs, nodep, pos / bps, flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        if (pos + bytes > nodep->size) {
            nodep->size = pos + bytes;
            nodep->dirty = true;    /* need to sync node */
        }
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    } else {
        /*
         * This is the more difficult case. We must allocate new
         * clusters for the node and zero them out.
         */
        int status;
        unsigned nclsts;
        fat_cluster_t mcl, lcl;

        nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
        /* create an independent chain of nclsts clusters in all FATs */
        status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
        if (status != EOK) {
            /* could not allocate a chain of nclsts clusters */
            fat_node_put(nodep);
            ipc_answer_0(callid, status);
            ipc_answer_0(rid, status);
            return;
        }
        /* zero fill any gaps */
        fat_fill_gap(bs, nodep, mcl, pos);
        /* Write into the last cluster of the newly allocated chain. */
        b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
            flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        /*
         * Append the cluster chain starting in mcl to the end of the
         * node's cluster chain.
         */
        fat_append_clusters(bs, nodep, mcl);
        nodep->size = pos + bytes;
        nodep->dirty = true;        /* need to sync node */
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    }
}
743
 
744
/** Handle the VFS_TRUNCATE request: shrink a node to a given size.
 *
 * Growing a node is not supported and yields EINVAL. When shrinking
 * crosses a cluster boundary, the now-unused clusters are chopped off
 * the node's cluster chain.
 *
 * @param rid       Hash of the IPC request to answer.
 * @param request   IPC request; ARG1 = device, ARG2 = index, ARG3 = size.
 */
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    size_t size = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    uint8_t spc;
    unsigned bpc;   /* bytes per cluster */
    int rc;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    if (nodep->size == size) {
        rc = EOK;
    } else if (nodep->size < size) {
        /*
         * The standard says we have the freedom to grow the node.
         * For now, we simply return an error.
         */
        rc = EINVAL;
    } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
        /*
         * The node will be shrunk, but no clusters will be deallocated.
         */
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    } else {
        /*
         * The node will be shrunk, clusters will be deallocated.
         */
        if (size == 0) {
            fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
        } else {
            /* Find the new last cluster and chop behind it. */
            fat_cluster_t lastc;
            (void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
                &lastc, (size - 1) / bpc);
            fat_chop_clusters(bs, nodep, lastc);
        }
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    }
    fat_node_put(nodep);
    ipc_answer_0(rid, rc);
    return;
}
801
 
2627 jermar 802
/**
803
 * @}
804
 */