Subversion Repositories HelenOS

/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file    fat_ops.c
 * @brief   Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <ipc/ipc.h>
#include <ipc/services.h>
#include <ipc/devmap.h>
#include <async.h>
#include <errno.h>
#include <string.h>
#include <byteorder.h>
#include <libadt/hash_table.h>
#include <libadt/list.h>
#include <assert.h>
#include <futex.h>
#include <sys/mman.h>
#include <align.h>

#define BS_BLOCK        0
#define BS_SIZE         512

/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);

#define min(a, b)       ((a) < (b) ? (a) : (b))

static int dev_phone = -1;      /* FIXME */
static void *dev_buffer = NULL;     /* FIXME */

/* TODO move somewhere else */
typedef struct {
    void *data;
    size_t size;
    bool dirty;
} block_t;

static block_t *block_get(dev_handle_t dev_handle, off_t offset, size_t bs)
{
    /* FIXME */
    block_t *b;
    off_t bufpos = 0;
    size_t buflen = 0;
    off_t pos = offset * bs;

    assert(dev_phone != -1);
    assert(dev_buffer);

    b = malloc(sizeof(block_t));
    if (!b)
        return NULL;

    b->data = malloc(bs);
    if (!b->data) {
        free(b);
        return NULL;
    }
    b->size = bs;

    if (!libfs_blockread(dev_phone, dev_buffer, &bufpos, &buflen, &pos,
        b->data, bs, bs)) {
        free(b->data);
        free(b);
        return NULL;
    }

    return b;
}

static void block_put(block_t *block)
{
    /* FIXME */
    free(block->data);
    free(block);
}

#define FAT1        0

#define FAT_BS(b)       ((fat_bs_t *)((b)->data))

#define FAT_CLST_RES0   0x0000
#define FAT_CLST_RES1   0x0001
#define FAT_CLST_FIRST  0x0002
#define FAT_CLST_BAD    0xfff7
#define FAT_CLST_LAST1  0xfff8
#define FAT_CLST_LAST8  0xffff

/* internally used to mark root directory's parent */
#define FAT_CLST_ROOTPAR    FAT_CLST_RES0
/* internally used to mark root directory */
#define FAT_CLST_ROOT       FAT_CLST_RES1

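/*
 * Illustrative note (not part of the original source): with the FAT16
 * constants above, a cluster chain is a singly linked list threaded
 * through the FAT itself. For example, a three-cluster file whose first
 * cluster is 5 could be encoded as
 *
 *   FAT[5] = 6, FAT[6] = 9, FAT[9] = some value in
 *   FAT_CLST_LAST1..FAT_CLST_LAST8,
 *
 * while FAT_CLST_RES0 marks a free entry and FAT_CLST_BAD marks a bad
 * cluster. The cluster numbers in this example are made up.
 */
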
#define fat_block_get(np, off) \
    _fat_block_get((np)->idx->dev_handle, (np)->firstc, (off))

static block_t *
_fat_block_get(dev_handle_t dev_handle, fat_cluster_t firstc, off_t offset)
{
    block_t *bb;
    block_t *b;
    unsigned bps;
    unsigned spc;
    unsigned rscnt;     /* block address of the first FAT */
    unsigned fatcnt;
    unsigned rde;
    unsigned rds;       /* root directory size */
    unsigned sf;
    unsigned ssa;       /* size of the system area */
    unsigned clusters;
    fat_cluster_t clst = firstc;
    unsigned i;

    bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    spc = FAT_BS(bb)->spc;
    rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
    fatcnt = FAT_BS(bb)->fatcnt;
    rde = uint16_t_le2host(FAT_BS(bb)->root_ent_max);
    sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
    block_put(bb);

    rds = (sizeof(fat_dentry_t) * rde) / bps;
    rds += ((sizeof(fat_dentry_t) * rde) % bps != 0);
    ssa = rscnt + fatcnt * sf + rds;

    if (firstc == FAT_CLST_ROOT) {
        /* root directory special case */
        assert(offset < rds);
        b = block_get(dev_handle, rscnt + fatcnt * sf + offset, bps);
        return b;
    }

    clusters = offset / spc;
    for (i = 0; i < clusters; i++) {
        unsigned fsec;  /* sector offset relative to FAT1 */
        unsigned fidx;  /* FAT1 entry index */

        assert(clst >= FAT_CLST_FIRST && clst < FAT_CLST_BAD);
        fsec = (clst * sizeof(fat_cluster_t)) / bps;
        fidx = clst % (bps / sizeof(fat_cluster_t));
        /* read FAT1 */
        b = block_get(dev_handle, rscnt + fsec, bps);
        clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
        assert(clst != FAT_CLST_BAD);
        assert(clst < FAT_CLST_LAST1);
        block_put(b);
    }

    b = block_get(dev_handle, ssa + (clst - FAT_CLST_FIRST) * spc +
        offset % spc, bps);

    return b;
}

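/*
 * Illustrative example (assumed geometry, not from the original source):
 * with spc == 4 sectors per cluster, a call such as
 *
 *   b = fat_block_get(nodep, 9);
 *
 * walks 9 / 4 == 2 links of the cluster chain starting at nodep->firstc
 * and then returns block 9 % 4 == 1 within the third cluster, translated
 * to a device block number relative to the start of the data area (ssa).
 */
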
/** Return number of blocks allocated to a file.
 *
 * @param dev_handle    Device handle of the device with the file.
 * @param firstc    First cluster of the file.
 *
 * @return      Number of blocks allocated to the file.
 */
static uint16_t
_fat_blcks_get(dev_handle_t dev_handle, fat_cluster_t firstc)
{
    block_t *bb;
    block_t *b;
    unsigned bps;
    unsigned spc;
    unsigned rscnt;     /* block address of the first FAT */
    unsigned clusters = 0;
    fat_cluster_t clst = firstc;

    bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    spc = FAT_BS(bb)->spc;
    rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
    block_put(bb);

    if (firstc == FAT_CLST_RES0) {
        /* No space allocated to the file. */
        return 0;
    }

    while (clst < FAT_CLST_LAST1) {
        unsigned fsec;  /* sector offset relative to FAT1 */
        unsigned fidx;  /* FAT1 entry index */

        assert(clst >= FAT_CLST_FIRST);
        fsec = (clst * sizeof(fat_cluster_t)) / bps;
        fidx = clst % (bps / sizeof(fat_cluster_t));
        /* read FAT1 */
        b = block_get(dev_handle, rscnt + fsec, bps);
        clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
        assert(clst != FAT_CLST_BAD);
        block_put(b);
        clusters++;
    }

    return clusters * spc;
}

static void fat_node_initialize(fat_node_t *node)
{
    futex_initialize(&node->lock, 1);
    node->idx = NULL;
    node->type = 0;
    link_initialize(&node->ffn_link);
    node->size = 0;
    node->lnkcnt = 0;
    node->refcnt = 0;
    node->dirty = false;
}

static uint16_t fat_bps_get(dev_handle_t dev_handle)
{
    block_t *bb;
    uint16_t bps;

    bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    assert(bb != NULL);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    block_put(bb);

    return bps;
}

static void fat_node_sync(fat_node_t *node)
{
    /* TODO */
}

/** Internal version of fat_node_get().
 *
 * @param idxp      Locked index structure.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
    block_t *b;
    fat_dentry_t *d;
    fat_node_t *nodep = NULL;
    unsigned bps;
    unsigned dps;

    if (idxp->nodep) {
        /*
         * We are lucky.
         * The node is already instantiated in memory.
         */
        futex_down(&idxp->nodep->lock);
        if (!idxp->nodep->refcnt++)
            list_remove(&idxp->nodep->ffn_link);
        futex_up(&idxp->nodep->lock);
        return idxp->nodep;
    }

    /*
     * We must instantiate the node from the file system.
     */

    assert(idxp->pfc);

    futex_down(&ffn_futex);
    if (!list_empty(&ffn_head)) {
        /* Try to use a cached free node structure. */
        fat_idx_t *idxp_tmp;
        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
        if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
            goto skip_cache;
        idxp_tmp = nodep->idx;
        if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
            futex_up(&nodep->lock);
            goto skip_cache;
        }
        list_remove(&nodep->ffn_link);
        futex_up(&ffn_futex);
        if (nodep->dirty)
            fat_node_sync(nodep);
        idxp_tmp->nodep = NULL;
        futex_up(&nodep->lock);
        futex_up(&idxp_tmp->lock);
    } else {
skip_cache:
        /* Try to allocate a new node structure. */
        futex_up(&ffn_futex);
        nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
        if (!nodep)
            return NULL;
    }
    fat_node_initialize(nodep);

    bps = fat_bps_get(idxp->dev_handle);
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(idxp->dev_handle, idxp->pfc,
        (idxp->pdi * sizeof(fat_dentry_t)) / bps);
    assert(b);

    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
    if (d->attr & FAT_ATTR_SUBDIR) {
        /*
         * The only directory which does not have this bit set is the
         * root directory itself. The root directory node is handled
         * and initialized elsewhere.
         */
        nodep->type = FAT_DIRECTORY;
        /*
         * Unfortunately, the 'size' field of the FAT dentry is not
         * defined for the directory entry type. We must determine the
         * size of the directory by walking the FAT.
         */
        nodep->size = bps * _fat_blcks_get(idxp->dev_handle,
            uint16_t_le2host(d->firstc));
    } else {
        nodep->type = FAT_FILE;
        nodep->size = uint32_t_le2host(d->size);
    }
    nodep->firstc = uint16_t_le2host(d->firstc);
    nodep->lnkcnt = 1;
    nodep->refcnt = 1;

    block_put(b);

    /* Link the idx structure with the node structure. */
    nodep->idx = idxp;
    idxp->nodep = nodep;

    return nodep;
}

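/*
 * Note added for clarity (not in the original source): the cache-reuse
 * branch above uses futex_trydown() rather than futex_down() on both the
 * victim node and its index structure. The usual order appears to be
 * index-then-node (see fat_match()), so blocking here, while already
 * holding ffn_futex and taking the locks in the opposite order, could
 * presumably deadlock; on contention the code simply falls through to
 * skip_cache and allocates a fresh node instead.
 */
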
/** Instantiate a FAT in-core node. */
static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
    void *node;
    fat_idx_t *idxp;

    idxp = fat_idx_get_by_index(dev_handle, index);
    if (!idxp)
        return NULL;
    /* idxp->lock held */
    node = fat_node_get_core(idxp);
    futex_up(&idxp->lock);
    return node;
}

static void fat_node_put(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;

    futex_down(&nodep->lock);
    if (!--nodep->refcnt) {
        futex_down(&ffn_futex);
        list_append(&nodep->ffn_link, &ffn_head);
        futex_up(&ffn_futex);
    }
    futex_up(&nodep->lock);
}

static void *fat_create(int flags)
{
    return NULL;    /* not supported at the moment */
}

static int fat_destroy(void *node)
{
    return ENOTSUP; /* not supported at the moment */
}

static bool fat_link(void *prnt, void *chld, const char *name)
{
    return false;   /* not supported at the moment */
}

static int fat_unlink(void *prnt, void *chld)
{
    return ENOTSUP; /* not supported at the moment */
}

static void *fat_match(void *prnt, const char *component)
{
    fat_node_t *parentp = (fat_node_t *)prnt;
    char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
    unsigned i, j;
    unsigned bps;       /* bytes per sector */
    unsigned dps;       /* dentries per sector */
    unsigned blocks;
    fat_dentry_t *d;
    block_t *b;

    futex_down(&parentp->idx->lock);
    bps = fat_bps_get(parentp->idx->dev_handle);
    dps = bps / sizeof(fat_dentry_t);
    blocks = parentp->size / bps + (parentp->size % bps != 0);
    for (i = 0; i < blocks; i++) {
        unsigned dentries;

        b = fat_block_get(parentp, i);
        dentries = (i == blocks - 1) ?
            parentp->size % sizeof(fat_dentry_t) :
            dps;
        for (j = 0; j < dentries; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&parentp->idx->lock);
                return NULL;
            default:
            case FAT_DENTRY_VALID:
                dentry_name_canonify(d, name);
                break;
            }
            if (stricmp(name, component) == 0) {
                /* hit */
                void *node;
                /*
                 * Assume tree hierarchy for locking.  We
                 * already have the parent and now we are going
                 * to lock the child.  Never lock in the opposite
                 * order.
                 */
                fat_idx_t *idx = fat_idx_get_by_pos(
                    parentp->idx->dev_handle, parentp->firstc,
                    i * dps + j);
                futex_up(&parentp->idx->lock);
                if (!idx) {
                    /*
                     * Can happen if memory is low or if we
                     * run out of 32-bit indices.
                     */
                    block_put(b);
                    return NULL;
                }
                node = fat_node_get_core(idx);
                futex_up(&idx->lock);
                block_put(b);
                return node;
            }
        }
        block_put(b);
    }
    futex_up(&parentp->idx->lock);
    return NULL;
}

static fs_index_t fat_index_get(void *node)
{
    fat_node_t *fnodep = (fat_node_t *)node;
    if (!fnodep)
        return 0;
    return fnodep->idx->index;
}

static size_t fat_size_get(void *node)
{
    return ((fat_node_t *)node)->size;
}

static unsigned fat_lnkcnt_get(void *node)
{
    return ((fat_node_t *)node)->lnkcnt;
}

static bool fat_has_children(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;
    unsigned bps;
    unsigned dps;
    unsigned blocks;
    block_t *b;
    unsigned i, j;

    if (nodep->type != FAT_DIRECTORY)
        return false;

    futex_down(&nodep->idx->lock);
    bps = fat_bps_get(nodep->idx->dev_handle);
    dps = bps / sizeof(fat_dentry_t);

    blocks = nodep->size / bps + (nodep->size % bps != 0);

    for (i = 0; i < blocks; i++) {
        unsigned dentries;
        fat_dentry_t *d;

        b = fat_block_get(nodep, i);
        dentries = (i == blocks - 1) ?
            nodep->size % sizeof(fat_dentry_t) :
            dps;
        for (j = 0; j < dentries; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return false;
            default:
            case FAT_DENTRY_VALID:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return true;
            }
            block_put(b);
            futex_up(&nodep->idx->lock);
            return true;
        }
        block_put(b);
    }

    futex_up(&nodep->idx->lock);
    return false;
}

static void *fat_root_get(dev_handle_t dev_handle)
{
    return fat_node_get(dev_handle, 0);
}

static char fat_plb_get_char(unsigned pos)
{
    return fat_reg.plb_ro[pos % PLB_SIZE];
}

static bool fat_is_directory(void *node)
{
    return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}

static bool fat_is_file(void *node)
{
    return ((fat_node_t *)node)->type == FAT_FILE;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
    .match = fat_match,
    .node_get = fat_node_get,
    .node_put = fat_node_put,
    .create = fat_create,
    .destroy = fat_destroy,
    .link = fat_link,
    .unlink = fat_unlink,
    .index_get = fat_index_get,
    .size_get = fat_size_get,
    .lnkcnt_get = fat_lnkcnt_get,
    .has_children = fat_has_children,
    .root_get = fat_root_get,
    .plb_get_char = fat_plb_get_char,
    .is_directory = fat_is_directory,
    .is_file = fat_is_file
};

void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
    block_t *bb;
    uint16_t bps;
    uint16_t rde;
    int rc;

    /*
     * For now, we don't bother to remember dev_handle, dev_phone or
     * dev_buffer in some data structure. We use global variables because we
     * know there will be at most one mount on this file system.
     * Of course, this is a huge TODO item.
     */
    dev_buffer = mmap(NULL, BS_SIZE, PROTO_READ | PROTO_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);

    if (!dev_buffer) {
        ipc_answer_0(rid, ENOMEM);
        return;
    }

    dev_phone = ipc_connect_me_to(PHONE_NS, SERVICE_DEVMAP,
        DEVMAP_CONNECT_TO_DEVICE, dev_handle);

    if (dev_phone < 0) {
        munmap(dev_buffer, BS_SIZE);
        ipc_answer_0(rid, dev_phone);
        return;
    }

    rc = ipc_share_out_start(dev_phone, dev_buffer,
        AS_AREA_READ | AS_AREA_WRITE);
    if (rc != EOK) {
        munmap(dev_buffer, BS_SIZE);
        ipc_answer_0(rid, rc);
        return;
    }

    /* Read the number of root directory entries. */
    bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    rde = uint16_t_le2host(FAT_BS(bb)->root_ent_max);
    block_put(bb);

    if (bps != BS_SIZE) {
        munmap(dev_buffer, BS_SIZE);
        ipc_answer_0(rid, ENOTSUP);
        return;
    }

    rc = fat_idx_init_by_dev_handle(dev_handle);
    if (rc != EOK) {
        munmap(dev_buffer, BS_SIZE);
        ipc_answer_0(rid, rc);
        return;
    }

    /* Initialize the root node. */
    fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
    if (!rootp) {
        munmap(dev_buffer, BS_SIZE);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    fat_node_initialize(rootp);

    fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
    if (!ridxp) {
        munmap(dev_buffer, BS_SIZE);
        free(rootp);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    assert(ridxp->index == 0);
    /* ridxp->lock held */

    rootp->type = FAT_DIRECTORY;
    rootp->firstc = FAT_CLST_ROOT;
    rootp->refcnt = 1;
    rootp->lnkcnt = 0;  /* FS root is not linked */
    rootp->size = rde * sizeof(fat_dentry_t);
    rootp->idx = ridxp;
    ridxp->nodep = rootp;

    futex_up(&ridxp->lock);

    ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}

void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
    ipc_answer_0(rid, ENOTSUP);
}

void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    uint16_t bps = fat_bps_get(dev_handle);
    size_t bytes;
    block_t *b;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_read_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    if (nodep->type == FAT_FILE) {
        /*
         * Our strategy for regular file reads is to read one block at
         * most and make use of the possibility to return less data than
         * requested. This keeps the code very simple.
         */
        bytes = min(len, bps - pos % bps);
        b = fat_block_get(nodep, pos / bps);
        (void) ipc_data_read_finalize(callid, b->data + pos % bps,
            bytes);
        block_put(b);
    } else {
        unsigned bnum;
        off_t spos = pos;
        char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
        fat_dentry_t *d;

        assert(nodep->type == FAT_DIRECTORY);
        assert(nodep->size % bps == 0);
        assert(bps % sizeof(fat_dentry_t) == 0);

        /*
         * Our strategy for readdir() is to use the position pointer as
         * an index into the array of all dentries. On entry, it points
         * to the first unread dentry. If we skip any dentries, we bump
         * the position pointer accordingly.
         */
        bnum = (pos * sizeof(fat_dentry_t)) / bps;
        while (bnum < nodep->size / bps) {
            off_t o;

            b = fat_block_get(nodep, bnum);
            for (o = pos % (bps / sizeof(fat_dentry_t));
                o < bps / sizeof(fat_dentry_t);
                o++, pos++) {
                d = ((fat_dentry_t *)b->data) + o;
                switch (fat_classify_dentry(d)) {
                case FAT_DENTRY_SKIP:
                    continue;
                case FAT_DENTRY_LAST:
                    block_put(b);
                    goto miss;
                default:
                case FAT_DENTRY_VALID:
                    dentry_name_canonify(d, name);
                    block_put(b);
                    goto hit;
                }
            }
            block_put(b);
            bnum++;
        }
miss:
        fat_node_put(nodep);
        ipc_answer_0(callid, ENOENT);
        ipc_answer_1(rid, ENOENT, 0);
        return;
hit:
        (void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
        bytes = (pos - spos) + 1;
    }

    fat_node_put(nodep);
    ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}

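/*
 * Worked example (illustrative only, assuming bps == 512 and 32-byte
 * dentries, i.e. 16 dentries per block): a readdir() call with pos == 20
 * starts at bnum == (20 * 32) / 512 == 1, i.e. the second directory block,
 * and scans dentries from o == 20 % 16 == 4 onwards, bumping pos for every
 * dentry it skips so that the next call resumes right after the dentry
 * just returned.
 */
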
/** Fill the gap between EOF and a new file position.
 *
 * @param nodep     FAT node with the gap.
 * @param mcl       First cluster in an independent cluster chain that will
 *          be later appended to the end of the node's own cluster
 *          chain. If pos is still in the last allocated cluster,
 *          this argument is ignored.
 * @param pos       Position in the last node block.
 */
static void
fat_fill_gap(fat_node_t *nodep, fat_cluster_t mcl, off_t pos)
{
    uint16_t bps;
    unsigned spc;
    block_t *bb, *b;
    off_t o, boundary;

    bb = block_get(nodep->idx->dev_handle, BS_BLOCK, BS_SIZE);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    spc = FAT_BS(bb)->spc;
    block_put(bb);

    boundary = ROUND_UP(nodep->size, bps * spc);

    /* zero out already allocated space */
    for (o = nodep->size - 1; o < pos && o < boundary;
        o = ALIGN_DOWN(o + bps, bps)) {
        b = fat_block_get(nodep, o / bps);
        memset(b->data + o % bps, 0, bps - o % bps);
        b->dirty = true;        /* need to sync node */
        block_put(b);
    }

    if (o >= pos)
        return;

    /* zero out the initial part of the new cluster chain */
    for (o = boundary; o < pos; o += bps) {
        b = _fat_block_get(nodep->idx->dev_handle, mcl,
            (o - boundary) / bps);
        memset(b->data, 0, min(bps, pos - o));
        b->dirty = true;        /* need to sync node */
        block_put(b);
    }
}

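/*
 * Worked example (illustrative only, assuming bps == 512 and spc == 4):
 * a node with size == 1000 that is being extended to pos == 3000 has
 * boundary == ROUND_UP(1000, 2048) == 2048. The first loop above zeroes
 * the tail of the already allocated space, offsets 999..2047, one block
 * at a time; the second loop then zeroes offsets 2048..2999 in the
 * freshly allocated chain starting at mcl.
 */
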
static void
fat_mark_cluster(dev_handle_t dev_handle, unsigned fatno, fat_cluster_t clst,
    fat_cluster_t value)
{
    /* TODO */
}

static void
fat_alloc_shadow_clusters(dev_handle_t dev_handle, fat_cluster_t *lifo,
    unsigned nclsts)
{
    /* TODO */
}

static int
fat_alloc_clusters(dev_handle_t dev_handle, unsigned nclsts, fat_cluster_t *mcl,
    fat_cluster_t *lcl)
{
    uint16_t bps;
    uint16_t rscnt;
    uint16_t sf;
    block_t *bb, *blk;
    fat_cluster_t *lifo;    /* stack for storing free cluster numbers */
    unsigned found = 0; /* top of the free cluster number stack */
    unsigned b, c, cl;

    lifo = (fat_cluster_t *) malloc(nclsts * sizeof(fat_cluster_t));
    if (!lifo)
        return ENOMEM;

    bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
    sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
    block_put(bb);

    /*
     * Search FAT1 for unused clusters.
     */
    for (b = 0, cl = 0; b < sf; b++) {
        blk = block_get(dev_handle, rscnt + b, bps);
        for (c = 0; c < bps / sizeof(fat_cluster_t); c++, cl++) {
            fat_cluster_t *clst = (fat_cluster_t *)blk->data + c;
            if (*clst == FAT_CLST_RES0) {
                /*
                 * The cluster is free. Put it into our stack
                 * of found clusters and mark it as non-free.
                 */
                lifo[found] = cl;
                if (found == 0)
                    *clst = FAT_CLST_LAST1;
                else
                    *clst = lifo[found - 1];
                blk->dirty = true;  /* need to sync block */
                if (++found == nclsts) {
                    /* we are almost done */
                    block_put(blk);
                    /* update the shadow copies of FAT */
                    fat_alloc_shadow_clusters(dev_handle,
                        lifo, nclsts);
                    *mcl = lifo[found - 1];
                    *lcl = lifo[0];
                    free(lifo);
                    return EOK;
                }
            }
        }
        block_put(blk);
    }

    /*
     * We could not find enough clusters. Now we need to free the clusters
     * we have allocated so far.
     */
    while (found--)
        fat_mark_cluster(dev_handle, FAT1, lifo[found], FAT_CLST_RES0);

    free(lifo);
    return ENOSPC;
}

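/*
 * Illustrative example (cluster numbers made up): asking
 * fat_alloc_clusters() for nclsts == 3 when clusters 7, 9 and 12 are the
 * first free FAT1 entries builds the chain in reverse order of discovery:
 * FAT[7] = FAT_CLST_LAST1, FAT[9] = 7, FAT[12] = 9. The caller then gets
 * *mcl == 12 (head of the new chain) and *lcl == 7 (its last cluster),
 * ready to be appended to a node by fat_append_clusters().
 */
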
static void
fat_append_clusters(fat_node_t *nodep, fat_cluster_t mcl)
{
}

void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    size_t bytes;
    block_t *b, *bb;
    uint16_t bps;
    unsigned spc;
    off_t boundary;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    /* XXX remove me when you are ready */
    {
        ipc_answer_0(rid, ENOTSUP);
        fat_node_put(nodep);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_write_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
    bps = uint16_t_le2host(FAT_BS(bb)->bps);
    spc = FAT_BS(bb)->spc;
    block_put(bb);

    /*
     * In all scenarios, we will attempt to write out only one block worth
     * of data at maximum. There might be some more efficient approaches,
     * but this one greatly simplifies fat_write(). Note that we can afford
     * to do this because the client must be ready to handle the return
     * value signaling a smaller number of bytes written.
     */
    bytes = min(len, bps - pos % bps);

    boundary = ROUND_UP(nodep->size, bps * spc);
    if (pos < boundary) {
        /*
         * This is the easier case - we are either overwriting already
         * existing contents or writing behind the EOF, but still within
         * the limits of the last cluster. The node size may grow to the
         * next block size boundary.
         */
        fat_fill_gap(nodep, FAT_CLST_RES0, pos);
        b = fat_block_get(nodep, pos / bps);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        if (pos + bytes > nodep->size) {
            nodep->size = pos + bytes;
            nodep->dirty = true;    /* need to sync node */
        }
        fat_node_put(nodep);
        ipc_answer_1(rid, EOK, bytes);
        return;
    } else {
        /*
         * This is the more difficult case. We must allocate new
         * clusters for the node and zero them out.
         */
        int status;
        unsigned nclsts;
        fat_cluster_t mcl, lcl;

        nclsts = (ROUND_UP(pos + bytes, bps * spc) - boundary) /
            (bps * spc);
        /* create an independent chain of nclsts clusters in all FATs */
        status = fat_alloc_clusters(dev_handle, nclsts, &mcl, &lcl);
        if (status != EOK) {
            /* could not allocate a chain of nclsts clusters */
            fat_node_put(nodep);
            ipc_answer_0(callid, status);
            ipc_answer_0(rid, status);
            return;
        }
        /* zero fill any gaps */
        fat_fill_gap(nodep, mcl, pos);
        b = _fat_block_get(dev_handle, lcl, (pos / bps) % spc);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        /*
         * Append the cluster chain starting in mcl to the end of the
         * node's cluster chain.
         */
        fat_append_clusters(nodep, mcl);
        nodep->size = pos + bytes;
        nodep->dirty = true;        /* need to sync node */
        fat_node_put(nodep);
        ipc_answer_1(rid, EOK, bytes);
        return;
    }
}

/**
 * @}
 */