/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file    fat_ops.c
 * @brief   Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <libblock.h>
#include <ipc/ipc.h>
#include <ipc/services.h>
#include <ipc/devmap.h>
#include <async.h>
#include <errno.h>
#include <string.h>
#include <byteorder.h>
#include <libadt/hash_table.h>
#include <libadt/list.h>
#include <assert.h>
#include <futex.h>
#include <sys/mman.h>
#include <align.h>

/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);

static void fat_node_initialize(fat_node_t *node)
{
    futex_initialize(&node->lock, 1);
    node->idx = NULL;
    node->type = 0;
    link_initialize(&node->ffn_link);
    node->size = 0;
    node->lnkcnt = 0;
    node->refcnt = 0;
    node->dirty = false;
}

static void fat_node_sync(fat_node_t *node)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    uint16_t bps;
    unsigned dps;

    assert(node->dirty);

    bs = block_bb_get(node->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
        (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);

    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

    d->firstc = host2uint16_t_le(node->firstc);
    if (node->type == FAT_FILE) {
        d->size = host2uint32_t_le(node->size);
    } else if (node->type == FAT_DIRECTORY) {
        d->attr = FAT_ATTR_SUBDIR;
    }

    /* TODO: update other fields? (e.g. time fields) */

    b->dirty = true;        /* need to sync block */
    block_put(b);
}

static fat_node_t *fat_node_get_new(void)
{
    fat_node_t *nodep;

    futex_down(&ffn_futex);
    if (!list_empty(&ffn_head)) {
        /* Try to use a cached free node structure. */
        fat_idx_t *idxp_tmp;
        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
        if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
            goto skip_cache;
        idxp_tmp = nodep->idx;
        if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
            futex_up(&nodep->lock);
            goto skip_cache;
        }
        list_remove(&nodep->ffn_link);
        futex_up(&ffn_futex);
        if (nodep->dirty)
            fat_node_sync(nodep);
        idxp_tmp->nodep = NULL;
        futex_up(&nodep->lock);
        futex_up(&idxp_tmp->lock);
    } else {
skip_cache:
        /* Try to allocate a new node structure. */
        futex_up(&ffn_futex);
        nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
        if (!nodep)
            return NULL;
    }
    fat_node_initialize(nodep);

    return nodep;
}

/** Internal version of fat_node_get().
 *
 * @param idxp      Locked index structure.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    fat_node_t *nodep = NULL;
    unsigned bps;
    unsigned spc;
    unsigned dps;

    if (idxp->nodep) {
        /*
         * We are lucky.
         * The node is already instantiated in memory.
         */
        futex_down(&idxp->nodep->lock);
        if (!idxp->nodep->refcnt++)
            list_remove(&idxp->nodep->ffn_link);
        futex_up(&idxp->nodep->lock);
        return idxp->nodep;
    }

    /*
     * We must instantiate the node from the file system.
     */

    assert(idxp->pfc);

    nodep = fat_node_get_new();
    if (!nodep)
        return NULL;

    bs = block_bb_get(idxp->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
        (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
    assert(b);

    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
    if (d->attr & FAT_ATTR_SUBDIR) {
        /*
         * The only directory which does not have this bit set is the
         * root directory itself. The root directory node is handled
         * and initialized elsewhere.
         */
        nodep->type = FAT_DIRECTORY;
        /*
         * Unfortunately, the 'size' field of the FAT dentry is not
         * defined for the directory entry type. We must determine the
         * size of the directory by walking the FAT.
         */
        nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
            uint16_t_le2host(d->firstc));
    } else {
        nodep->type = FAT_FILE;
        nodep->size = uint32_t_le2host(d->size);
    }
    nodep->firstc = uint16_t_le2host(d->firstc);
    nodep->lnkcnt = 1;
    nodep->refcnt = 1;

    block_put(b);

    /* Link the idx structure with the node structure. */
    nodep->idx = idxp;
    idxp->nodep = nodep;

    return nodep;
}

/*
 * Forward declarations of FAT libfs operations.
 */
static void *fat_node_get(dev_handle_t, fs_index_t);
static void fat_node_put(void *);
static void *fat_create_node(dev_handle_t, int);
static int fat_destroy_node(void *);
static int fat_link(void *, void *, const char *);
static int fat_unlink(void *, void *);
static void *fat_match(void *, const char *);
static fs_index_t fat_index_get(void *);
static size_t fat_size_get(void *);
static unsigned fat_lnkcnt_get(void *);
static bool fat_has_children(void *);
static void *fat_root_get(dev_handle_t);
static char fat_plb_get_char(unsigned);
static bool fat_is_directory(void *);
static bool fat_is_file(void *node);

/*
 * FAT libfs operations.
 */

/** Instantiate a FAT in-core node. */
void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
    void *node;
    fat_idx_t *idxp;

    idxp = fat_idx_get_by_index(dev_handle, index);
    if (!idxp)
        return NULL;
    /* idxp->lock held */
    node = fat_node_get_core(idxp);
    futex_up(&idxp->lock);
    return node;
}

void fat_node_put(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;
    bool destroy = false;

    futex_down(&nodep->lock);
    if (!--nodep->refcnt) {
        if (nodep->idx) {
            futex_down(&ffn_futex);
            list_append(&nodep->ffn_link, &ffn_head);
            futex_up(&ffn_futex);
        } else {
            /*
             * The node does not have any index structure associated
             * with it. This can only mean that we are releasing the
             * node after a failed attempt to allocate the index
             * structure for it.
             */
            destroy = true;
        }
    }
    futex_up(&nodep->lock);
    if (destroy)
        free(node);
}

void *fat_create_node(dev_handle_t dev_handle, int flags)
{
    fat_idx_t *idxp;
    fat_node_t *nodep;
    fat_bs_t *bs;
    fat_cluster_t mcl, lcl;
    uint16_t bps;
    int rc;

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    if (flags & L_DIRECTORY) {
        /* allocate a cluster */
        rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
        if (rc != EOK)
            return NULL;
    }

    nodep = fat_node_get_new();
    if (!nodep) {
        /* free the directory cluster only if we actually allocated one */
        if (flags & L_DIRECTORY)
            fat_free_clusters(bs, dev_handle, mcl);
        return NULL;
    }
    idxp = fat_idx_get_new(dev_handle);
    if (!idxp) {
        if (flags & L_DIRECTORY)
            fat_free_clusters(bs, dev_handle, mcl);
        fat_node_put(nodep);
        return NULL;
    }
    /* idxp->lock held */
    if (flags & L_DIRECTORY) {
        int i;
        block_t *b;

        /*
         * Populate the new cluster with unused dentries.
         */
        for (i = 0; i < bs->spc; i++) {
            b = _fat_block_get(bs, dev_handle, mcl, i,
                BLOCK_FLAGS_NOREAD);
            /* mark all dentries as never-used */
            memset(b->data, 0, bps);
            b->dirty = true;        /* need to sync block */
            block_put(b);
        }
        nodep->type = FAT_DIRECTORY;
        nodep->firstc = mcl;
        nodep->size = bps * bs->spc;
    } else {
        nodep->type = FAT_FILE;
        nodep->firstc = FAT_CLST_RES0;
        nodep->size = 0;
    }
    nodep->lnkcnt = 0;  /* not linked anywhere */
    nodep->refcnt = 1;
    nodep->dirty = true;

    nodep->idx = idxp;
    idxp->nodep = nodep;

    futex_up(&idxp->lock);
    return nodep;
}

int fat_destroy_node(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;
    fat_bs_t *bs;

    /*
     * The node is not reachable from the file system. This means that the
     * link count should be zero and that the index structure cannot be
     * found in the position hash. Obviously, we don't need to lock the
     * node or its index structure.
     */
    assert(nodep->lnkcnt == 0);

    /*
     * The node must not have any children.
     */
    assert(fat_has_children(node) == false);

    bs = block_bb_get(nodep->idx->dev_handle);
    if (nodep->firstc != FAT_CLST_RES0) {
        assert(nodep->size);
        /* Free all clusters allocated to the node. */
        fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
    }

    fat_idx_destroy(nodep->idx);
    free(nodep);
    return EOK;
}

int fat_link(void *prnt, void *chld, const char *name)
{
    fat_node_t *parentp = (fat_node_t *)prnt;
    fat_node_t *childp = (fat_node_t *)chld;
    fat_dentry_t *d;
    fat_bs_t *bs;
    block_t *b;
    int i, j;
    uint16_t bps;
    unsigned dps;
    unsigned blocks;

    futex_down(&childp->lock);
    if (childp->lnkcnt == 1) {
        /*
         * On FAT, we don't support multiple hard links.
         */
        futex_up(&childp->lock);
        return EMLINK;
    }
    assert(childp->lnkcnt == 0);
    futex_up(&childp->lock);

    if (!fat_dentry_name_verify(name)) {
        /*
         * Attempt to create an unsupported name.
         */
        return ENOTSUP;
    }

    /*
     * Get an unused dentry from the parent, or grow the parent and
     * allocate a new one.
     */

    futex_down(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    blocks = parentp->size / bps;

    for (i = 0; i < blocks; i++) {
        b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_VALID:
                /* skipping used and meta entries */
                continue;
            case FAT_DENTRY_FREE:
            case FAT_DENTRY_LAST:
                /* found an empty slot */
                goto hit;
            }
        }
        block_put(b);
    }

    /*
     * We need to grow the parent in order to create a new unused dentry.
     */
    futex_up(&parentp->idx->lock);
    return ENOTSUP; /* XXX */

hit:
    /*
     * At this point we only establish the link between the parent and the
     * child. The dentry, except for the name and the extension, will
     * remain uninitialized until the corresponding node is synced. Thus
     * the valid dentry data is kept in the child node structure.
     */
    memset(d, 0, sizeof(fat_dentry_t));
    fat_dentry_name_set(d, name);
    b->dirty = true;        /* need to sync block */
    block_put(b);
    futex_up(&parentp->idx->lock);

    futex_down(&childp->idx->lock);

    /*
     * If possible, create the Sub-directory Identifier Entry and the
     * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries
     * are not mandatory according to Standard ECMA-107 and HelenOS VFS does
     * not use them anyway, so this is rather a sign of our good will.
     */
    b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE);
    d = (fat_dentry_t *)b->data;
    if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
        strcmp(d->name, FAT_NAME_DOT) == 0) {
        memset(d, 0, sizeof(fat_dentry_t));
        strcpy(d->name, FAT_NAME_DOT);
        strcpy(d->ext, FAT_EXT_PAD);
        d->attr = FAT_ATTR_SUBDIR;
        d->firstc = host2uint16_t_le(childp->firstc);
        /* TODO: initialize also the date/time members. */
    }
    d++;
    if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
        strcmp(d->name, FAT_NAME_DOT_DOT) == 0) {
        memset(d, 0, sizeof(fat_dentry_t));
        strcpy(d->name, FAT_NAME_DOT_DOT);
        strcpy(d->ext, FAT_EXT_PAD);
        d->attr = FAT_ATTR_SUBDIR;
        d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
            host2uint16_t_le(FAT_CLST_RES0) :
            host2uint16_t_le(parentp->firstc);
        /* TODO: initialize also the date/time members. */
    }
    b->dirty = true;        /* need to sync block */
    block_put(b);

    childp->idx->pfc = parentp->firstc;
    childp->idx->pdi = i * dps + j;
    futex_up(&childp->idx->lock);

    futex_down(&childp->lock);
    childp->lnkcnt = 1;
    childp->dirty = true;       /* need to sync node */
    futex_up(&childp->lock);

    /*
     * Hash in the index structure into the position hash.
     */
    fat_idx_hashin(childp->idx);

    return EOK;
}

int fat_unlink(void *prnt, void *chld)
{
    fat_node_t *parentp = (fat_node_t *)prnt;
    fat_node_t *childp = (fat_node_t *)chld;
    fat_bs_t *bs;
    fat_dentry_t *d;
    uint16_t bps;
    block_t *b;

    futex_down(&parentp->lock);
    futex_down(&childp->lock);
    assert(childp->lnkcnt == 1);
    futex_down(&childp->idx->lock);
    bs = block_bb_get(childp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);

    b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc,
        (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
        BLOCK_FLAGS_NONE);
    d = (fat_dentry_t *)b->data +
        (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
    /* mark the dentry as not-currently-used */
    d->name[0] = FAT_DENTRY_ERASED;
    b->dirty = true;        /* need to sync block */
    block_put(b);

    /* remove the index structure from the position hash */
    fat_idx_hashout(childp->idx);
    /* clear position information */
    childp->idx->pfc = FAT_CLST_RES0;
    childp->idx->pdi = 0;
    futex_up(&childp->idx->lock);
    childp->lnkcnt = 0;
    childp->dirty = true;
    futex_up(&childp->lock);
    futex_up(&parentp->lock);

    return EOK;
}

void *fat_match(void *prnt, const char *component)
{
    fat_bs_t *bs;
    fat_node_t *parentp = (fat_node_t *)prnt;
    char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
    unsigned i, j;
    unsigned bps;       /* bytes per sector */
    unsigned dps;       /* dentries per sector */
    unsigned blocks;
    fat_dentry_t *d;
    block_t *b;

    futex_down(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);
    blocks = parentp->size / bps;
    for (i = 0; i < blocks; i++) {
        b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_FREE:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&parentp->idx->lock);
                return NULL;
            default:
            case FAT_DENTRY_VALID:
                fat_dentry_name_get(d, name);
                break;
            }
            if (fat_dentry_namecmp(name, component) == 0) {
                /* hit */
                void *node;
                /*
                 * Assume tree hierarchy for locking.  We
                 * already have the parent and now we are going
                 * to lock the child.  Never lock in the
                 * opposite order.
                 */
                fat_idx_t *idx = fat_idx_get_by_pos(
                    parentp->idx->dev_handle, parentp->firstc,
                    i * dps + j);
                futex_up(&parentp->idx->lock);
                if (!idx) {
                    /*
                     * Can happen if memory is low or if we
                     * run out of 32-bit indices.
                     */
                    block_put(b);
                    return NULL;
                }
                node = fat_node_get_core(idx);
                futex_up(&idx->lock);
                block_put(b);
                return node;
            }
        }
        block_put(b);
    }

    futex_up(&parentp->idx->lock);
    return NULL;
}

fs_index_t fat_index_get(void *node)
{
    fat_node_t *fnodep = (fat_node_t *)node;
    if (!fnodep)
        return 0;
    return fnodep->idx->index;
}

size_t fat_size_get(void *node)
{
    return ((fat_node_t *)node)->size;
}

unsigned fat_lnkcnt_get(void *node)
{
    return ((fat_node_t *)node)->lnkcnt;
}

bool fat_has_children(void *node)
{
    fat_bs_t *bs;
    fat_node_t *nodep = (fat_node_t *)node;
    unsigned bps;
    unsigned dps;
    unsigned blocks;
    block_t *b;
    unsigned i, j;

    if (nodep->type != FAT_DIRECTORY)
        return false;

    futex_down(&nodep->idx->lock);
    bs = block_bb_get(nodep->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    blocks = nodep->size / bps;

    for (i = 0; i < blocks; i++) {
        fat_dentry_t *d;

        b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_FREE:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return false;
            default:
            case FAT_DENTRY_VALID:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return true;
            }
            block_put(b);
            futex_up(&nodep->idx->lock);
            return true;
        }
        block_put(b);
    }

    futex_up(&nodep->idx->lock);
    return false;
}

void *fat_root_get(dev_handle_t dev_handle)
{
    return fat_node_get(dev_handle, 0);
}

char fat_plb_get_char(unsigned pos)
{
    return fat_reg.plb_ro[pos % PLB_SIZE];
}

bool fat_is_directory(void *node)
{
    return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}

bool fat_is_file(void *node)
{
    return ((fat_node_t *)node)->type == FAT_FILE;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
    .match = fat_match,
    .node_get = fat_node_get,
    .node_put = fat_node_put,
    .create = fat_create_node,
    .destroy = fat_destroy_node,
    .link = fat_link,
    .unlink = fat_unlink,
    .index_get = fat_index_get,
    .size_get = fat_size_get,
    .lnkcnt_get = fat_lnkcnt_get,
    .has_children = fat_has_children,
    .root_get = fat_root_get,
    .plb_get_char = fat_plb_get_char,
    .is_directory = fat_is_directory,
    .is_file = fat_is_file
};

/*
 * VFS operations.
 */

void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
    fat_bs_t *bs;
    uint16_t bps;
    uint16_t rde;
    int rc;

    /* initialize libblock */
    rc = block_init(dev_handle, BS_SIZE);
    if (rc != EOK) {
        ipc_answer_0(rid, rc);
        return;
    }

    /* prepare the boot block */
    rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* get the buffer with the boot sector */
    bs = block_bb_get(dev_handle);

    /* Read the number of root directory entries. */
    bps = uint16_t_le2host(bs->bps);
    rde = uint16_t_le2host(bs->root_ent_max);

    if (bps != BS_SIZE) {
        block_fini(dev_handle);
        ipc_answer_0(rid, ENOTSUP);
        return;
    }

    /* Initialize the block cache */
    rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    rc = fat_idx_init_by_dev_handle(dev_handle);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* Initialize the root node. */
    fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
    if (!rootp) {
        block_fini(dev_handle);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    fat_node_initialize(rootp);

    fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
    if (!ridxp) {
        block_fini(dev_handle);
        free(rootp);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    assert(ridxp->index == 0);
    /* ridxp->lock held */

    rootp->type = FAT_DIRECTORY;
    rootp->firstc = FAT_CLST_ROOT;
    rootp->refcnt = 1;
    rootp->lnkcnt = 0;  /* FS root is not linked */
    rootp->size = rde * sizeof(fat_dentry_t);
    rootp->idx = ridxp;
    ridxp->nodep = rootp;

    futex_up(&ridxp->lock);

    ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}

void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
    ipc_answer_0(rid, ENOTSUP);
}

void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    size_t bytes;
    block_t *b;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_read_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);

    if (nodep->type == FAT_FILE) {
        /*
         * Our strategy for regular file reads is to read one block at
         * most and make use of the possibility to return less data than
         * requested. This keeps the code very simple.
         */
        if (pos >= nodep->size) {
            /* reading beyond the EOF */
            bytes = 0;
            (void) ipc_data_read_finalize(callid, NULL, 0);
        } else {
            bytes = min(len, bps - pos % bps);
            bytes = min(bytes, nodep->size - pos);
            b = fat_block_get(bs, nodep, pos / bps,
                BLOCK_FLAGS_NONE);
            (void) ipc_data_read_finalize(callid, b->data + pos % bps,
                bytes);
            block_put(b);
        }
    } else {
        unsigned bnum;
        off_t spos = pos;
        char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
        fat_dentry_t *d;

        assert(nodep->type == FAT_DIRECTORY);
        assert(nodep->size % bps == 0);
        assert(bps % sizeof(fat_dentry_t) == 0);

        /*
         * Our strategy for readdir() is to use the position pointer as
         * an index into the array of all dentries. On entry, it points
         * to the first unread dentry. If we skip any dentries, we bump
         * the position pointer accordingly.
         */
        bnum = (pos * sizeof(fat_dentry_t)) / bps;
        while (bnum < nodep->size / bps) {
            off_t o;

            b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
            for (o = pos % (bps / sizeof(fat_dentry_t));
                o < bps / sizeof(fat_dentry_t);
                o++, pos++) {
                d = ((fat_dentry_t *)b->data) + o;
                switch (fat_classify_dentry(d)) {
                case FAT_DENTRY_SKIP:
                case FAT_DENTRY_FREE:
                    continue;
                case FAT_DENTRY_LAST:
                    block_put(b);
                    goto miss;
                default:
                case FAT_DENTRY_VALID:
                    fat_dentry_name_get(d, name);
                    block_put(b);
                    goto hit;
                }
            }
            block_put(b);
            bnum++;
        }
miss:
        fat_node_put(nodep);
        ipc_answer_0(callid, ENOENT);
        ipc_answer_1(rid, ENOENT, 0);
        return;
hit:
        (void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
        bytes = (pos - spos) + 1;
    }

    fat_node_put(nodep);
    ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}

void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    size_t bytes;
    block_t *b;
    uint16_t bps;
    unsigned spc;
    unsigned bpc;       /* bytes per cluster */
    off_t boundary;
    int flags = BLOCK_FLAGS_NONE;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_write_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    /*
     * In all scenarios, we will attempt to write out only one block worth
     * of data at maximum. There might be some more efficient approaches,
     * but this one greatly simplifies fat_write(). Note that we can afford
     * to do this because the client must be ready to handle the return
     * value signaling a smaller number of bytes written.
     */
    bytes = min(len, bps - pos % bps);
    if (bytes == bps)
        flags |= BLOCK_FLAGS_NOREAD;

    boundary = ROUND_UP(nodep->size, bpc);
    if (pos < boundary) {
        /*
         * This is the easier case - we are either overwriting already
         * existing contents or writing beyond the EOF, but still within
         * the limits of the last cluster. The node size may grow to the
         * next block size boundary.
         */
        fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
        b = fat_block_get(bs, nodep, pos / bps, flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        if (pos + bytes > nodep->size) {
            nodep->size = pos + bytes;
            nodep->dirty = true;    /* need to sync node */
        }
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    } else {
        /*
         * This is the more difficult case. We must allocate new
         * clusters for the node and zero them out.
         */
        int status;
        unsigned nclsts;
        fat_cluster_t mcl, lcl;

        nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
        /* create an independent chain of nclsts clusters in all FATs */
        status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
        if (status != EOK) {
            /* could not allocate a chain of nclsts clusters */
            fat_node_put(nodep);
            ipc_answer_0(callid, status);
            ipc_answer_0(rid, status);
            return;
        }
        /* zero fill any gaps */
        fat_fill_gap(bs, nodep, mcl, pos);
        b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
            flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        /*
         * Append the cluster chain starting in mcl to the end of the
         * node's cluster chain.
         */
        fat_append_clusters(bs, nodep, mcl);
        nodep->size = pos + bytes;
        nodep->dirty = true;        /* need to sync node */
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    }
}

void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    size_t size = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    uint8_t spc;
    unsigned bpc;   /* bytes per cluster */
    int rc;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    if (nodep->size == size) {
        rc = EOK;
    } else if (nodep->size < size) {
        /*
         * The standard says we have the freedom to grow the node.
         * For now, we simply return an error.
         */
        rc = EINVAL;
    } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
        /*
         * The node will be shrunk, but no clusters will be deallocated.
         */
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    } else {
        /*
         * The node will be shrunk and clusters will be deallocated.
         */
        if (size == 0) {
            fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
        } else {
            fat_cluster_t lastc;
            (void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
                &lastc, (size - 1) / bpc);
            fat_chop_clusters(bs, nodep, lastc);
        }
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    }
    fat_node_put(nodep);
    ipc_answer_0(rid, rc);
    return;
}

void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    int rc;

    fat_node_t *nodep = fat_node_get(dev_handle, index);
    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    rc = fat_destroy_node(nodep);
    ipc_answer_0(rid, rc);
}

/**
 * @}
 */