/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file    fat_ops.c
 * @brief   Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <libblock.h>
#include <ipc/ipc.h>
#include <ipc/services.h>
#include <ipc/devmap.h>
#include <async.h>
#include <errno.h>
#include <string.h>
#include <byteorder.h>
#include <libadt/hash_table.h>
#include <libadt/list.h>
#include <assert.h>
#include <futex.h>
#include <sys/mman.h>
#include <align.h>

/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);

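/** Initialize a FAT in-core node to a clean, unused state. */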
static void fat_node_initialize(fat_node_t *node)
{
    futex_initialize(&node->lock, 1);
    node->idx = NULL;
    node->type = 0;
    link_initialize(&node->ffn_link);
    node->size = 0;
    node->lnkcnt = 0;
    node->refcnt = 0;
    node->dirty = false;
}

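/** Write a dirty in-core node back to its on-disk directory entry. */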
static void fat_node_sync(fat_node_t *node)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    uint16_t bps;
    unsigned dps;

    assert(node->dirty);

    bs = block_bb_get(node->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
        (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);

    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

    d->firstc = host2uint16_t_le(node->firstc);
    if (node->type == FAT_FILE)
        d->size = host2uint32_t_le(node->size);
    /* TODO: update other fields? (e.g. time fields, attr field) */

    b->dirty = true;        /* need to sync block */
    block_put(b);
}

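/** Obtain a free node structure, reusing a cached free node when possible. */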
static fat_node_t *fat_node_get_new(void)
{
    fat_node_t *nodep;

    futex_down(&ffn_futex);
    if (!list_empty(&ffn_head)) {
        /* Try to use a cached free node structure. */
        fat_idx_t *idxp_tmp;
        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
        if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
            goto skip_cache;
        idxp_tmp = nodep->idx;
        if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
            futex_up(&nodep->lock);
            goto skip_cache;
        }
        list_remove(&nodep->ffn_link);
        futex_up(&ffn_futex);
        if (nodep->dirty)
            fat_node_sync(nodep);
        idxp_tmp->nodep = NULL;
        futex_up(&nodep->lock);
        futex_up(&idxp_tmp->lock);
    } else {
skip_cache:
        /* Try to allocate a new node structure. */
        futex_up(&ffn_futex);
        nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
        if (!nodep)
            return NULL;
    }
    fat_node_initialize(nodep);

    return nodep;
}

/** Internal version of fat_node_get().
 *
 * @param idxp      Locked index structure.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
    block_t *b;
    fat_bs_t *bs;
    fat_dentry_t *d;
    fat_node_t *nodep = NULL;
    unsigned bps;
    unsigned spc;
    unsigned dps;

    if (idxp->nodep) {
        /*
         * We are lucky.
         * The node is already instantiated in memory.
         */
        futex_down(&idxp->nodep->lock);
        if (!idxp->nodep->refcnt++)
            list_remove(&idxp->nodep->ffn_link);
        futex_up(&idxp->nodep->lock);
        return idxp->nodep;
    }

    /*
     * We must instantiate the node from the file system.
     */

    assert(idxp->pfc);

    nodep = fat_node_get_new();
    if (!nodep)
        return NULL;

    bs = block_bb_get(idxp->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    dps = bps / sizeof(fat_dentry_t);

    /* Read the block that contains the dentry of interest. */
    b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
        (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
    assert(b);

    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
    if (d->attr & FAT_ATTR_SUBDIR) {
        /*
         * The only directory which does not have this bit set is the
         * root directory itself. The root directory node is handled
         * and initialized elsewhere.
         */
        nodep->type = FAT_DIRECTORY;
        /*
         * Unfortunately, the 'size' field of the FAT dentry is not
         * defined for the directory entry type. We must determine the
         * size of the directory by walking the FAT.
         */
        nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
            uint16_t_le2host(d->firstc));
    } else {
        nodep->type = FAT_FILE;
        nodep->size = uint32_t_le2host(d->size);
    }
    nodep->firstc = uint16_t_le2host(d->firstc);
    nodep->lnkcnt = 1;
    nodep->refcnt = 1;

    block_put(b);

    /* Link the idx structure with the node structure. */
    nodep->idx = idxp;
    idxp->nodep = nodep;

    return nodep;
}

/*
 * Forward declarations of FAT libfs operations.
 */
static void *fat_node_get(dev_handle_t, fs_index_t);
static void fat_node_put(void *);
static void *fat_create_node(dev_handle_t, int);
static int fat_destroy_node(void *);
static int fat_link(void *, void *, const char *);
static int fat_unlink(void *, void *);
static void *fat_match(void *, const char *);
static fs_index_t fat_index_get(void *);
static size_t fat_size_get(void *);
static unsigned fat_lnkcnt_get(void *);
static bool fat_has_children(void *);
static void *fat_root_get(dev_handle_t);
static char fat_plb_get_char(unsigned);
static bool fat_is_directory(void *);
static bool fat_is_file(void *node);

/*
 * FAT libfs operations.
 */

/** Instantiate a FAT in-core node. */
void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
    void *node;
    fat_idx_t *idxp;

    idxp = fat_idx_get_by_index(dev_handle, index);
    if (!idxp)
        return NULL;
    /* idxp->lock held */
    node = fat_node_get_core(idxp);
    futex_up(&idxp->lock);
    return node;
}

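/** Drop a reference to a node; the last reference caches it on the free list or frees it. */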
void fat_node_put(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;
    bool destroy = false;

    futex_down(&nodep->lock);
    if (!--nodep->refcnt) {
        if (nodep->idx) {
            futex_down(&ffn_futex);
            list_append(&nodep->ffn_link, &ffn_head);
            futex_up(&ffn_futex);
        } else {
            /*
             * The node does not have any index structure associated
             * with it. This can only mean that we are releasing the
             * node after a failed attempt to allocate the index
             * structure for it.
             */
            destroy = true;
        }
    }
    futex_up(&nodep->lock);
    if (destroy)
        free(node);
}

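/** Create a new, yet unlinked node of the requested type (file or directory). */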
void *fat_create_node(dev_handle_t dev_handle, int flags)
{
    fat_idx_t *idxp;
    fat_node_t *nodep;

    nodep = fat_node_get_new();
    if (!nodep)
        return NULL;
    idxp = fat_idx_get_new(dev_handle);
    if (!idxp) {
        fat_node_put(nodep);
        return NULL;
    }
    /* idxp->lock held */
    if (flags & L_DIRECTORY) {
        nodep->type = FAT_DIRECTORY;
    } else {
        nodep->type = FAT_FILE;
    }
    nodep->size = 0;
    nodep->firstc = FAT_CLST_RES0;
    nodep->lnkcnt = 0;  /* not linked anywhere */
    nodep->refcnt = 1;

    nodep->idx = idxp;
    idxp->nodep = nodep;

    futex_up(&idxp->lock);
    return nodep;
}

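/** Destroy an unlinked, childless node and free the clusters allocated to it. */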
int fat_destroy_node(void *node)
{
    fat_node_t *nodep = (fat_node_t *)node;
    fat_bs_t *bs;

    /*
     * The node is not reachable from the file system. This means that the
     * link count should be zero and that the index structure cannot be
     * found in the position hash. Obviously, we need to lock neither the
     * node nor its index structure.
     */
    assert(nodep->lnkcnt == 0);

    /*
     * The node must not have any children.
     */
    assert(fat_has_children(node) == false);

    bs = block_bb_get(nodep->idx->dev_handle);
    if (nodep->firstc != FAT_CLST_RES0) {
        assert(nodep->size);
        /* Free all clusters allocated to the node. */
        fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
    }

    fat_idx_destroy(nodep->idx);
    free(nodep);
    return EOK;
}

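/** Link a child node into a parent directory under the given name. */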
int fat_link(void *prnt, void *chld, const char *name)
{
    fat_node_t *parentp = (fat_node_t *)prnt;
    fat_node_t *childp = (fat_node_t *)chld;
    fat_dentry_t *d;
    fat_bs_t *bs;
    block_t *b;
    int i, j;
    uint16_t bps;
    unsigned dps;
    unsigned blocks;

    futex_down(&childp->lock);
    if (childp->lnkcnt == 1) {
        /*
         * On FAT, we don't support multiple hard links.
         */
        futex_up(&childp->lock);
        return EMLINK;
    }
    assert(childp->lnkcnt == 0);
    futex_up(&childp->lock);

    if (!fat_dentry_name_verify(name)) {
        /*
         * Attempt to create unsupported name.
         */
        return ENOTSUP;
    }

    /*
     * Find an unused dentry in the parent, or grow the parent and allocate
     * a new one.
     */

    futex_down(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    blocks = parentp->size / bps;

    for (i = 0; i < blocks; i++) {
        b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_VALID:
                /* skipping used and meta entries */
                continue;
            case FAT_DENTRY_FREE:
            case FAT_DENTRY_LAST:
                /* found an empty slot */
                goto hit;
            }
        }
        block_put(b);
    }

    /*
     * We need to grow the parent in order to create a new unused dentry.
     */
    futex_up(&parentp->idx->lock);
    return ENOTSUP; /* XXX */

hit:
    /*
     * At this point we only establish the link between the parent and the
     * child.  The dentry, except for the name and the extension, will remain
     * uninitialized until the corresponding node is synced. Thus the
     * valid dentry data is kept in the child node structure.
     */
    memset(d, 0, sizeof(fat_dentry_t));
    fat_dentry_name_set(d, name);
    b->dirty = true;        /* need to sync block */
    block_put(b);
    futex_up(&parentp->idx->lock);

    futex_down(&childp->idx->lock);
    childp->idx->pfc = parentp->firstc;
    childp->idx->pdi = i * dps + j;
    futex_up(&childp->idx->lock);

    futex_down(&childp->lock);
    childp->lnkcnt = 1;
    childp->dirty = true;       /* need to sync node */
    futex_up(&childp->lock);

    /*
     * Hash the index structure into the position hash.
     */
    fat_idx_hashin(childp->idx);

    return EOK;
}

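/** Unlink a child node from a parent directory (currently not supported). */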
int fat_unlink(void *prnt, void *chld)
{
    return ENOTSUP; /* not supported at the moment */
}

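/** Look up a directory entry by name and return the matching child node. */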
void *fat_match(void *prnt, const char *component)
{
    fat_bs_t *bs;
    fat_node_t *parentp = (fat_node_t *)prnt;
    char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
    unsigned i, j;
    unsigned bps;       /* bytes per sector */
    unsigned dps;       /* dentries per sector */
    unsigned blocks;
    fat_dentry_t *d;
    block_t *b;

    futex_down(&parentp->idx->lock);
    bs = block_bb_get(parentp->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);
    blocks = parentp->size / bps;
    for (i = 0; i < blocks; i++) {
        b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_FREE:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&parentp->idx->lock);
                return NULL;
            default:
            case FAT_DENTRY_VALID:
                fat_dentry_name_get(d, name);
                break;
            }
            if (stricmp(name, component) == 0) {
                /* hit */
                void *node;
                /*
                 * Assume tree hierarchy for locking.  We
                 * already have the parent and now we are going
                 * to lock the child.  Never lock in the opposite
                 * order.
                 */
                fat_idx_t *idx = fat_idx_get_by_pos(
                    parentp->idx->dev_handle, parentp->firstc,
                    i * dps + j);
                futex_up(&parentp->idx->lock);
                if (!idx) {
                    /*
                     * Can happen if memory is low or if we
                     * run out of 32-bit indices.
                     */
                    block_put(b);
                    return NULL;
                }
                node = fat_node_get_core(idx);
                futex_up(&idx->lock);
                block_put(b);
                return node;
            }
        }
        block_put(b);
    }

    futex_up(&parentp->idx->lock);
    return NULL;
}

fs_index_t fat_index_get(void *node)
{
    fat_node_t *fnodep = (fat_node_t *)node;
    if (!fnodep)
        return 0;
    return fnodep->idx->index;
}

size_t fat_size_get(void *node)
{
    return ((fat_node_t *)node)->size;
}

unsigned fat_lnkcnt_get(void *node)
{
    return ((fat_node_t *)node)->lnkcnt;
}

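/** Return true if a directory node contains at least one valid entry. */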
bool fat_has_children(void *node)
{
    fat_bs_t *bs;
    fat_node_t *nodep = (fat_node_t *)node;
    unsigned bps;
    unsigned dps;
    unsigned blocks;
    block_t *b;
    unsigned i, j;

    if (nodep->type != FAT_DIRECTORY)
        return false;

    futex_down(&nodep->idx->lock);
    bs = block_bb_get(nodep->idx->dev_handle);
    bps = uint16_t_le2host(bs->bps);
    dps = bps / sizeof(fat_dentry_t);

    blocks = nodep->size / bps;

    for (i = 0; i < blocks; i++) {
        fat_dentry_t *d;

        b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
        for (j = 0; j < dps; j++) {
            d = ((fat_dentry_t *)b->data) + j;
            switch (fat_classify_dentry(d)) {
            case FAT_DENTRY_SKIP:
            case FAT_DENTRY_FREE:
                continue;
            case FAT_DENTRY_LAST:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return false;
            default:
            case FAT_DENTRY_VALID:
                block_put(b);
                futex_up(&nodep->idx->lock);
                return true;
            }
        }
        block_put(b);
    }

    futex_up(&nodep->idx->lock);
    return false;
}

void *fat_root_get(dev_handle_t dev_handle)
{
    return fat_node_get(dev_handle, 0);
}

char fat_plb_get_char(unsigned pos)
{
    return fat_reg.plb_ro[pos % PLB_SIZE];
}

bool fat_is_directory(void *node)
{
    return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}

bool fat_is_file(void *node)
{
    return ((fat_node_t *)node)->type == FAT_FILE;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
    .match = fat_match,
    .node_get = fat_node_get,
    .node_put = fat_node_put,
    .create = fat_create_node,
    .destroy = fat_destroy_node,
    .link = fat_link,
    .unlink = fat_unlink,
    .index_get = fat_index_get,
    .size_get = fat_size_get,
    .lnkcnt_get = fat_lnkcnt_get,
    .has_children = fat_has_children,
    .root_get = fat_root_get,
    .plb_get_char = fat_plb_get_char,
    .is_directory = fat_is_directory,
    .is_file = fat_is_file
};

/*
 * VFS operations.
 */

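/** Mount handler: initialize libblock and the block cache for the device and
 * instantiate the root directory node. */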
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
    fat_bs_t *bs;
    uint16_t bps;
    uint16_t rde;
    int rc;

    /* initialize libblock */
    rc = block_init(dev_handle, BS_SIZE);
    if (rc != EOK) {
        ipc_answer_0(rid, rc);
        return;
    }

    /* prepare the boot block */
    rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* get the buffer with the boot sector */
    bs = block_bb_get(dev_handle);

    /* Read the number of root directory entries. */
    bps = uint16_t_le2host(bs->bps);
    rde = uint16_t_le2host(bs->root_ent_max);

    if (bps != BS_SIZE) {
        block_fini(dev_handle);
        ipc_answer_0(rid, ENOTSUP);
        return;
    }

    /* Initialize the block cache */
    rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    rc = fat_idx_init_by_dev_handle(dev_handle);
    if (rc != EOK) {
        block_fini(dev_handle);
        ipc_answer_0(rid, rc);
        return;
    }

    /* Initialize the root node. */
    fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
    if (!rootp) {
        block_fini(dev_handle);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    fat_node_initialize(rootp);

    fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
    if (!ridxp) {
        block_fini(dev_handle);
        free(rootp);
        fat_idx_fini_by_dev_handle(dev_handle);
        ipc_answer_0(rid, ENOMEM);
        return;
    }
    assert(ridxp->index == 0);
    /* ridxp->lock held */

    rootp->type = FAT_DIRECTORY;
    rootp->firstc = FAT_CLST_ROOT;
    rootp->refcnt = 1;
    rootp->lnkcnt = 0;  /* FS root is not linked */
    rootp->size = rde * sizeof(fat_dentry_t);
    rootp->idx = ridxp;
    ridxp->nodep = rootp;

    futex_up(&ridxp->lock);

    ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}

void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
    ipc_answer_0(rid, ENOTSUP);
}

void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

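/** Read handler: return at most one block of file data, or the name of one
 * directory entry when reading a directory. */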
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    size_t bytes;
    block_t *b;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_read_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);

    if (nodep->type == FAT_FILE) {
        /*
         * Our strategy for regular file reads is to read one block at
         * most and make use of the possibility to return less data than
         * requested. This keeps the code very simple.
         */
        if (pos >= nodep->size) {
            /* reading beyond the EOF */
            bytes = 0;
            (void) ipc_data_read_finalize(callid, NULL, 0);
        } else {
            bytes = min(len, bps - pos % bps);
            bytes = min(bytes, nodep->size - pos);
            b = fat_block_get(bs, nodep, pos / bps,
                BLOCK_FLAGS_NONE);
            (void) ipc_data_read_finalize(callid, b->data + pos % bps,
                bytes);
            block_put(b);
        }
    } else {
        unsigned bnum;
        off_t spos = pos;
        char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
        fat_dentry_t *d;

        assert(nodep->type == FAT_DIRECTORY);
        assert(nodep->size % bps == 0);
        assert(bps % sizeof(fat_dentry_t) == 0);

        /*
         * Our strategy for readdir() is to use the position pointer as
         * an index into the array of all dentries. On entry, it points
         * to the first unread dentry. If we skip any dentries, we bump
         * the position pointer accordingly.
         */
        bnum = (pos * sizeof(fat_dentry_t)) / bps;
        while (bnum < nodep->size / bps) {
            off_t o;

            b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
            for (o = pos % (bps / sizeof(fat_dentry_t));
                o < bps / sizeof(fat_dentry_t);
                o++, pos++) {
                d = ((fat_dentry_t *)b->data) + o;
                switch (fat_classify_dentry(d)) {
                case FAT_DENTRY_SKIP:
                case FAT_DENTRY_FREE:
                    continue;
                case FAT_DENTRY_LAST:
                    block_put(b);
                    goto miss;
                default:
                case FAT_DENTRY_VALID:
                    fat_dentry_name_get(d, name);
                    block_put(b);
                    goto hit;
                }
            }
            block_put(b);
            bnum++;
        }
miss:
        fat_node_put(nodep);
        ipc_answer_0(callid, ENOENT);
        ipc_answer_1(rid, ENOENT, 0);
        return;
hit:
        (void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
        bytes = (pos - spos) + 1;
    }

    fat_node_put(nodep);
    ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}

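/** Write handler: write at most one block of data, allocating and zero-filling
 * new clusters when writing past the current end of the node. */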
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    off_t pos = (off_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    size_t bytes;
    block_t *b;
    uint16_t bps;
    unsigned spc;
    unsigned bpc;       /* bytes per cluster */
    off_t boundary;
    int flags = BLOCK_FLAGS_NONE;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    ipc_callid_t callid;
    size_t len;
    if (!ipc_data_write_receive(&callid, &len)) {
        fat_node_put(nodep);
        ipc_answer_0(callid, EINVAL);
        ipc_answer_0(rid, EINVAL);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    /*
     * In all scenarios, we will attempt to write out only one block worth
     * of data at maximum. There might be some more efficient approaches,
     * but this one greatly simplifies fat_write(). Note that we can afford
     * to do this because the client must be ready to handle the return
     * value signaling a smaller number of bytes written.
     */
    bytes = min(len, bps - pos % bps);
    if (bytes == bps)
        flags |= BLOCK_FLAGS_NOREAD;

    boundary = ROUND_UP(nodep->size, bpc);
    if (pos < boundary) {
        /*
         * This is the easier case - we are either overwriting already
         * existing contents or writing beyond the EOF, but still within
         * the limits of the last cluster. The node size may grow to the
         * next block size boundary.
         */
        fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
        b = fat_block_get(bs, nodep, pos / bps, flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        if (pos + bytes > nodep->size) {
            nodep->size = pos + bytes;
            nodep->dirty = true;    /* need to sync node */
        }
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    } else {
        /*
         * This is the more difficult case. We must allocate new
         * clusters for the node and zero them out.
         */
        int status;
        unsigned nclsts;
        fat_cluster_t mcl, lcl;

        nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
        /* create an independent chain of nclsts clusters in all FATs */
        status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
        if (status != EOK) {
            /* could not allocate a chain of nclsts clusters */
            fat_node_put(nodep);
            ipc_answer_0(callid, status);
            ipc_answer_0(rid, status);
            return;
        }
        /* zero fill any gaps */
        fat_fill_gap(bs, nodep, mcl, pos);
        b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
            flags);
        (void) ipc_data_write_finalize(callid, b->data + pos % bps,
            bytes);
        b->dirty = true;        /* need to sync block */
        block_put(b);
        /*
         * Append the cluster chain starting in mcl to the end of the
         * node's cluster chain.
         */
        fat_append_clusters(bs, nodep, mcl);
        nodep->size = pos + bytes;
        nodep->dirty = true;        /* need to sync node */
        ipc_answer_2(rid, EOK, bytes, nodep->size);
        fat_node_put(nodep);
        return;
    }
}

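/** Truncate handler: shrink a node to the requested size; growing a node this
 * way is not supported. */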
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    size_t size = (size_t)IPC_GET_ARG3(*request);
    fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
    fat_bs_t *bs;
    uint16_t bps;
    uint8_t spc;
    unsigned bpc;   /* bytes per cluster */
    int rc;

    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    bs = block_bb_get(dev_handle);
    bps = uint16_t_le2host(bs->bps);
    spc = bs->spc;
    bpc = bps * spc;

    if (nodep->size == size) {
        rc = EOK;
    } else if (nodep->size < size) {
        /*
         * The standard says we have the freedom to grow the node.
         * For now, we simply return an error.
         */
        rc = EINVAL;
    } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
        /*
         * The node will be shrunk, but no clusters will be deallocated.
         */
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    } else {
        /*
         * The node will be shrunk, clusters will be deallocated.
         */
        if (size == 0) {
            fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
        } else {
            fat_cluster_t lastc;
            (void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
                &lastc, (size - 1) / bpc);
            fat_chop_clusters(bs, nodep, lastc);
        }
        nodep->size = size;
        nodep->dirty = true;        /* need to sync node */
        rc = EOK;
    }
    fat_node_put(nodep);
    ipc_answer_0(rid, rc);
    return;
}

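/** Destroy handler: look up the node by index and destroy it. */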
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
    int rc;

    fat_node_t *nodep = fat_node_get(dev_handle, index);
    if (!nodep) {
        ipc_answer_0(rid, ENOENT);
        return;
    }

    rc = fat_destroy_node(nodep);
    ipc_answer_0(rid, rc);
}

/**
 * @}
 */