
  1. /*
  2.  * Copyright (c) 2008 Jakub Jermar
  3.  * All rights reserved.
  4.  *
  5.  * Redistribution and use in source and binary forms, with or without
  6.  * modification, are permitted provided that the following conditions
  7.  * are met:
  8.  *
  9.  * - Redistributions of source code must retain the above copyright
  10.  *   notice, this list of conditions and the following disclaimer.
  11.  * - Redistributions in binary form must reproduce the above copyright
  12.  *   notice, this list of conditions and the following disclaimer in the
  13.  *   documentation and/or other materials provided with the distribution.
  14.  * - The name of the author may not be used to endorse or promote products
  15.  *   derived from this software without specific prior written permission.
  16.  *
  17.  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  18.  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  19.  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  20.  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  21.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  22.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  23.  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  24.  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  25.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  26.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27.  */
  28.  
  29. /** @addtogroup fs
  30.  * @{
  31.  */
  32.  
  33. /**
  34.  * @file    fat_ops.c
  35.  * @brief   Implementation of VFS operations for the FAT file system server.
  36.  */
  37.  
  38. #include "fat.h"
  39. #include "fat_dentry.h"
  40. #include "fat_fat.h"
  41. #include "../../vfs/vfs.h"
  42. #include <libfs.h>
  43. #include <ipc/ipc.h>
  44. #include <ipc/services.h>
  45. #include <ipc/devmap.h>
  46. #include <async.h>
  47. #include <errno.h>
  48. #include <string.h>
  49. #include <byteorder.h>
  50. #include <libadt/hash_table.h>
  51. #include <libadt/list.h>
  52. #include <assert.h>
  53. #include <futex.h>
  54. #include <sys/mman.h>
  55. #include <align.h>
  56.  
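/* Address and size of the block containing the boot sector. */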
  57. #define BS_BLOCK        0
  58. #define BS_SIZE         512
  59.  
  60. /** Futex protecting the list of cached free FAT nodes. */
  61. static futex_t ffn_futex = FUTEX_INITIALIZER;
  62.  
  63. /** List of cached free FAT nodes. */
  64. static LIST_INITIALIZE(ffn_head);
  65.  
  66. #define min(a, b)       ((a) < (b) ? (a) : (b))
  67.  
  68. static int dev_phone = -1;      /* FIXME */
  69. static void *dev_buffer = NULL;     /* FIXME */
  70.  
  71. /* TODO move somewhere else */
  72. typedef struct {
  73.     void *data;
  74.     size_t size;
  75.     bool dirty;
  76. } block_t;
  77.  
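/** Read one block of @a bs bytes starting at block @a offset of the device.
 *
 * Temporary implementation: every call allocates a fresh block_t and reads
 * the data synchronously through the shared device buffer.
 */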
  78. static block_t *block_get(dev_handle_t dev_handle, off_t offset, size_t bs)
  79. {
  80.     /* FIXME */
  81.     block_t *b;
  82.     off_t bufpos = 0;
  83.     size_t buflen = 0;
  84.     off_t pos = offset * bs;
  85.  
  86.     assert(dev_phone != -1);
  87.     assert(dev_buffer);
  88.  
  89.     b = malloc(sizeof(block_t));
  90.     if (!b)
  91.         return NULL;
  92.    
  93.     b->data = malloc(bs);
  94.     if (!b->data) {
  95.         free(b);
  96.         return NULL;
  97.     }
  98.     b->size = bs;
  99.  
  100.     if (!libfs_blockread(dev_phone, dev_buffer, &bufpos, &buflen, &pos,
  101.         b->data, bs, bs)) {
  102.         free(b->data);
  103.         free(b);
  104.         return NULL;
  105.     }
  106.  
  107.     return b;
  108. }
  109.  
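/** Release a block obtained from block_get().
 *
 * Temporary implementation: the dirty flag is not honored yet, i.e. no
 * write-back takes place; the buffer is simply freed.
 */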
  110. static void block_put(block_t *block)
  111. {
  112.     /* FIXME */
  113.     free(block->data);
  114.     free(block);
  115. }
  116.  
  117. #define FAT1        0
  118.  
  119. #define FAT_BS(b)       ((fat_bs_t *)((b)->data))
  120.  
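/*
 * Special FAT16 cluster values: 0x0000 and 0x0001 are reserved and never
 * address data, 0x0002 is the first data cluster, 0xfff7 marks a bad
 * cluster and 0xfff8 through 0xffff terminate a cluster chain.
 */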
  121. #define FAT_CLST_RES0   0x0000
  122. #define FAT_CLST_RES1   0x0001
  123. #define FAT_CLST_FIRST  0x0002
  124. #define FAT_CLST_BAD    0xfff7
  125. #define FAT_CLST_LAST1  0xfff8
  126. #define FAT_CLST_LAST8  0xffff
  127.  
  128. /* internally used to mark root directory's parent */
  129. #define FAT_CLST_ROOTPAR    FAT_CLST_RES0
  130. /* internally used to mark root directory */
  131. #define FAT_CLST_ROOT       FAT_CLST_RES1
  132.  
  133. #define fat_block_get(np, off) \
  134.     _fat_block_get((np)->idx->dev_handle, (np)->firstc, (off))
  135.  
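/** Return block @a offset of the file starting at cluster @a firstc.
 *
 * The root directory (firstc == FAT_CLST_ROOT) is addressed directly within
 * the root directory area. For all other files, the cluster chain is walked
 * in FAT1 until the cluster containing the requested block is found.
 */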
  136. static block_t *
  137. _fat_block_get(dev_handle_t dev_handle, fat_cluster_t firstc, off_t offset)
  138. {
  139.     block_t *bb;
  140.     block_t *b;
  141.     unsigned bps;
  142.     unsigned spc;
  143.     unsigned rscnt;     /* block address of the first FAT */
  144.     unsigned fatcnt;
  145.     unsigned rde;
  146.     unsigned rds;       /* root directory size */
  147.     unsigned sf;
  148.     unsigned ssa;       /* size of the system area */
  149.     unsigned clusters;
  150.     fat_cluster_t clst = firstc;
  151.     unsigned i;
  152.  
  153.     bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
  154.     bps = uint16_t_le2host(FAT_BS(bb)->bps);
  155.     spc = FAT_BS(bb)->spc;
  156.     rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
  157.     fatcnt = FAT_BS(bb)->fatcnt;
  158.     rde = uint16_t_le2host(FAT_BS(bb)->root_ent_max);
  159.     sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
  160.     block_put(bb);
  161.  
  162.     rds = (sizeof(fat_dentry_t) * rde) / bps;
  163.     rds += ((sizeof(fat_dentry_t) * rde) % bps != 0);
  164.     ssa = rscnt + fatcnt * sf + rds;
  165.  
  166.     if (firstc == FAT_CLST_ROOT) {
  167.         /* root directory special case */
  168.         assert(offset < rds);
  169.         b = block_get(dev_handle, rscnt + fatcnt * sf + offset, bps);
  170.         return b;
  171.     }
  172.  
  173.     clusters = offset / spc;
  174.     for (i = 0; i < clusters; i++) {
  175.         unsigned fsec;  /* sector offset relative to FAT1 */
  176.         unsigned fidx;  /* FAT1 entry index */
  177.  
  178.         assert(clst >= FAT_CLST_FIRST && clst < FAT_CLST_BAD);
  179.         fsec = (clst * sizeof(fat_cluster_t)) / bps;
  180.         fidx = clst % (bps / sizeof(fat_cluster_t));
  181.         /* read FAT1 */
  182.         b = block_get(dev_handle, rscnt + fsec, bps);
  183.         clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
  184.         assert(clst != FAT_CLST_BAD);
  185.         assert(clst < FAT_CLST_LAST1);
  186.         block_put(b);
  187.     }
  188.  
  189.     b = block_get(dev_handle, ssa + (clst - FAT_CLST_FIRST) * spc +
  190.         offset % spc, bps);
  191.  
  192.     return b;
  193. }
  194.  
  195. /** Return number of blocks allocated to a file.
  196.  *
  197.  * @param dev_handle    Device handle of the device with the file.
  198.  * @param firstc    First cluster of the file.
  199.  *
  200.  * @return      Number of blocks allocated to the file.
  201.  */
  202. static uint16_t
  203. _fat_blcks_get(dev_handle_t dev_handle, fat_cluster_t firstc)
  204. {
  205.     block_t *bb;
  206.     block_t *b;
  207.     unsigned bps;
  208.     unsigned spc;
  209.     unsigned rscnt;     /* block address of the first FAT */
  210.     unsigned clusters = 0;
  211.     fat_cluster_t clst = firstc;
  212.  
  213.     bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
  214.     bps = uint16_t_le2host(FAT_BS(bb)->bps);
  215.     spc = FAT_BS(bb)->spc;
  216.     rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
  217.     block_put(bb);
  218.  
  219.     if (firstc == FAT_CLST_RES0) {
  220.         /* No space allocated to the file. */
  221.         return 0;
  222.     }
  223.  
  224.     while (clst < FAT_CLST_LAST1) {
  225.         unsigned fsec;  /* sector offset relative to FAT1 */
  226.         unsigned fidx;  /* FAT1 entry index */
  227.  
  228.         assert(clst >= FAT_CLST_FIRST);
  229.         fsec = (clst * sizeof(fat_cluster_t)) / bps;
  230.         fidx = clst % (bps / sizeof(fat_cluster_t));
  231.         /* read FAT1 */
  232.         b = block_get(dev_handle, rscnt + fsec, bps);
  233.         clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
  234.         assert(clst != FAT_CLST_BAD);
  235.         block_put(b);
  236.         clusters++;
  237.     }
  238.  
  239.     return clusters * spc;
  240. }
  241.  
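/** Initialize an in-core FAT node to an empty, unused state. */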
  242. static void fat_node_initialize(fat_node_t *node)
  243. {
  244.     futex_initialize(&node->lock, 1);
  245.     node->idx = NULL;
  246.     node->type = 0;
  247.     link_initialize(&node->ffn_link);
  248.     node->size = 0;
  249.     node->lnkcnt = 0;
  250.     node->refcnt = 0;
  251.     node->dirty = false;
  252. }
  253.  
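/** Return the number of bytes per sector as recorded in the boot sector. */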
  254. static uint16_t fat_bps_get(dev_handle_t dev_handle)
  255. {
  256.     block_t *bb;
  257.     uint16_t bps;
  258.    
  259.     bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
  260.     assert(bb != NULL);
  261.     bps = uint16_t_le2host(FAT_BS(bb)->bps);
  262.     block_put(bb);
  263.  
  264.     return bps;
  265. }
  266.  
  267. static void fat_node_sync(fat_node_t *node)
  268. {
  269.     /* TODO */
  270. }
  271.  
  272. /** Internal version of fat_node_get().
  273.  *
  274.  * @param idxp      Locked index structure.
  275.  */
  276. static void *fat_node_get_core(fat_idx_t *idxp)
  277. {
  278.     block_t *b;
  279.     fat_dentry_t *d;
  280.     fat_node_t *nodep = NULL;
  281.     unsigned bps;
  282.     unsigned dps;
  283.  
  284.     if (idxp->nodep) {
  285.         /*
  286.          * We are lucky.
  287.          * The node is already instantiated in memory.
  288.          */
  289.         futex_down(&idxp->nodep->lock);
  290.         if (!idxp->nodep->refcnt++)
  291.             list_remove(&idxp->nodep->ffn_link);
  292.         futex_up(&idxp->nodep->lock);
  293.         return idxp->nodep;
  294.     }
  295.  
  296.     /*
  297.      * We must instantiate the node from the file system.
  298.      */
  299.    
  300.     assert(idxp->pfc);
  301.  
  302.     futex_down(&ffn_futex);
  303.     if (!list_empty(&ffn_head)) {
  304.         /* Try to use a cached free node structure. */
  305.         fat_idx_t *idxp_tmp;
  306.         nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
  307.         if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
  308.             goto skip_cache;
  309.         idxp_tmp = nodep->idx;
  310.         if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
  311.             futex_up(&nodep->lock);
  312.             goto skip_cache;
  313.         }
  314.         list_remove(&nodep->ffn_link);
  315.         futex_up(&ffn_futex);
  316.         if (nodep->dirty)
  317.             fat_node_sync(nodep);
  318.         idxp_tmp->nodep = NULL;
  319.         futex_up(&nodep->lock);
  320.         futex_up(&idxp_tmp->lock);
  321.     } else {
  322. skip_cache:
  323.         /* Try to allocate a new node structure. */
  324.         futex_up(&ffn_futex);
  325.         nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
  326.         if (!nodep)
  327.             return NULL;
  328.     }
  329.     fat_node_initialize(nodep);
  330.  
  331.     bps = fat_bps_get(idxp->dev_handle);
  332.     dps = bps / sizeof(fat_dentry_t);
  333.  
  334.     /* Read the block that contains the dentry of interest. */
  335.     b = _fat_block_get(idxp->dev_handle, idxp->pfc,
  336.         (idxp->pdi * sizeof(fat_dentry_t)) / bps);
  337.     assert(b);
  338.  
  339.     d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
  340.     if (d->attr & FAT_ATTR_SUBDIR) {
  341.         /*
  342.          * The only directory which does not have this bit set is the
  343.          * root directory itself. The root directory node is handled
  344.          * and initialized elsewhere.
  345.          */
  346.         nodep->type = FAT_DIRECTORY;
  347.         /*
  348.          * Unfortunately, the 'size' field of the FAT dentry is not
  349.          * defined for the directory entry type. We must determine the
  350.          * size of the directory by walking the FAT.
  351.          */
  352.         nodep->size = bps * _fat_blcks_get(idxp->dev_handle,
  353.             uint16_t_le2host(d->firstc));
  354.     } else {
  355.         nodep->type = FAT_FILE;
  356.         nodep->size = uint32_t_le2host(d->size);
  357.     }
  358.     nodep->firstc = uint16_t_le2host(d->firstc);
  359.     nodep->lnkcnt = 1;
  360.     nodep->refcnt = 1;
  361.  
  362.     block_put(b);
  363.  
  364.     /* Link the idx structure with the node structure. */
  365.     nodep->idx = idxp;
  366.     idxp->nodep = nodep;
  367.  
  368.     return nodep;
  369. }
  370.  
  371. /** Instantiate a FAT in-core node. */
  372. static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
  373. {
  374.     void *node;
  375.     fat_idx_t *idxp;
  376.  
  377.     idxp = fat_idx_get_by_index(dev_handle, index);
  378.     if (!idxp)
  379.         return NULL;
  380.     /* idxp->lock held */
  381.     node = fat_node_get_core(idxp);
  382.     futex_up(&idxp->lock);
  383.     return node;
  384. }
  385.  
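/** Drop a reference to an in-core FAT node.
 *
 * Nodes whose reference count drops to zero are not freed; they are
 * appended to the list of cached free nodes for later reuse.
 */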
  386. static void fat_node_put(void *node)
  387. {
  388.     fat_node_t *nodep = (fat_node_t *)node;
  389.  
  390.     futex_down(&nodep->lock);
  391.     if (!--nodep->refcnt) {
  392.         futex_down(&ffn_futex);
  393.         list_append(&nodep->ffn_link, &ffn_head);
  394.         futex_up(&ffn_futex);
  395.     }
  396.     futex_up(&nodep->lock);
  397. }
  398.  
  399. static void *fat_create(int flags)
  400. {
  401.     return NULL;    /* not supported at the moment */
  402. }
  403.  
  404. static int fat_destroy(void *node)
  405. {
  406.     return ENOTSUP; /* not supported at the moment */
  407. }
  408.  
  409. static bool fat_link(void *prnt, void *chld, const char *name)
  410. {
  411.     return false;   /* not supported at the moment */
  412. }
  413.  
  414. static int fat_unlink(void *prnt, void *chld)
  415. {
  416.     return ENOTSUP; /* not supported at the moment */
  417. }
  418.  
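/** Look up the directory entry named @a component in directory @a prnt.
 *
 * Valid dentries are compared case-insensitively against the component in
 * their canonified name.ext form. Returns the child node on a hit, NULL
 * otherwise.
 */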
  419. static void *fat_match(void *prnt, const char *component)
  420. {
  421.     fat_node_t *parentp = (fat_node_t *)prnt;
  422.     char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
  423.     unsigned i, j;
  424.     unsigned bps;       /* bytes per sector */
  425.     unsigned dps;       /* dentries per sector */
  426.     unsigned blocks;
  427.     fat_dentry_t *d;
  428.     block_t *b;
  429.  
  430.     futex_down(&parentp->idx->lock);
  431.     bps = fat_bps_get(parentp->idx->dev_handle);
  432.     dps = bps / sizeof(fat_dentry_t);
  433.     blocks = parentp->size / bps + (parentp->size % bps != 0);
  434.     for (i = 0; i < blocks; i++) {
  435.         unsigned dentries;
  436.        
  437.         b = fat_block_get(parentp, i);
  438.         dentries = (i == blocks - 1) ?
  439.             (parentp->size - i * bps) / sizeof(fat_dentry_t) :
  440.             dps;
  441.         for (j = 0; j < dentries; j++) {
  442.             d = ((fat_dentry_t *)b->data) + j;
  443.             switch (fat_classify_dentry(d)) {
  444.             case FAT_DENTRY_SKIP:
  445.                 continue;
  446.             case FAT_DENTRY_LAST:
  447.                 block_put(b);
  448.                 futex_up(&parentp->idx->lock);
  449.                 return NULL;
  450.             default:
  451.             case FAT_DENTRY_VALID:
  452.                 dentry_name_canonify(d, name);
  453.                 break;
  454.             }
  455.             if (stricmp(name, component) == 0) {
  456.                 /* hit */
  457.                 void *node;
  458.                 /*
  459.                  * Assume tree hierarchy for locking.  We
  460.                  * already have the parent and now we are going
  461.                  * to lock the child.  Never lock in the opposite
  462.                  * order.
  463.                  */
  464.                 fat_idx_t *idx = fat_idx_get_by_pos(
  465.                     parentp->idx->dev_handle, parentp->firstc,
  466.                     i * dps + j);
  467.                 futex_up(&parentp->idx->lock);
  468.                 if (!idx) {
  469.                     /*
  470.                      * Can happen if memory is low or if we
  471.                      * run out of 32-bit indices.
  472.                      */
  473.                     block_put(b);
  474.                     return NULL;
  475.                 }
  476.                 node = fat_node_get_core(idx);
  477.                 futex_up(&idx->lock);
  478.                 block_put(b);
  479.                 return node;
  480.             }
  481.         }
  482.         block_put(b);
  483.     }
  484.     futex_up(&parentp->idx->lock);
  485.     return NULL;
  486. }
  487.  
  488. static fs_index_t fat_index_get(void *node)
  489. {
  490.     fat_node_t *fnodep = (fat_node_t *)node;
  491.     if (!fnodep)
  492.         return 0;
  493.     return fnodep->idx->index;
  494. }
  495.  
  496. static size_t fat_size_get(void *node)
  497. {
  498.     return ((fat_node_t *)node)->size;
  499. }
  500.  
  501. static unsigned fat_lnkcnt_get(void *node)
  502. {
  503.     return ((fat_node_t *)node)->lnkcnt;
  504. }
  505.  
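/** Tell whether a directory node contains at least one valid dentry.
 *
 * Non-directory nodes are reported as having no children.
 */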
  506. static bool fat_has_children(void *node)
  507. {
  508.     fat_node_t *nodep = (fat_node_t *)node;
  509.     unsigned bps;
  510.     unsigned dps;
  511.     unsigned blocks;
  512.     block_t *b;
  513.     unsigned i, j;
  514.  
  515.     if (nodep->type != FAT_DIRECTORY)
  516.         return false;
  517.  
  518.     futex_down(&nodep->idx->lock);
  519.     bps = fat_bps_get(nodep->idx->dev_handle);
  520.     dps = bps / sizeof(fat_dentry_t);
  521.  
  522.     blocks = nodep->size / bps + (nodep->size % bps != 0);
  523.  
  524.     for (i = 0; i < blocks; i++) {
  525.         unsigned dentries;
  526.         fat_dentry_t *d;
  527.    
  528.         b = fat_block_get(nodep, i);
  529.         dentries = (i == blocks - 1) ?
  530.             (nodep->size - i * bps) / sizeof(fat_dentry_t) :
  531.             dps;
  532.         for (j = 0; j < dentries; j++) {
  533.             d = ((fat_dentry_t *)b->data) + j;
  534.             switch (fat_classify_dentry(d)) {
  535.             case FAT_DENTRY_SKIP:
  536.                 continue;
  537.             case FAT_DENTRY_LAST:
  538.                 block_put(b);
  539.                 futex_up(&nodep->idx->lock);
  540.                 return false;
  541.             default:
  542.             case FAT_DENTRY_VALID:
  543.                 block_put(b);
  544.                 futex_up(&nodep->idx->lock);
  545.                 return true;
  546.             }
  550.         }
  551.         block_put(b);
  552.     }
  553.  
  554.     futex_up(&nodep->idx->lock);
  555.     return false;
  556. }
  557.  
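/** Return the in-core node of the root directory, which has index 0. */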
  558. static void *fat_root_get(dev_handle_t dev_handle)
  559. {
  560.     return fat_node_get(dev_handle, 0);
  561. }
  562.  
  563. static char fat_plb_get_char(unsigned pos)
  564. {
  565.     return fat_reg.plb_ro[pos % PLB_SIZE];
  566. }
  567.  
  568. static bool fat_is_directory(void *node)
  569. {
  570.     return ((fat_node_t *)node)->type == FAT_DIRECTORY;
  571. }
  572.  
  573. static bool fat_is_file(void *node)
  574. {
  575.     return ((fat_node_t *)node)->type == FAT_FILE;
  576. }
  577.  
  578. /** libfs operations */
  579. libfs_ops_t fat_libfs_ops = {
  580.     .match = fat_match,
  581.     .node_get = fat_node_get,
  582.     .node_put = fat_node_put,
  583.     .create = fat_create,
  584.     .destroy = fat_destroy,
  585.     .link = fat_link,
  586.     .unlink = fat_unlink,
  587.     .index_get = fat_index_get,
  588.     .size_get = fat_size_get,
  589.     .lnkcnt_get = fat_lnkcnt_get,
  590.     .has_children = fat_has_children,
  591.     .root_get = fat_root_get,
  592.     .plb_get_char = fat_plb_get_char,
  593.     .is_directory = fat_is_directory,
  594.     .is_file = fat_is_file
  595. };
  596.  
  597. void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
  598. {
  599.     dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
  600.     block_t *bb;
  601.     uint16_t bps;
  602.     uint16_t rde;
  603.     int rc;
  604.  
  605.     /*
  606.      * For now, we don't bother to remember dev_handle, dev_phone or
  607.      * dev_buffer in some data structure. We use global variables because we
  608.      * know there will be at most one mount on this file system.
  609.      * Of course, this is a huge TODO item.
  610.      */
  611.     dev_buffer = mmap(NULL, BS_SIZE, PROTO_READ | PROTO_WRITE,
  612.         MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
  613.    
  614.     if (!dev_buffer) {
  615.         ipc_answer_0(rid, ENOMEM);
  616.         return;
  617.     }
  618.  
  619.     dev_phone = ipc_connect_me_to(PHONE_NS, SERVICE_DEVMAP,
  620.         DEVMAP_CONNECT_TO_DEVICE, dev_handle);
  621.  
  622.     if (dev_phone < 0) {
  623.         munmap(dev_buffer, BS_SIZE);
  624.         ipc_answer_0(rid, dev_phone);
  625.         return;
  626.     }
  627.  
  628.     rc = ipc_share_out_start(dev_phone, dev_buffer,
  629.         AS_AREA_READ | AS_AREA_WRITE);
  630.     if (rc != EOK) {
  631.         munmap(dev_buffer, BS_SIZE);
  632.         ipc_answer_0(rid, rc);
  633.         return;
  634.     }
  635.  
  636.     /* Read the number of root directory entries. */
  637.     bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
  638.     bps = uint16_t_le2host(FAT_BS(bb)->bps);
  639.     rde = uint16_t_le2host(FAT_BS(bb)->root_ent_max);
  640.     block_put(bb);
  641.  
  642.     if (bps != BS_SIZE) {
  643.         munmap(dev_buffer, BS_SIZE);
  644.         ipc_answer_0(rid, ENOTSUP);
  645.         return;
  646.     }
  647.  
  648.     rc = fat_idx_init_by_dev_handle(dev_handle);
  649.     if (rc != EOK) {
  650.         munmap(dev_buffer, BS_SIZE);
  651.         ipc_answer_0(rid, rc);
  652.         return;
  653.     }
  654.  
  655.     /* Initialize the root node. */
  656.     fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
  657.     if (!rootp) {
  658.         munmap(dev_buffer, BS_SIZE);
  659.         fat_idx_fini_by_dev_handle(dev_handle);
  660.         ipc_answer_0(rid, ENOMEM);
  661.         return;
  662.     }
  663.     fat_node_initialize(rootp);
  664.  
  665.     fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
  666.     if (!ridxp) {
  667.         munmap(dev_buffer, BS_SIZE);
  668.         free(rootp);
  669.         fat_idx_fini_by_dev_handle(dev_handle);
  670.         ipc_answer_0(rid, ENOMEM);
  671.         return;
  672.     }
  673.     assert(ridxp->index == 0);
  674.     /* ridxp->lock held */
  675.  
  676.     rootp->type = FAT_DIRECTORY;
  677.     rootp->firstc = FAT_CLST_ROOT;
  678.     rootp->refcnt = 1;
  679.     rootp->lnkcnt = 0;  /* FS root is not linked */
  680.     rootp->size = rde * sizeof(fat_dentry_t);
  681.     rootp->idx = ridxp;
  682.     ridxp->nodep = rootp;
  683.    
  684.     futex_up(&ridxp->lock);
  685.  
  686.     ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
  687. }
  688.  
  689. void fat_mount(ipc_callid_t rid, ipc_call_t *request)
  690. {
  691.     ipc_answer_0(rid, ENOTSUP);
  692. }
  693.  
  694. void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
  695. {
  696.     libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
  697. }
  698.  
  699. void fat_read(ipc_callid_t rid, ipc_call_t *request)
  700. {
  701.     dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
  702.     fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
  703.     off_t pos = (off_t)IPC_GET_ARG3(*request);
  704.     fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
  705.     uint16_t bps = fat_bps_get(dev_handle);
  706.     size_t bytes;
  707.     block_t *b;
  708.  
  709.     if (!nodep) {
  710.         ipc_answer_0(rid, ENOENT);
  711.         return;
  712.     }
  713.  
  714.     ipc_callid_t callid;
  715.     size_t len;
  716.     if (!ipc_data_read_receive(&callid, &len)) {
  717.         fat_node_put(nodep);
  718.         ipc_answer_0(callid, EINVAL);
  719.         ipc_answer_0(rid, EINVAL);
  720.         return;
  721.     }
  722.  
  723.     if (nodep->type == FAT_FILE) {
  724.         /*
  725.          * Our strategy for regular file reads is to read one block at
  726.          * most and make use of the possibility to return less data than
  727.          * requested. This keeps the code very simple.
  728.          */
  729.         bytes = min(len, bps - pos % bps);
  730.         b = fat_block_get(nodep, pos / bps);
  731.         (void) ipc_data_read_finalize(callid, b->data + pos % bps,
  732.             bytes);
  733.         block_put(b);
  734.     } else {
  735.         unsigned bnum;
  736.         off_t spos = pos;
  737.         char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
  738.         fat_dentry_t *d;
  739.  
  740.         assert(nodep->type == FAT_DIRECTORY);
  741.         assert(nodep->size % bps == 0);
  742.         assert(bps % sizeof(fat_dentry_t) == 0);
  743.  
  744.         /*
  745.          * Our strategy for readdir() is to use the position pointer as
  746.          * an index into the array of all dentries. On entry, it points
  747.          * to the first unread dentry. If we skip any dentries, we bump
  748.          * the position pointer accordingly.
  749.          */
  750.         bnum = (pos * sizeof(fat_dentry_t)) / bps;
  751.         while (bnum < nodep->size / bps) {
  752.             off_t o;
  753.  
  754.             b = fat_block_get(nodep, bnum);
  755.             for (o = pos % (bps / sizeof(fat_dentry_t));
  756.                 o < bps / sizeof(fat_dentry_t);
  757.                 o++, pos++) {
  758.                 d = ((fat_dentry_t *)b->data) + o;
  759.                 switch (fat_classify_dentry(d)) {
  760.                 case FAT_DENTRY_SKIP:
  761.                     continue;
  762.                 case FAT_DENTRY_LAST:
  763.                     block_put(b);
  764.                     goto miss;
  765.                 default:
  766.                 case FAT_DENTRY_VALID:
  767.                     dentry_name_canonify(d, name);
  768.                     block_put(b);
  769.                     goto hit;
  770.                 }
  771.             }
  772.             block_put(b);
  773.             bnum++;
  774.         }
  775. miss:
  776.         fat_node_put(nodep);
  777.         ipc_answer_0(callid, ENOENT);
  778.         ipc_answer_1(rid, ENOENT, 0);
  779.         return;
  780. hit:
  781.         (void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
  782.         bytes = (pos - spos) + 1;
  783.     }
  784.  
  785.     fat_node_put(nodep);
  786.     ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
  787. }
  788.  
  789. /** Fill the gap between EOF and a new file position.
  790.  *
  791.  * @param nodep     FAT node with the gap.
  792.  * @param mcl       First cluster in an independent cluster chain that will
  793.  *          be later appended to the end of the node's own cluster
  794.  *          chain. If pos is still in the last allocated cluster,
  795.  *          this argument is ignored.
  796.  * @param pos       Position in the last node block.
  797.  */
  798. static void
  799. fat_fill_gap(fat_node_t *nodep, fat_cluster_t mcl, off_t pos)
  800. {
  801.     uint16_t bps;
  802.     unsigned spc;
  803.     block_t *bb, *b;
  804.     off_t o, boundary;
  805.  
  806.     bb = block_get(nodep->idx->dev_handle, BS_BLOCK, BS_SIZE);
  807.     bps = uint16_t_le2host(FAT_BS(bb)->bps);
  808.     spc = FAT_BS(bb)->spc;
  809.     block_put(bb);
  810.    
  811.     boundary = ROUND_UP(nodep->size, bps * spc);
  812.  
  813.     /* zero out already allocated space */
  814.     for (o = nodep->size; o < pos && o < boundary;
  815.         o = ALIGN_DOWN(o + bps, bps)) {
  816.         b = fat_block_get(nodep, o / bps);
  817.         memset(b->data + o % bps, 0, bps - o % bps);
  818.         b->dirty = true;        /* need to sync node */
  819.         block_put(b);
  820.     }
  821.    
  822.     if (o >= pos)
  823.         return;
  824.    
  825.     /* zero out the initial part of the new cluster chain */
  826.     for (o = boundary; o < pos; o += bps) {
  827.         b = _fat_block_get(nodep->idx->dev_handle, mcl,
  828.             (o - boundary) / bps);
  829.         memset(b->data, 0, min(bps, pos - o));
  830.         b->dirty = true;        /* need to sync node */
  831.         block_put(b);
  832.     }
  833. }
  834.  
  835. static void
  836. fat_mark_cluster(dev_handle_t dev_handle, unsigned fatno, fat_cluster_t clst,
  837.     fat_cluster_t value)
  838. {
  839.     /* TODO */
  840. }
  841.  
  842. static void
  843. fat_alloc_shadow_clusters(dev_handle_t dev_handle, fat_cluster_t *lifo,
  844.     unsigned nclsts)
  845. {
  846.     /* TODO */
  847. }
  848.  
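/** Allocate an independent chain of @a nclsts clusters in FAT1.
 *
 * Free clusters are linked as they are found; @a mcl receives the first and
 * @a lcl the last cluster of the new chain. If not enough free clusters are
 * available, the clusters claimed so far are released again and ENOSPC is
 * returned.
 */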
  849. static int
  850. fat_alloc_clusters(dev_handle_t dev_handle, unsigned nclsts, fat_cluster_t *mcl,
  851.     fat_cluster_t *lcl)
  852. {
  853.     uint16_t bps;
  854.     uint16_t rscnt;
  855.     uint16_t sf;
  856.     block_t *bb, *blk;
  857.     fat_cluster_t *lifo;    /* stack for storing free cluster numbers */
  858.     unsigned found = 0; /* top of the free cluster number stack */
  859.     unsigned b, c, cl;
  860.  
  861.     lifo = (fat_cluster_t *) malloc(nclsts * sizeof(fat_cluster_t));
  862.     if (!lifo)
  863.         return ENOMEM;
  864.    
  865.     bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
  866.     bps = uint16_t_le2host(FAT_BS(bb)->bps);
  867.     rscnt = uint16_t_le2host(FAT_BS(bb)->rscnt);
  868.     sf = uint16_t_le2host(FAT_BS(bb)->sec_per_fat);
  869.     block_put(bb);
  870.    
  871.     /*
  872.      * Search FAT1 for unused clusters.
  873.      */
  874.     for (b = 0, cl = 0; b < sf; b++) {
  875.         blk = block_get(dev_handle, rscnt + b, bps);
  876.         for (c = 0; c < bps / sizeof(fat_cluster_t); c++, cl++) {
  877.             fat_cluster_t *clst = (fat_cluster_t *)blk->data + c;
  878.             if (*clst == FAT_CLST_RES0) {
  879.                 /*
  880.                  * The cluster is free. Put it into our stack
  881.                  * of found clusters and mark it as non-free.
  882.                  */
  883.                 lifo[found] = cl;
  884.                 if (found == 0)
  885.                     *clst = FAT_CLST_LAST1;
  886.                 else
  887.                     *clst = lifo[found - 1];
  888.                 blk->dirty = true;  /* need to sync block */
  889.                 if (++found == nclsts) {
  890.                     /* we are almost done */
  891.                     block_put(blk);
  892.                     /* update the shadow copies of FAT */
  893.                     fat_alloc_shadow_clusters(dev_handle,
  894.                         lifo, nclsts);
  895.                     *mcl = lifo[found - 1];
  896.                     *lcl = lifo[0];
  897.                     free(lifo);
  898.                     return EOK;
  899.                 }
  900.             }
  901.         }
  902.         block_put(blk);
  903.     }
  904.  
  905.     /*
  906.      * We could not find enough clusters. Now we need to free the clusters
  907.      * we have allocated so far.
  908.      */
  909.     while (found--)
  910.         fat_mark_cluster(dev_handle, FAT1, lifo[found], FAT_CLST_RES0);
  911.    
  912.     free(lifo);
  913.     return ENOSPC;
  914. }
  915.  
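/** Append the cluster chain starting at @a mcl to the end of the node's
 * own cluster chain (not yet implemented).
 */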
  916. static void
  917. fat_append_clusters(fat_node_t *nodep, fat_cluster_t mcl)
  918. {
  919. }
  920.  
  921. void fat_write(ipc_callid_t rid, ipc_call_t *request)
  922. {
  923.     dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
  924.     fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
  925.     off_t pos = (off_t)IPC_GET_ARG3(*request);
  926.     fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
  927.     size_t bytes;
  928.     block_t *b, *bb;
  929.     uint16_t bps;
  930.     unsigned spc;
  931.     off_t boundary;
  932.    
  933.     if (!nodep) {
  934.         ipc_answer_0(rid, ENOENT);
  935.         return;
  936.     }
  937.    
  938.     /* XXX remove me when you are ready */
  939.     {
  940.         ipc_answer_0(rid, ENOTSUP);
  941.         fat_node_put(nodep);
  942.         return;
  943.     }
  944.  
  945.     ipc_callid_t callid;
  946.     size_t len;
  947.     if (!ipc_data_write_receive(&callid, &len)) {
  948.         fat_node_put(nodep);
  949.         ipc_answer_0(callid, EINVAL);
  950.         ipc_answer_0(rid, EINVAL);
  951.         return;
  952.     }
  953.  
  954.     bb = block_get(dev_handle, BS_BLOCK, BS_SIZE);
  955.     bps = uint16_t_le2host(FAT_BS(bb)->bps);
  956.     spc = FAT_BS(bb)->spc;
  957.     block_put(bb);
  958.  
  959.     /*
  960.      * In all scenarios, we will attempt to write out only one block worth
  961.      * of data at maximum. There might be some more efficient approaches,
  962.      * but this one greatly simplifies fat_write(). Note that we can afford
  963.      * to do this because the client must be ready to handle the return
  964.      * value signaling a smaller number of bytes written.
  965.      */
  966.     bytes = min(len, bps - pos % bps);
  967.    
  968.     boundary = ROUND_UP(nodep->size, bps * spc);
  969.     if (pos < boundary) {
  970.         /*
  971.          * This is the easier case - we are either overwriting already
  972.          * existing contents or writing behind the EOF, but still within
  973.          * the limits of the last cluster. The node size may grow to the
  974.          * next block size boundary.
  975.          */
  976.         fat_fill_gap(nodep, FAT_CLST_RES0, pos);
  977.         b = fat_block_get(nodep, pos / bps);
  978.         (void) ipc_data_write_finalize(callid, b->data + pos % bps,
  979.             bytes);
  980.         b->dirty = true;        /* need to sync block */
  981.         block_put(b);
  982.         if (pos + bytes > nodep->size) {
  983.             nodep->size = pos + bytes;
  984.             nodep->dirty = true;    /* need to sync node */
  985.         }
  986.         fat_node_put(nodep);
  987.         ipc_answer_1(rid, EOK, bytes); 
  988.         return;
  989.     } else {
  990.         /*
  991.          * This is the more difficult case. We must allocate new
  992.          * clusters for the node and zero them out.
  993.          */
  994.         int status;
  995.         unsigned nclsts;
  996.         fat_cluster_t mcl, lcl;
  997.    
  998.         nclsts = (ROUND_UP(pos + bytes, bps * spc) - boundary) /
  999.             (bps * spc);
  1000.         /* create an independent chain of nclsts clusters in all FATs */
  1001.         status = fat_alloc_clusters(dev_handle, nclsts, &mcl, &lcl);
  1002.         if (status != EOK) {
  1003.             /* could not allocate a chain of nclsts clusters */
  1004.             fat_node_put(nodep);
  1005.             ipc_answer_0(callid, status);
  1006.             ipc_answer_0(rid, status);
  1007.             return;
  1008.         }
  1009.         /* zero fill any gaps */
  1010.         fat_fill_gap(nodep, mcl, pos);
  1011.         b = _fat_block_get(dev_handle, lcl, (pos / bps) % spc);
  1012.         (void) ipc_data_write_finalize(callid, b->data + pos % bps,
  1013.             bytes);
  1014.         b->dirty = true;        /* need to sync block */
  1015.         block_put(b);
  1016.         /*
  1017.          * Append the cluster chain starting in mcl to the end of the
  1018.          * node's cluster chain.
  1019.          */
  1020.         fat_append_clusters(nodep, mcl);
  1021.         nodep->size = pos + bytes;
  1022.         nodep->dirty = true;        /* need to sync node */
  1023.         fat_node_put(nodep);
  1024.         ipc_answer_1(rid, EOK, bytes);
  1025.         return;
  1026.     }
  1027. }
  1028.  
  1029. /**
  1030.  * @}
  1031.  */
  1032.