/*
 * Copyright (c) 2008 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup fs
 * @{
 */

/**
 * @file	fat_ops.c
 * @brief	Implementation of VFS operations for the FAT file system server.
 */

#include "fat.h"
#include "fat_dentry.h"
#include "fat_fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <libblock.h>
#include <ipc/ipc.h>
#include <ipc/services.h>
#include <ipc/devmap.h>
#include <async.h>
#include <errno.h>
#include <string.h>
#include <byteorder.h>
#include <libadt/hash_table.h>
#include <libadt/list.h>
#include <assert.h>
#include <futex.h>
#include <sys/mman.h>
#include <align.h>

/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/** List of cached free FAT nodes. */
static LIST_INITIALIZE(ffn_head);

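/** Initialize an in-core FAT node to an unused, clean state. */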
static void fat_node_initialize(fat_node_t *node)
{
	futex_initialize(&node->lock, 1);
	node->idx = NULL;
	node->type = 0;
	link_initialize(&node->ffn_link);
	node->size = 0;
	node->lnkcnt = 0;
	node->refcnt = 0;
	node->dirty = false;
}

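/** Write a dirty in-core node back to its on-disk dentry.
 *
 * Refreshes the first-cluster, size and attribute fields of the dentry and
 * marks the containing block dirty.
 */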
static void fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	unsigned dps;

	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE) {
		d->size = host2uint32_t_le(node->size);
	} else if (node->type == FAT_DIRECTORY) {
		d->attr = FAT_ATTR_SUBDIR;
	}

	/* TODO: update other fields? (e.g. time fields) */

	b->dirty = true;		/* need to sync block */
	block_put(b);
}

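/** Obtain an in-core FAT node structure.
 *
 * An unused cached node is recycled if available; otherwise a new structure
 * is allocated.
 *
 * @return		Initialized node structure or NULL on failure.
 */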
static fat_node_t *fat_node_get_new(void)
{
	fat_node_t *nodep;

	futex_down(&ffn_futex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
			futex_up(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		futex_up(&ffn_futex);
		if (nodep->dirty)
			fat_node_sync(nodep);
		idxp_tmp->nodep = NULL;
		futex_up(&nodep->lock);
		futex_up(&idxp_tmp->lock);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		futex_up(&ffn_futex);
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep)
			return NULL;
	}
	fat_node_initialize(nodep);

	return nodep;
}

/** Internal version of fat_node_get().
 *
 * @param idxp		Locked index structure.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	unsigned bps;
	unsigned spc;
	unsigned dps;

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		futex_down(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++)
			list_remove(&idxp->nodep->ffn_link);
		futex_up(&idxp->nodep->lock);
		return idxp->nodep;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	nodep = fat_node_get_new();
	if (!nodep)
		return NULL;

	bs = block_bb_get(idxp->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
	assert(b);

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	block_put(b);

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	return nodep;
}

/*
 * Forward declarations of FAT libfs operations.
 */
static void *fat_node_get(dev_handle_t, fs_index_t);
static void fat_node_put(void *);
static void *fat_create_node(dev_handle_t, int);
static int fat_destroy_node(void *);
static int fat_link(void *, void *, const char *);
static int fat_unlink(void *, void *);
static void *fat_match(void *, const char *);
static fs_index_t fat_index_get(void *);
static size_t fat_size_get(void *);
static unsigned fat_lnkcnt_get(void *);
static bool fat_has_children(void *);
static void *fat_root_get(dev_handle_t);
static char fat_plb_get_char(unsigned);
static bool fat_is_directory(void *);
static bool fat_is_file(void *node);

/*
 * FAT libfs operations.
 */

/** Instantiate a FAT in-core node. */
void *fat_node_get(dev_handle_t dev_handle, fs_index_t index)
{
	void *node;
	fat_idx_t *idxp;

	idxp = fat_idx_get_by_index(dev_handle, index);
	if (!idxp)
		return NULL;
	/* idxp->lock held */
	node = fat_node_get_core(idxp);
	futex_up(&idxp->lock);
	return node;
}

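/** Drop a reference to a node.
 *
 * When the last reference is dropped, the node is put on the list of cached
 * free nodes or, if it has no index structure, freed outright.
 */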
void fat_node_put(void *node)
{
	fat_node_t *nodep = (fat_node_t *)node;
	bool destroy = false;

	futex_down(&nodep->lock);
	if (!--nodep->refcnt) {
		if (nodep->idx) {
			futex_down(&ffn_futex);
			list_append(&nodep->ffn_link, &ffn_head);
			futex_up(&ffn_futex);
		} else {
			/*
			 * The node does not have any index structure
			 * associated with itself. This can only mean that we
			 * are releasing the node after a failed attempt to
			 * allocate the index structure for it.
			 */
			destroy = true;
		}
	}
	futex_up(&nodep->lock);
	if (destroy)
		free(node);
}

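/** Create a new file or directory node.
 *
 * For a directory, one cluster is allocated and populated with unused
 * dentries.
 *
 * @param dev_handle	Device handle of the file system.
 * @param flags		L_DIRECTORY to create a directory, otherwise a file
 *			is created.
 *
 * @return		New node or NULL on failure.
 */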
void *fat_create_node(dev_handle_t dev_handle, int flags)
{
	fat_idx_t *idxp;
	fat_node_t *nodep;
	fat_bs_t *bs;
	fat_cluster_t mcl, lcl;
	uint16_t bps;
	int rc;

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	if (flags & L_DIRECTORY) {
		/* allocate a cluster */
		rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
		if (rc != EOK)
			return NULL;
	}

	nodep = fat_node_get_new();
	if (!nodep) {
		if (flags & L_DIRECTORY)
			fat_free_clusters(bs, dev_handle, mcl);
		return NULL;
	}
	idxp = fat_idx_get_new(dev_handle);
	if (!idxp) {
		if (flags & L_DIRECTORY)
			fat_free_clusters(bs, dev_handle, mcl);
		fat_node_put(nodep);
		return NULL;
	}
	/* idxp->lock held */
	if (flags & L_DIRECTORY) {
		int i;
		block_t *b;

		/*
		 * Populate the new cluster with unused dentries.
		 */
		for (i = 0; i < bs->spc; i++) {
			b = _fat_block_get(bs, dev_handle, mcl, i,
			    BLOCK_FLAGS_NOREAD);
			/* mark all dentries as never-used */
			memset(b->data, 0, bps);
			b->dirty = true;	/* need to sync block */
			block_put(b);
		}
		nodep->type = FAT_DIRECTORY;
		nodep->firstc = mcl;
		nodep->size = bps * bs->spc;
	} else {
		nodep->type = FAT_FILE;
		nodep->firstc = FAT_CLST_RES0;
		nodep->size = 0;
	}
	nodep->lnkcnt = 0;	/* not linked anywhere */
	nodep->refcnt = 1;
	nodep->dirty = true;

	nodep->idx = idxp;
	idxp->nodep = nodep;

	futex_up(&idxp->lock);
	return nodep;
}

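/** Destroy an unlinked node and free all clusters allocated to it. */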
int fat_destroy_node(void *node)
{
	fat_node_t *nodep = (fat_node_t *)node;
	fat_bs_t *bs;

	/*
	 * The node is not reachable from the file system. This means that the
	 * link count should be zero and that the index structure cannot be
	 * found in the position hash. Obviously, we don't need to lock the
	 * node nor its index structure.
	 */
	assert(nodep->lnkcnt == 0);

	/*
	 * The node may not have any children.
	 */
	assert(fat_has_children(node) == false);

	bs = block_bb_get(nodep->idx->dev_handle);
	if (nodep->firstc != FAT_CLST_RES0) {
		assert(nodep->size);
		/* Free all clusters allocated to the node. */
		fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc);
	}

	fat_idx_destroy(nodep->idx);
	free(nodep);
	return EOK;
}

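/** Link a child node into a parent directory under the given name.
 *
 * @return		EOK on success, EMLINK if the child is already linked,
 *			ENOTSUP if the name is not valid or the parent
 *			directory would have to be grown.
 */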
int fat_link(void *prnt, void *chld, const char *name)
{
	fat_node_t *parentp = (fat_node_t *)prnt;
	fat_node_t *childp = (fat_node_t *)chld;
	fat_dentry_t *d;
	fat_bs_t *bs;
	block_t *b;
	int i, j;
	uint16_t bps;
	unsigned dps;
	unsigned blocks;

	futex_down(&childp->lock);
	if (childp->lnkcnt == 1) {
		/*
		 * On FAT, we don't support multiple hard links.
		 */
		futex_up(&childp->lock);
		return EMLINK;
	}
	assert(childp->lnkcnt == 0);
	futex_up(&childp->lock);

	if (!fat_dentry_name_verify(name)) {
		/*
		 * Attempt to create unsupported name.
		 */
		return ENOTSUP;
	}

	/*
	 * Get us an unused parent node's dentry or grow the parent and allocate
	 * a new one.
	 */

	futex_down(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = parentp->size / bps;

	for (i = 0; i < blocks; i++) {
		b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_VALID:
				/* skipping used and meta entries */
				continue;
			case FAT_DENTRY_FREE:
			case FAT_DENTRY_LAST:
				/* found an empty slot */
				goto hit;
			}
		}
		block_put(b);
	}

	/*
	 * We need to grow the parent in order to create a new unused dentry.
	 */
	futex_up(&parentp->idx->lock);
	return ENOTSUP;	/* XXX */

hit:
	/*
	 * At this point we only establish the link between the parent and the
	 * child. The dentry, except for the name and the extension, will
	 * remain uninitialized until the corresponding node is synced. Thus
	 * the valid dentry data is kept in the child node structure.
	 */
	memset(d, 0, sizeof(fat_dentry_t));
	fat_dentry_name_set(d, name);
	b->dirty = true;		/* need to sync block */
	block_put(b);
	futex_up(&parentp->idx->lock);

	futex_down(&childp->idx->lock);

	/*
	 * If possible, create the Sub-directory Identifier Entry and the
	 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These
	 * entries are not mandatory according to Standard ECMA-107 and HelenOS
	 * VFS does not use them anyway, so this is rather a sign of our good
	 * will.
	 */
	b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE);
	d = (fat_dentry_t *)b->data;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    strcmp(d->name, FAT_NAME_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		strcpy(d->name, FAT_NAME_DOT);
		strcpy(d->ext, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = host2uint16_t_le(childp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	d++;
	if (fat_classify_dentry(d) == FAT_DENTRY_LAST ||
	    strcmp(d->name, FAT_NAME_DOT_DOT) == 0) {
		memset(d, 0, sizeof(fat_dentry_t));
		strcpy(d->name, FAT_NAME_DOT_DOT);
		strcpy(d->ext, FAT_EXT_PAD);
		d->attr = FAT_ATTR_SUBDIR;
		d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
		    host2uint16_t_le(FAT_CLST_RES0) :
		    host2uint16_t_le(parentp->firstc);
		/* TODO: initialize also the date/time members. */
	}
	b->dirty = true;		/* need to sync block */
	block_put(b);

	childp->idx->pfc = parentp->firstc;
	childp->idx->pdi = i * dps + j;
	futex_up(&childp->idx->lock);

	futex_down(&childp->lock);
	childp->lnkcnt = 1;
	childp->dirty = true;		/* need to sync node */
	futex_up(&childp->lock);

	/*
	 * Hash in the index structure into the position hash.
	 */
	fat_idx_hashin(childp->idx);

	return EOK;
}

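/** Remove the dentry that links the child node into the parent directory. */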
int fat_unlink(void *prnt, void *chld)
{
	fat_node_t *parentp = (fat_node_t *)prnt;
	fat_node_t *childp = (fat_node_t *)chld;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;
	block_t *b;

	futex_down(&parentp->lock);
	futex_down(&childp->lock);
	assert(childp->lnkcnt == 1);
	futex_down(&childp->idx->lock);
	bs = block_bb_get(childp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);

	b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc,
	    (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
	    BLOCK_FLAGS_NONE);
	d = (fat_dentry_t *)b->data +
	    (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
	/* mark the dentry as not-currently-used */
	d->name[0] = FAT_DENTRY_ERASED;
	b->dirty = true;		/* need to sync block */
	block_put(b);

	/* remove the index structure from the position hash */
	fat_idx_hashout(childp->idx);
	/* clear position information */
	childp->idx->pfc = FAT_CLST_RES0;
	childp->idx->pdi = 0;
	futex_up(&childp->idx->lock);
	childp->lnkcnt = 0;
	childp->dirty = true;
	futex_up(&childp->lock);
	futex_up(&parentp->lock);

	return EOK;
}

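/** Look up a named component in a parent directory.
 *
 * @return		Matching child node or NULL if there is no match.
 */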
void *fat_match(void *prnt, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = (fat_node_t *)prnt;
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;

	futex_down(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	for (i = 0; i < blocks; i++) {
		b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				block_put(b);
				futex_up(&parentp->idx->lock);
				return NULL;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				void *node;
				/*
				 * Assume tree hierarchy for locking. We
				 * already have the parent and now we are going
				 * to lock the child. Never lock in the
				 * opposite order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				futex_up(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					block_put(b);
					return NULL;
				}
				node = fat_node_get_core(idx);
				futex_up(&idx->lock);
				block_put(b);
				return node;
			}
		}
		block_put(b);
	}

	futex_up(&parentp->idx->lock);
	return NULL;
}

fs_index_t fat_index_get(void *node)
{
	fat_node_t *fnodep = (fat_node_t *)node;
	if (!fnodep)
		return 0;
	return fnodep->idx->index;
}

size_t fat_size_get(void *node)
{
	return ((fat_node_t *)node)->size;
}

unsigned fat_lnkcnt_get(void *node)
{
	return ((fat_node_t *)node)->lnkcnt;
}

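/** Tell whether a directory node contains any valid entries. */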
bool fat_has_children(void *node)
{
	fat_bs_t *bs;
	fat_node_t *nodep = (fat_node_t *)node;
	unsigned bps;
	unsigned dps;
	unsigned blocks;
	block_t *b;
	unsigned i, j;

	if (nodep->type != FAT_DIRECTORY)
		return false;

	futex_down(&nodep->idx->lock);
	bs = block_bb_get(nodep->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	blocks = nodep->size / bps;

	for (i = 0; i < blocks; i++) {
		fat_dentry_t *d;

		b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE);
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				block_put(b);
				futex_up(&nodep->idx->lock);
				return false;
			default:
			case FAT_DENTRY_VALID:
				block_put(b);
				futex_up(&nodep->idx->lock);
				return true;
			}
			block_put(b);
			futex_up(&nodep->idx->lock);
			return true;
		}
		block_put(b);
	}

	futex_up(&nodep->idx->lock);
	return false;
}

void *fat_root_get(dev_handle_t dev_handle)
{
	return fat_node_get(dev_handle, 0);
}

char fat_plb_get_char(unsigned pos)
{
	return fat_reg.plb_ro[pos % PLB_SIZE];
}

bool fat_is_directory(void *node)
{
	return ((fat_node_t *)node)->type == FAT_DIRECTORY;
}

bool fat_is_file(void *node)
{
	return ((fat_node_t *)node)->type == FAT_FILE;
}

/** libfs operations */
libfs_ops_t fat_libfs_ops = {
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.has_children = fat_has_children,
	.root_get = fat_root_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};

/*
 * VFS operations.
 */

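/** Prepare this FAT instance for serving the given device.
 *
 * Initialize libblock, read and verify the boot sector, set up the block
 * cache and instantiate the root directory node.
 */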
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	fat_bs_t *bs;
	uint16_t bps;
	uint16_t rde;
	int rc;

	/* initialize libblock */
	rc = block_init(dev_handle, BS_SIZE);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}

	/* prepare the boot block */
	rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(dev_handle);

	/* Read the number of root directory entries. */
	bps = uint16_t_le2host(bs->bps);
	rde = uint16_t_le2host(bs->root_ent_max);

	if (bps != BS_SIZE) {
		block_fini(dev_handle);
		ipc_answer_0(rid, ENOTSUP);
		return;
	}

	/* Initialize the block cache */
	rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	rc = fat_idx_init_by_dev_handle(dev_handle);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* Initialize the root node. */
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	fat_node_initialize(rootp);

	fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		block_fini(dev_handle);
		free(rootp);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_CLST_ROOT;
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */
	rootp->size = rde * sizeof(fat_dentry_t);
	rootp->idx = ridxp;
	ridxp->nodep = rootp;

	futex_up(&ridxp->lock);

	ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}

void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, ENOTSUP);
}

void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}

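/** Serve a read request.
 *
 * For regular files, at most one block worth of data is returned. For
 * directories, the name of a single directory entry is returned.
 */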
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	uint16_t bps;
	size_t bytes;
	block_t *b;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(nodep);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data
		 * than requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			b = fat_block_get(bs, nodep, pos / bps,
			    BLOCK_FLAGS_NONE);
			(void) ipc_data_read_finalize(callid,
			    b->data + pos % bps, bytes);
			block_put(b);
		}
	} else {
		unsigned bnum;
		off_t spos = pos;
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					block_put(b);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					block_put(b);
					goto hit;
				}
			}
			block_put(b);
			bnum++;
		}
miss:
		fat_node_put(nodep);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		(void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
		bytes = (pos - spos) + 1;
	}

	fat_node_put(nodep);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}

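/** Serve a write request.
 *
 * At most one block worth of data is written per call. Writing past the
 * last allocated cluster allocates new clusters and zero-fills any gap first.
 */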
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	uint16_t bps;
	unsigned spc;
	unsigned bpc;		/* bytes per cluster */
	off_t boundary;
	int flags = BLOCK_FLAGS_NONE;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_write_receive(&callid, &len)) {
		fat_node_put(nodep);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signaling a smaller number of bytes written.
	 */
	bytes = min(len, bps - pos % bps);
	if (bytes == bps)
		flags |= BLOCK_FLAGS_NOREAD;

	boundary = ROUND_UP(nodep->size, bpc);
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		b = fat_block_get(bs, nodep, pos / bps, flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true;	/* need to sync node */
		}
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(nodep);
		return;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		int status;
		unsigned nclsts;
		fat_cluster_t mcl, lcl;

		nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
		/* create an independent chain of nclsts clusters in all FATs */
		status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
		if (status != EOK) {
			/* could not allocate a chain of nclsts clusters */
			fat_node_put(nodep);
			ipc_answer_0(callid, status);
			ipc_answer_0(rid, status);
			return;
		}
		/* zero fill any gaps */
		fat_fill_gap(bs, nodep, mcl, pos);
		b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
		    flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		fat_append_clusters(bs, nodep, mcl);
		nodep->size = pos + bytes;
		nodep->dirty = true;		/* need to sync node */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(nodep);
		return;
	}
}

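/** Serve a truncate request.
 *
 * Only shrinking the node is supported; an attempt to grow it fails with
 * EINVAL.
 */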
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	size_t size = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	uint16_t bps;
	uint8_t spc;
	unsigned bpc;	/* bytes per cluster */
	int rc;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	if (nodep->size == size) {
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
		} else {
			fat_cluster_t lastc;
			(void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
			    &lastc, (size - 1) / bpc);
			fat_chop_clusters(bs, nodep, lastc);
		}
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	}
	fat_node_put(nodep);
	ipc_answer_0(rid, rc);
	return;
}

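/** Serve a destroy request: look the node up by index and destroy it. */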
void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	int rc;

	fat_node_t *nodep = fat_node_get(dev_handle, index);
	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	rc = fat_destroy_node(nodep);
	ipc_answer_0(rid, rc);
}

/**
 * @}
 */