Rev 4586 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 4586 | Rev 4647 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2008 Jakub Jermar |
2 | * Copyright (c) 2008 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup fs |
29 | /** @addtogroup fs |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | 32 | ||
33 | /** |
33 | /** |
34 | * @file fat_ops.c |
34 | * @file fat_ops.c |
35 | * @brief Implementation of VFS operations for the FAT file system server. |
35 | * @brief Implementation of VFS operations for the FAT file system server. |
36 | */ |
36 | */ |
37 | 37 | ||
38 | #include "fat.h" |
38 | #include "fat.h" |
39 | #include "fat_dentry.h" |
39 | #include "fat_dentry.h" |
40 | #include "fat_fat.h" |
40 | #include "fat_fat.h" |
41 | #include "../../vfs/vfs.h" |
41 | #include "../../vfs/vfs.h" |
42 | #include <libfs.h> |
42 | #include <libfs.h> |
43 | #include <libblock.h> |
43 | #include <libblock.h> |
44 | #include <ipc/ipc.h> |
44 | #include <ipc/ipc.h> |
45 | #include <ipc/services.h> |
45 | #include <ipc/services.h> |
46 | #include <ipc/devmap.h> |
46 | #include <ipc/devmap.h> |
47 | #include <async.h> |
47 | #include <async.h> |
48 | #include <errno.h> |
48 | #include <errno.h> |
49 | #include <string.h> |
49 | #include <string.h> |
50 | #include <byteorder.h> |
50 | #include <byteorder.h> |
51 | #include <adt/hash_table.h> |
51 | #include <adt/hash_table.h> |
52 | #include <adt/list.h> |
52 | #include <adt/list.h> |
53 | #include <assert.h> |
53 | #include <assert.h> |
54 | #include <fibril_sync.h> |
54 | #include <fibril_sync.h> |
55 | #include <sys/mman.h> |
55 | #include <sys/mman.h> |
56 | #include <align.h> |
56 | #include <align.h> |
57 | 57 | ||
58 | #define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL) |
58 | #define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL) |
59 | #define FS_NODE(node) ((node) ? (node)->bp : NULL) |
59 | #define FS_NODE(node) ((node) ? (node)->bp : NULL) |
60 | 60 | ||
61 | /** Mutex protecting the list of cached free FAT nodes. */ |
61 | /** Mutex protecting the list of cached free FAT nodes. */ |
62 | static FIBRIL_MUTEX_INITIALIZE(ffn_mutex); |
62 | static FIBRIL_MUTEX_INITIALIZE(ffn_mutex); |
63 | 63 | ||
64 | /** List of cached free FAT nodes. */ |
64 | /** List of cached free FAT nodes. */ |
65 | static LIST_INITIALIZE(ffn_head); |
65 | static LIST_INITIALIZE(ffn_head); |
66 | 66 | ||
67 | static void fat_node_initialize(fat_node_t *node) |
67 | static void fat_node_initialize(fat_node_t *node) |
68 | { |
68 | { |
69 | fibril_mutex_initialize(&node->lock); |
69 | fibril_mutex_initialize(&node->lock); |
70 | node->bp = NULL; |
70 | node->bp = NULL; |
71 | node->idx = NULL; |
71 | node->idx = NULL; |
72 | node->type = 0; |
72 | node->type = 0; |
73 | link_initialize(&node->ffn_link); |
73 | link_initialize(&node->ffn_link); |
74 | node->size = 0; |
74 | node->size = 0; |
75 | node->lnkcnt = 0; |
75 | node->lnkcnt = 0; |
76 | node->refcnt = 0; |
76 | node->refcnt = 0; |
77 | node->dirty = false; |
77 | node->dirty = false; |
78 | } |
78 | } |
79 | 79 | ||
80 | static void fat_node_sync(fat_node_t *node) |
80 | static void fat_node_sync(fat_node_t *node) |
81 | { |
81 | { |
82 | block_t *b; |
82 | block_t *b; |
83 | fat_bs_t *bs; |
83 | fat_bs_t *bs; |
84 | fat_dentry_t *d; |
84 | fat_dentry_t *d; |
85 | uint16_t bps; |
85 | uint16_t bps; |
86 | unsigned dps; |
86 | unsigned dps; |
87 | 87 | ||
88 | assert(node->dirty); |
88 | assert(node->dirty); |
89 | 89 | ||
90 | bs = block_bb_get(node->idx->dev_handle); |
90 | bs = block_bb_get(node->idx->dev_handle); |
91 | bps = uint16_t_le2host(bs->bps); |
91 | bps = uint16_t_le2host(bs->bps); |
92 | dps = bps / sizeof(fat_dentry_t); |
92 | dps = bps / sizeof(fat_dentry_t); |
93 | 93 | ||
94 | /* Read the block that contains the dentry of interest. */ |
94 | /* Read the block that contains the dentry of interest. */ |
95 | b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc, |
95 | b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc, |
96 | (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
96 | (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
97 | 97 | ||
98 | d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps); |
98 | d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps); |
99 | 99 | ||
100 | d->firstc = host2uint16_t_le(node->firstc); |
100 | d->firstc = host2uint16_t_le(node->firstc); |
101 | if (node->type == FAT_FILE) { |
101 | if (node->type == FAT_FILE) { |
102 | d->size = host2uint32_t_le(node->size); |
102 | d->size = host2uint32_t_le(node->size); |
103 | } else if (node->type == FAT_DIRECTORY) { |
103 | } else if (node->type == FAT_DIRECTORY) { |
104 | d->attr = FAT_ATTR_SUBDIR; |
104 | d->attr = FAT_ATTR_SUBDIR; |
105 | } |
105 | } |
106 | 106 | ||
107 | /* TODO: update other fields? (e.g time fields) */ |
107 | /* TODO: update other fields? (e.g time fields) */ |
108 | 108 | ||
109 | b->dirty = true; /* need to sync block */ |
109 | b->dirty = true; /* need to sync block */ |
110 | block_put(b); |
110 | block_put(b); |
111 | } |
111 | } |
112 | 112 | ||
113 | static fat_node_t *fat_node_get_new(void) |
113 | static fat_node_t *fat_node_get_new(void) |
114 | { |
114 | { |
115 | fs_node_t *fn; |
115 | fs_node_t *fn; |
116 | fat_node_t *nodep; |
116 | fat_node_t *nodep; |
117 | 117 | ||
118 | fibril_mutex_lock(&ffn_mutex); |
118 | fibril_mutex_lock(&ffn_mutex); |
119 | if (!list_empty(&ffn_head)) { |
119 | if (!list_empty(&ffn_head)) { |
120 | /* Try to use a cached free node structure. */ |
120 | /* Try to use a cached free node structure. */ |
121 | fat_idx_t *idxp_tmp; |
121 | fat_idx_t *idxp_tmp; |
122 | nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link); |
122 | nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link); |
123 | if (!fibril_mutex_trylock(&nodep->lock)) |
123 | if (!fibril_mutex_trylock(&nodep->lock)) |
124 | goto skip_cache; |
124 | goto skip_cache; |
125 | idxp_tmp = nodep->idx; |
125 | idxp_tmp = nodep->idx; |
126 | if (!fibril_mutex_trylock(&idxp_tmp->lock)) { |
126 | if (!fibril_mutex_trylock(&idxp_tmp->lock)) { |
127 | fibril_mutex_unlock(&nodep->lock); |
127 | fibril_mutex_unlock(&nodep->lock); |
128 | goto skip_cache; |
128 | goto skip_cache; |
129 | } |
129 | } |
130 | list_remove(&nodep->ffn_link); |
130 | list_remove(&nodep->ffn_link); |
131 | fibril_mutex_unlock(&ffn_mutex); |
131 | fibril_mutex_unlock(&ffn_mutex); |
132 | if (nodep->dirty) |
132 | if (nodep->dirty) |
133 | fat_node_sync(nodep); |
133 | fat_node_sync(nodep); |
134 | idxp_tmp->nodep = NULL; |
134 | idxp_tmp->nodep = NULL; |
135 | fibril_mutex_unlock(&nodep->lock); |
135 | fibril_mutex_unlock(&nodep->lock); |
136 | fibril_mutex_unlock(&idxp_tmp->lock); |
136 | fibril_mutex_unlock(&idxp_tmp->lock); |
137 | fn = FS_NODE(nodep); |
137 | fn = FS_NODE(nodep); |
138 | } else { |
138 | } else { |
139 | skip_cache: |
139 | skip_cache: |
140 | /* Try to allocate a new node structure. */ |
140 | /* Try to allocate a new node structure. */ |
141 | fibril_mutex_unlock(&ffn_mutex); |
141 | fibril_mutex_unlock(&ffn_mutex); |
142 | fn = (fs_node_t *)malloc(sizeof(fs_node_t)); |
142 | fn = (fs_node_t *)malloc(sizeof(fs_node_t)); |
143 | if (!fn) |
143 | if (!fn) |
144 | return NULL; |
144 | return NULL; |
145 | nodep = (fat_node_t *)malloc(sizeof(fat_node_t)); |
145 | nodep = (fat_node_t *)malloc(sizeof(fat_node_t)); |
146 | if (!nodep) { |
146 | if (!nodep) { |
147 | free(fn); |
147 | free(fn); |
148 | return NULL; |
148 | return NULL; |
149 | } |
149 | } |
150 | } |
150 | } |
151 | fat_node_initialize(nodep); |
151 | fat_node_initialize(nodep); |
152 | fs_node_initialize(fn); |
152 | fs_node_initialize(fn); |
153 | fn->data = nodep; |
153 | fn->data = nodep; |
154 | nodep->bp = fn; |
154 | nodep->bp = fn; |
155 | 155 | ||
156 | return nodep; |
156 | return nodep; |
157 | } |
157 | } |
158 | 158 | ||
159 | /** Internal version of fat_node_get(). |
159 | /** Internal version of fat_node_get(). |
160 | * |
160 | * |
161 | * @param idxp Locked index structure. |
161 | * @param idxp Locked index structure. |
162 | */ |
162 | */ |
163 | static fat_node_t *fat_node_get_core(fat_idx_t *idxp) |
163 | static fat_node_t *fat_node_get_core(fat_idx_t *idxp) |
164 | { |
164 | { |
165 | block_t *b; |
165 | block_t *b; |
166 | fat_bs_t *bs; |
166 | fat_bs_t *bs; |
167 | fat_dentry_t *d; |
167 | fat_dentry_t *d; |
168 | fat_node_t *nodep = NULL; |
168 | fat_node_t *nodep = NULL; |
169 | unsigned bps; |
169 | unsigned bps; |
170 | unsigned spc; |
170 | unsigned spc; |
171 | unsigned dps; |
171 | unsigned dps; |
172 | 172 | ||
173 | if (idxp->nodep) { |
173 | if (idxp->nodep) { |
174 | /* |
174 | /* |
175 | * We are lucky. |
175 | * We are lucky. |
176 | * The node is already instantiated in memory. |
176 | * The node is already instantiated in memory. |
177 | */ |
177 | */ |
178 | fibril_mutex_lock(&idxp->nodep->lock); |
178 | fibril_mutex_lock(&idxp->nodep->lock); |
179 | if (!idxp->nodep->refcnt++) |
179 | if (!idxp->nodep->refcnt++) |
180 | list_remove(&idxp->nodep->ffn_link); |
180 | list_remove(&idxp->nodep->ffn_link); |
181 | fibril_mutex_unlock(&idxp->nodep->lock); |
181 | fibril_mutex_unlock(&idxp->nodep->lock); |
182 | return idxp->nodep; |
182 | return idxp->nodep; |
183 | } |
183 | } |
184 | 184 | ||
185 | /* |
185 | /* |
186 | * We must instantiate the node from the file system. |
186 | * We must instantiate the node from the file system. |
187 | */ |
187 | */ |
188 | 188 | ||
189 | assert(idxp->pfc); |
189 | assert(idxp->pfc); |
190 | 190 | ||
191 | nodep = fat_node_get_new(); |
191 | nodep = fat_node_get_new(); |
192 | if (!nodep) |
192 | if (!nodep) |
193 | return NULL; |
193 | return NULL; |
194 | 194 | ||
195 | bs = block_bb_get(idxp->dev_handle); |
195 | bs = block_bb_get(idxp->dev_handle); |
196 | bps = uint16_t_le2host(bs->bps); |
196 | bps = uint16_t_le2host(bs->bps); |
197 | spc = bs->spc; |
197 | spc = bs->spc; |
198 | dps = bps / sizeof(fat_dentry_t); |
198 | dps = bps / sizeof(fat_dentry_t); |
199 | 199 | ||
200 | /* Read the block that contains the dentry of interest. */ |
200 | /* Read the block that contains the dentry of interest. */ |
201 | b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc, |
201 | b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc, |
202 | (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
202 | (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
203 | assert(b); |
203 | assert(b); |
204 | 204 | ||
205 | d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); |
205 | d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); |
206 | if (d->attr & FAT_ATTR_SUBDIR) { |
206 | if (d->attr & FAT_ATTR_SUBDIR) { |
207 | /* |
207 | /* |
208 | * The only directory which does not have this bit set is the |
208 | * The only directory which does not have this bit set is the |
209 | * root directory itself. The root directory node is handled |
209 | * root directory itself. The root directory node is handled |
210 | * and initialized elsewhere. |
210 | * and initialized elsewhere. |
211 | */ |
211 | */ |
212 | nodep->type = FAT_DIRECTORY; |
212 | nodep->type = FAT_DIRECTORY; |
213 | /* |
213 | /* |
214 | * Unfortunately, the 'size' field of the FAT dentry is not |
214 | * Unfortunately, the 'size' field of the FAT dentry is not |
215 | * defined for the directory entry type. We must determine the |
215 | * defined for the directory entry type. We must determine the |
216 | * size of the directory by walking the FAT. |
216 | * size of the directory by walking the FAT. |
217 | */ |
217 | */ |
218 | nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle, |
218 | nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle, |
219 | uint16_t_le2host(d->firstc)); |
219 | uint16_t_le2host(d->firstc)); |
220 | } else { |
220 | } else { |
221 | nodep->type = FAT_FILE; |
221 | nodep->type = FAT_FILE; |
222 | nodep->size = uint32_t_le2host(d->size); |
222 | nodep->size = uint32_t_le2host(d->size); |
223 | } |
223 | } |
224 | nodep->firstc = uint16_t_le2host(d->firstc); |
224 | nodep->firstc = uint16_t_le2host(d->firstc); |
225 | nodep->lnkcnt = 1; |
225 | nodep->lnkcnt = 1; |
226 | nodep->refcnt = 1; |
226 | nodep->refcnt = 1; |
227 | 227 | ||
228 | block_put(b); |
228 | block_put(b); |
229 | 229 | ||
230 | /* Link the idx structure with the node structure. */ |
230 | /* Link the idx structure with the node structure. */ |
231 | nodep->idx = idxp; |
231 | nodep->idx = idxp; |
232 | idxp->nodep = nodep; |
232 | idxp->nodep = nodep; |
233 | 233 | ||
234 | return nodep; |
234 | return nodep; |
235 | } |
235 | } |
236 | 236 | ||
237 | /* |
237 | /* |
238 | * Forward declarations of FAT libfs operations. |
238 | * Forward declarations of FAT libfs operations. |
239 | */ |
239 | */ |
240 | static fs_node_t *fat_node_get(dev_handle_t, fs_index_t); |
240 | static fs_node_t *fat_node_get(dev_handle_t, fs_index_t); |
241 | static void fat_node_put(fs_node_t *); |
241 | static void fat_node_put(fs_node_t *); |
242 | static fs_node_t *fat_create_node(dev_handle_t, int); |
242 | static fs_node_t *fat_create_node(dev_handle_t, int); |
243 | static int fat_destroy_node(fs_node_t *); |
243 | static int fat_destroy_node(fs_node_t *); |
244 | static int fat_link(fs_node_t *, fs_node_t *, const char *); |
244 | static int fat_link(fs_node_t *, fs_node_t *, const char *); |
245 | static int fat_unlink(fs_node_t *, fs_node_t *, const char *); |
245 | static int fat_unlink(fs_node_t *, fs_node_t *, const char *); |
246 | static fs_node_t *fat_match(fs_node_t *, const char *); |
246 | static fs_node_t *fat_match(fs_node_t *, const char *); |
247 | static fs_index_t fat_index_get(fs_node_t *); |
247 | static fs_index_t fat_index_get(fs_node_t *); |
248 | static size_t fat_size_get(fs_node_t *); |
248 | static size_t fat_size_get(fs_node_t *); |
249 | static unsigned fat_lnkcnt_get(fs_node_t *); |
249 | static unsigned fat_lnkcnt_get(fs_node_t *); |
250 | static bool fat_has_children(fs_node_t *); |
250 | static bool fat_has_children(fs_node_t *); |
251 | static fs_node_t *fat_root_get(dev_handle_t); |
251 | static fs_node_t *fat_root_get(dev_handle_t); |
252 | static char fat_plb_get_char(unsigned); |
252 | static char fat_plb_get_char(unsigned); |
253 | static bool fat_is_directory(fs_node_t *); |
253 | static bool fat_is_directory(fs_node_t *); |
254 | static bool fat_is_file(fs_node_t *node); |
254 | static bool fat_is_file(fs_node_t *node); |
255 | 255 | ||
256 | /* |
256 | /* |
257 | * FAT libfs operations. |
257 | * FAT libfs operations. |
258 | */ |
258 | */ |
259 | 259 | ||
260 | /** Instantiate a FAT in-core node. */ |
260 | /** Instantiate a FAT in-core node. */ |
261 | fs_node_t *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
261 | fs_node_t *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
262 | { |
262 | { |
263 | fat_node_t *nodep; |
263 | fat_node_t *nodep; |
264 | fat_idx_t *idxp; |
264 | fat_idx_t *idxp; |
265 | 265 | ||
266 | idxp = fat_idx_get_by_index(dev_handle, index); |
266 | idxp = fat_idx_get_by_index(dev_handle, index); |
267 | if (!idxp) |
267 | if (!idxp) |
268 | return NULL; |
268 | return NULL; |
269 | /* idxp->lock held */ |
269 | /* idxp->lock held */ |
270 | nodep = fat_node_get_core(idxp); |
270 | nodep = fat_node_get_core(idxp); |
271 | fibril_mutex_unlock(&idxp->lock); |
271 | fibril_mutex_unlock(&idxp->lock); |
272 | return FS_NODE(nodep); |
272 | return FS_NODE(nodep); |
273 | } |
273 | } |
274 | 274 | ||
275 | void fat_node_put(fs_node_t *fn) |
275 | void fat_node_put(fs_node_t *fn) |
276 | { |
276 | { |
277 | fat_node_t *nodep = FAT_NODE(fn); |
277 | fat_node_t *nodep = FAT_NODE(fn); |
278 | bool destroy = false; |
278 | bool destroy = false; |
279 | 279 | ||
280 | fibril_mutex_lock(&nodep->lock); |
280 | fibril_mutex_lock(&nodep->lock); |
281 | if (!--nodep->refcnt) { |
281 | if (!--nodep->refcnt) { |
282 | if (nodep->idx) { |
282 | if (nodep->idx) { |
283 | fibril_mutex_lock(&ffn_mutex); |
283 | fibril_mutex_lock(&ffn_mutex); |
284 | list_append(&nodep->ffn_link, &ffn_head); |
284 | list_append(&nodep->ffn_link, &ffn_head); |
285 | fibril_mutex_unlock(&ffn_mutex); |
285 | fibril_mutex_unlock(&ffn_mutex); |
286 | } else { |
286 | } else { |
287 | /* |
287 | /* |
288 | * The node does not have any index structure associated |
288 | * The node does not have any index structure associated |
289 | * with itself. This can only mean that we are releasing |
289 | * with itself. This can only mean that we are releasing |
290 | * the node after a failed attempt to allocate the index |
290 | * the node after a failed attempt to allocate the index |
291 | * structure for it. |
291 | * structure for it. |
292 | */ |
292 | */ |
293 | destroy = true; |
293 | destroy = true; |
294 | } |
294 | } |
295 | } |
295 | } |
296 | fibril_mutex_unlock(&nodep->lock); |
296 | fibril_mutex_unlock(&nodep->lock); |
297 | if (destroy) { |
297 | if (destroy) { |
298 | free(nodep->bp); |
298 | free(nodep->bp); |
299 | free(nodep); |
299 | free(nodep); |
300 | } |
300 | } |
301 | } |
301 | } |
302 | 302 | ||
303 | fs_node_t *fat_create_node(dev_handle_t dev_handle, int flags) |
303 | fs_node_t *fat_create_node(dev_handle_t dev_handle, int flags) |
304 | { |
304 | { |
305 | fat_idx_t *idxp; |
305 | fat_idx_t *idxp; |
306 | fat_node_t *nodep; |
306 | fat_node_t *nodep; |
307 | fat_bs_t *bs; |
307 | fat_bs_t *bs; |
308 | fat_cluster_t mcl, lcl; |
308 | fat_cluster_t mcl, lcl; |
309 | uint16_t bps; |
309 | uint16_t bps; |
310 | int rc; |
310 | int rc; |
311 | 311 | ||
312 | bs = block_bb_get(dev_handle); |
312 | bs = block_bb_get(dev_handle); |
313 | bps = uint16_t_le2host(bs->bps); |
313 | bps = uint16_t_le2host(bs->bps); |
314 | if (flags & L_DIRECTORY) { |
314 | if (flags & L_DIRECTORY) { |
315 | /* allocate a cluster */ |
315 | /* allocate a cluster */ |
316 | rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl); |
316 | rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl); |
317 | if (rc != EOK) |
317 | if (rc != EOK) |
318 | return NULL; |
318 | return NULL; |
319 | } |
319 | } |
320 | 320 | ||
321 | nodep = fat_node_get_new(); |
321 | nodep = fat_node_get_new(); |
322 | if (!nodep) { |
322 | if (!nodep) { |
323 | fat_free_clusters(bs, dev_handle, mcl); |
323 | fat_free_clusters(bs, dev_handle, mcl); |
324 | return NULL; |
324 | return NULL; |
325 | } |
325 | } |
326 | idxp = fat_idx_get_new(dev_handle); |
326 | idxp = fat_idx_get_new(dev_handle); |
327 | if (!idxp) { |
327 | if (!idxp) { |
328 | fat_free_clusters(bs, dev_handle, mcl); |
328 | fat_free_clusters(bs, dev_handle, mcl); |
329 | fat_node_put(FS_NODE(nodep)); |
329 | fat_node_put(FS_NODE(nodep)); |
330 | return NULL; |
330 | return NULL; |
331 | } |
331 | } |
332 | /* idxp->lock held */ |
332 | /* idxp->lock held */ |
333 | if (flags & L_DIRECTORY) { |
333 | if (flags & L_DIRECTORY) { |
334 | int i; |
334 | int i; |
335 | block_t *b; |
335 | block_t *b; |
336 | 336 | ||
337 | /* |
337 | /* |
338 | * Populate the new cluster with unused dentries. |
338 | * Populate the new cluster with unused dentries. |
339 | */ |
339 | */ |
340 | for (i = 0; i < bs->spc; i++) { |
340 | for (i = 0; i < bs->spc; i++) { |
341 | b = _fat_block_get(bs, dev_handle, mcl, i, |
341 | b = _fat_block_get(bs, dev_handle, mcl, i, |
342 | BLOCK_FLAGS_NOREAD); |
342 | BLOCK_FLAGS_NOREAD); |
343 | /* mark all dentries as never-used */ |
343 | /* mark all dentries as never-used */ |
344 | memset(b->data, 0, bps); |
344 | memset(b->data, 0, bps); |
345 | b->dirty = false; |
345 | b->dirty = false; |
346 | block_put(b); |
346 | block_put(b); |
347 | } |
347 | } |
348 | nodep->type = FAT_DIRECTORY; |
348 | nodep->type = FAT_DIRECTORY; |
349 | nodep->firstc = mcl; |
349 | nodep->firstc = mcl; |
350 | nodep->size = bps * bs->spc; |
350 | nodep->size = bps * bs->spc; |
351 | } else { |
351 | } else { |
352 | nodep->type = FAT_FILE; |
352 | nodep->type = FAT_FILE; |
353 | nodep->firstc = FAT_CLST_RES0; |
353 | nodep->firstc = FAT_CLST_RES0; |
354 | nodep->size = 0; |
354 | nodep->size = 0; |
355 | } |
355 | } |
356 | nodep->lnkcnt = 0; /* not linked anywhere */ |
356 | nodep->lnkcnt = 0; /* not linked anywhere */ |
357 | nodep->refcnt = 1; |
357 | nodep->refcnt = 1; |
358 | nodep->dirty = true; |
358 | nodep->dirty = true; |
359 | 359 | ||
360 | nodep->idx = idxp; |
360 | nodep->idx = idxp; |
361 | idxp->nodep = nodep; |
361 | idxp->nodep = nodep; |
362 | 362 | ||
363 | fibril_mutex_unlock(&idxp->lock); |
363 | fibril_mutex_unlock(&idxp->lock); |
364 | return FS_NODE(nodep); |
364 | return FS_NODE(nodep); |
365 | } |
365 | } |
366 | 366 | ||
367 | int fat_destroy_node(fs_node_t *fn) |
367 | int fat_destroy_node(fs_node_t *fn) |
368 | { |
368 | { |
369 | fat_node_t *nodep = FAT_NODE(fn); |
369 | fat_node_t *nodep = FAT_NODE(fn); |
370 | fat_bs_t *bs; |
370 | fat_bs_t *bs; |
371 | 371 | ||
372 | /* |
372 | /* |
373 | * The node is not reachable from the file system. This means that the |
373 | * The node is not reachable from the file system. This means that the |
374 | * link count should be zero and that the index structure cannot be |
374 | * link count should be zero and that the index structure cannot be |
375 | * found in the position hash. Obviously, we don't need to lock the node |
375 | * found in the position hash. Obviously, we don't need to lock the node |
376 | * nor its index structure. |
376 | * nor its index structure. |
377 | */ |
377 | */ |
378 | assert(nodep->lnkcnt == 0); |
378 | assert(nodep->lnkcnt == 0); |
379 | 379 | ||
380 | /* |
380 | /* |
381 | * The node may not have any children. |
381 | * The node may not have any children. |
382 | */ |
382 | */ |
383 | assert(fat_has_children(fn) == false); |
383 | assert(fat_has_children(fn) == false); |
384 | 384 | ||
385 | bs = block_bb_get(nodep->idx->dev_handle); |
385 | bs = block_bb_get(nodep->idx->dev_handle); |
386 | if (nodep->firstc != FAT_CLST_RES0) { |
386 | if (nodep->firstc != FAT_CLST_RES0) { |
387 | assert(nodep->size); |
387 | assert(nodep->size); |
388 | /* Free all clusters allocated to the node. */ |
388 | /* Free all clusters allocated to the node. */ |
389 | fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc); |
389 | fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc); |
390 | } |
390 | } |
391 | 391 | ||
392 | fat_idx_destroy(nodep->idx); |
392 | fat_idx_destroy(nodep->idx); |
393 | free(nodep->bp); |
393 | free(nodep->bp); |
394 | free(nodep); |
394 | free(nodep); |
395 | return EOK; |
395 | return EOK; |
396 | } |
396 | } |
397 | 397 | ||
398 | int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name) |
398 | int fat_link(fs_node_t *pfn, fs_node_t *cfn, const char *name) |
399 | { |
399 | { |
400 | fat_node_t *parentp = FAT_NODE(pfn); |
400 | fat_node_t *parentp = FAT_NODE(pfn); |
401 | fat_node_t *childp = FAT_NODE(cfn); |
401 | fat_node_t *childp = FAT_NODE(cfn); |
402 | fat_dentry_t *d; |
402 | fat_dentry_t *d; |
403 | fat_bs_t *bs; |
403 | fat_bs_t *bs; |
404 | block_t *b; |
404 | block_t *b; |
405 | int i, j; |
405 | int i, j; |
406 | uint16_t bps; |
406 | uint16_t bps; |
407 | unsigned dps; |
407 | unsigned dps; |
408 | unsigned blocks; |
408 | unsigned blocks; |
409 | fat_cluster_t mcl, lcl; |
409 | fat_cluster_t mcl, lcl; |
410 | int rc; |
410 | int rc; |
411 | 411 | ||
412 | fibril_mutex_lock(&childp->lock); |
412 | fibril_mutex_lock(&childp->lock); |
413 | if (childp->lnkcnt == 1) { |
413 | if (childp->lnkcnt == 1) { |
414 | /* |
414 | /* |
415 | * On FAT, we don't support multiple hard links. |
415 | * On FAT, we don't support multiple hard links. |
416 | */ |
416 | */ |
417 | fibril_mutex_unlock(&childp->lock); |
417 | fibril_mutex_unlock(&childp->lock); |
418 | return EMLINK; |
418 | return EMLINK; |
419 | } |
419 | } |
420 | assert(childp->lnkcnt == 0); |
420 | assert(childp->lnkcnt == 0); |
421 | fibril_mutex_unlock(&childp->lock); |
421 | fibril_mutex_unlock(&childp->lock); |
422 | 422 | ||
423 | if (!fat_dentry_name_verify(name)) { |
423 | if (!fat_dentry_name_verify(name)) { |
424 | /* |
424 | /* |
425 | * Attempt to create unsupported name. |
425 | * Attempt to create unsupported name. |
426 | */ |
426 | */ |
427 | return ENOTSUP; |
427 | return ENOTSUP; |
428 | } |
428 | } |
429 | 429 | ||
430 | /* |
430 | /* |
431 | * Get us an unused parent node's dentry or grow the parent and allocate |
431 | * Get us an unused parent node's dentry or grow the parent and allocate |
432 | * a new one. |
432 | * a new one. |
433 | */ |
433 | */ |
434 | 434 | ||
435 | fibril_mutex_lock(&parentp->idx->lock); |
435 | fibril_mutex_lock(&parentp->idx->lock); |
436 | bs = block_bb_get(parentp->idx->dev_handle); |
436 | bs = block_bb_get(parentp->idx->dev_handle); |
437 | bps = uint16_t_le2host(bs->bps); |
437 | bps = uint16_t_le2host(bs->bps); |
438 | dps = bps / sizeof(fat_dentry_t); |
438 | dps = bps / sizeof(fat_dentry_t); |
439 | 439 | ||
440 | blocks = parentp->size / bps; |
440 | blocks = parentp->size / bps; |
441 | 441 | ||
442 | for (i = 0; i < blocks; i++) { |
442 | for (i = 0; i < blocks; i++) { |
443 | b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); |
443 | b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); |
444 | for (j = 0; j < dps; j++) { |
444 | for (j = 0; j < dps; j++) { |
445 | d = ((fat_dentry_t *)b->data) + j; |
445 | d = ((fat_dentry_t *)b->data) + j; |
446 | switch (fat_classify_dentry(d)) { |
446 | switch (fat_classify_dentry(d)) { |
447 | case FAT_DENTRY_SKIP: |
447 | case FAT_DENTRY_SKIP: |
448 | case FAT_DENTRY_VALID: |
448 | case FAT_DENTRY_VALID: |
449 | /* skipping used and meta entries */ |
449 | /* skipping used and meta entries */ |
450 | continue; |
450 | continue; |
451 | case FAT_DENTRY_FREE: |
451 | case FAT_DENTRY_FREE: |
452 | case FAT_DENTRY_LAST: |
452 | case FAT_DENTRY_LAST: |
453 | /* found an empty slot */ |
453 | /* found an empty slot */ |
454 | goto hit; |
454 | goto hit; |
455 | } |
455 | } |
456 | } |
456 | } |
457 | block_put(b); |
457 | block_put(b); |
458 | } |
458 | } |
459 | j = 0; |
459 | j = 0; |
460 | 460 | ||
461 | /* |
461 | /* |
462 | * We need to grow the parent in order to create a new unused dentry. |
462 | * We need to grow the parent in order to create a new unused dentry. |
463 | */ |
463 | */ |
464 | if (parentp->idx->pfc == FAT_CLST_ROOT) { |
464 | if (parentp->idx->pfc == FAT_CLST_ROOT) { |
465 | /* Can't grow the root directory. */ |
465 | /* Can't grow the root directory. */ |
466 | fibril_mutex_unlock(&parentp->idx->lock); |
466 | fibril_mutex_unlock(&parentp->idx->lock); |
467 | return ENOSPC; |
467 | return ENOSPC; |
468 | } |
468 | } |
469 | rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl); |
469 | rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl); |
470 | if (rc != EOK) { |
470 | if (rc != EOK) { |
471 | fibril_mutex_unlock(&parentp->idx->lock); |
471 | fibril_mutex_unlock(&parentp->idx->lock); |
472 | return rc; |
472 | return rc; |
473 | } |
473 | } |
474 | fat_append_clusters(bs, parentp, mcl); |
474 | fat_append_clusters(bs, parentp, mcl); |
475 | b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NOREAD); |
475 | b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NOREAD); |
476 | d = (fat_dentry_t *)b->data; |
476 | d = (fat_dentry_t *)b->data; |
477 | /* |
477 | /* |
478 | * Clear all dentries in the block except for the first one (the first |
478 | * Clear all dentries in the block except for the first one (the first |
479 | * dentry will be cleared in the next step). |
479 | * dentry will be cleared in the next step). |
480 | */ |
480 | */ |
481 | memset(d + 1, 0, bps - sizeof(fat_dentry_t)); |
481 | memset(d + 1, 0, bps - sizeof(fat_dentry_t)); |
482 | 482 | ||
483 | hit: |
483 | hit: |
484 | /* |
484 | /* |
485 | * At this point we only establish the link between the parent and the |
485 | * At this point we only establish the link between the parent and the |
486 | * child. The dentry, except of the name and the extension, will remain |
486 | * child. The dentry, except of the name and the extension, will remain |
487 | * uninitialized until the corresponding node is synced. Thus the valid |
487 | * uninitialized until the corresponding node is synced. Thus the valid |
488 | * dentry data is kept in the child node structure. |
488 | * dentry data is kept in the child node structure. |
489 | */ |
489 | */ |
490 | memset(d, 0, sizeof(fat_dentry_t)); |
490 | memset(d, 0, sizeof(fat_dentry_t)); |
491 | fat_dentry_name_set(d, name); |
491 | fat_dentry_name_set(d, name); |
492 | b->dirty = true; /* need to sync block */ |
492 | b->dirty = true; /* need to sync block */ |
493 | block_put(b); |
493 | block_put(b); |
494 | fibril_mutex_unlock(&parentp->idx->lock); |
494 | fibril_mutex_unlock(&parentp->idx->lock); |
495 | 495 | ||
496 | fibril_mutex_lock(&childp->idx->lock); |
496 | fibril_mutex_lock(&childp->idx->lock); |
497 | 497 | ||
498 | /* |
498 | /* |
499 | * If possible, create the Sub-directory Identifier Entry and the |
499 | * If possible, create the Sub-directory Identifier Entry and the |
500 | * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries |
500 | * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries |
501 | * are not mandatory according to Standard ECMA-107 and HelenOS VFS does |
501 | * are not mandatory according to Standard ECMA-107 and HelenOS VFS does |
502 | * not use them anyway, so this is rather a sign of our good will. |
502 | * not use them anyway, so this is rather a sign of our good will. |
503 | */ |
503 | */ |
504 | b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE); |
504 | b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE); |
505 | d = (fat_dentry_t *)b->data; |
505 | d = (fat_dentry_t *)b->data; |
506 | if (fat_classify_dentry(d) == FAT_DENTRY_LAST || |
506 | if (fat_classify_dentry(d) == FAT_DENTRY_LAST || |
507 | str_cmp(d->name, FAT_NAME_DOT) == 0) { |
507 | str_cmp(d->name, FAT_NAME_DOT) == 0) { |
508 | memset(d, 0, sizeof(fat_dentry_t)); |
508 | memset(d, 0, sizeof(fat_dentry_t)); |
509 | str_cpy(d->name, 8, FAT_NAME_DOT); |
509 | str_cpy(d->name, 8, FAT_NAME_DOT); |
510 | str_cpy(d->ext, 3, FAT_EXT_PAD); |
510 | str_cpy(d->ext, 3, FAT_EXT_PAD); |
511 | d->attr = FAT_ATTR_SUBDIR; |
511 | d->attr = FAT_ATTR_SUBDIR; |
512 | d->firstc = host2uint16_t_le(childp->firstc); |
512 | d->firstc = host2uint16_t_le(childp->firstc); |
513 | /* TODO: initialize also the date/time members. */ |
513 | /* TODO: initialize also the date/time members. */ |
514 | } |
514 | } |
515 | d++; |
515 | d++; |
516 | if (fat_classify_dentry(d) == FAT_DENTRY_LAST || |
516 | if (fat_classify_dentry(d) == FAT_DENTRY_LAST || |
517 | str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) { |
517 | str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) { |
518 | memset(d, 0, sizeof(fat_dentry_t)); |
518 | memset(d, 0, sizeof(fat_dentry_t)); |
519 | str_cpy(d->name, 8, FAT_NAME_DOT_DOT); |
519 | str_cpy(d->name, 8, FAT_NAME_DOT_DOT); |
520 | str_cpy(d->ext, 3, FAT_EXT_PAD); |
520 | str_cpy(d->ext, 3, FAT_EXT_PAD); |
521 | d->attr = FAT_ATTR_SUBDIR; |
521 | d->attr = FAT_ATTR_SUBDIR; |
522 | d->firstc = (parentp->firstc == FAT_CLST_ROOT) ? |
522 | d->firstc = (parentp->firstc == FAT_CLST_ROOT) ? |
523 | host2uint16_t_le(FAT_CLST_RES0) : |
523 | host2uint16_t_le(FAT_CLST_RES0) : |
524 | host2uint16_t_le(parentp->firstc); |
524 | host2uint16_t_le(parentp->firstc); |
525 | /* TODO: initialize also the date/time members. */ |
525 | /* TODO: initialize also the date/time members. */ |
526 | } |
526 | } |
527 | b->dirty = true; /* need to sync block */ |
527 | b->dirty = true; /* need to sync block */ |
528 | block_put(b); |
528 | block_put(b); |
529 | 529 | ||
530 | childp->idx->pfc = parentp->firstc; |
530 | childp->idx->pfc = parentp->firstc; |
531 | childp->idx->pdi = i * dps + j; |
531 | childp->idx->pdi = i * dps + j; |
532 | fibril_mutex_unlock(&childp->idx->lock); |
532 | fibril_mutex_unlock(&childp->idx->lock); |
533 | 533 | ||
534 | fibril_mutex_lock(&childp->lock); |
534 | fibril_mutex_lock(&childp->lock); |
535 | childp->lnkcnt = 1; |
535 | childp->lnkcnt = 1; |
536 | childp->dirty = true; /* need to sync node */ |
536 | childp->dirty = true; /* need to sync node */ |
537 | fibril_mutex_unlock(&childp->lock); |
537 | fibril_mutex_unlock(&childp->lock); |
538 | 538 | ||
539 | /* |
539 | /* |
540 | * Hash in the index structure into the position hash. |
540 | * Hash in the index structure into the position hash. |
541 | */ |
541 | */ |
542 | fat_idx_hashin(childp->idx); |
542 | fat_idx_hashin(childp->idx); |
543 | 543 | ||
544 | return EOK; |
544 | return EOK; |
545 | } |
545 | } |
546 | 546 | ||
547 | int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm) |
547 | int fat_unlink(fs_node_t *pfn, fs_node_t *cfn, const char *nm) |
548 | { |
548 | { |
549 | fat_node_t *parentp = FAT_NODE(pfn); |
549 | fat_node_t *parentp = FAT_NODE(pfn); |
550 | fat_node_t *childp = FAT_NODE(cfn); |
550 | fat_node_t *childp = FAT_NODE(cfn); |
551 | fat_bs_t *bs; |
551 | fat_bs_t *bs; |
552 | fat_dentry_t *d; |
552 | fat_dentry_t *d; |
553 | uint16_t bps; |
553 | uint16_t bps; |
554 | block_t *b; |
554 | block_t *b; |
555 | 555 | ||
556 | if (!parentp) |
556 | if (!parentp) |
557 | return EBUSY; |
557 | return EBUSY; |
558 | 558 | ||
559 | if (fat_has_children(cfn)) |
559 | if (fat_has_children(cfn)) |
560 | return ENOTEMPTY; |
560 | return ENOTEMPTY; |
561 | 561 | ||
562 | fibril_mutex_lock(&parentp->lock); |
562 | fibril_mutex_lock(&parentp->lock); |
563 | fibril_mutex_lock(&childp->lock); |
563 | fibril_mutex_lock(&childp->lock); |
564 | assert(childp->lnkcnt == 1); |
564 | assert(childp->lnkcnt == 1); |
565 | fibril_mutex_lock(&childp->idx->lock); |
565 | fibril_mutex_lock(&childp->idx->lock); |
566 | bs = block_bb_get(childp->idx->dev_handle); |
566 | bs = block_bb_get(childp->idx->dev_handle); |
567 | bps = uint16_t_le2host(bs->bps); |
567 | bps = uint16_t_le2host(bs->bps); |
568 | 568 | ||
569 | b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc, |
569 | b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc, |
570 | (childp->idx->pdi * sizeof(fat_dentry_t)) / bps, |
570 | (childp->idx->pdi * sizeof(fat_dentry_t)) / bps, |
571 | BLOCK_FLAGS_NONE); |
571 | BLOCK_FLAGS_NONE); |
572 | d = (fat_dentry_t *)b->data + |
572 | d = (fat_dentry_t *)b->data + |
573 | (childp->idx->pdi % (bps / sizeof(fat_dentry_t))); |
573 | (childp->idx->pdi % (bps / sizeof(fat_dentry_t))); |
574 | /* mark the dentry as not-currently-used */ |
574 | /* mark the dentry as not-currently-used */ |
575 | d->name[0] = FAT_DENTRY_ERASED; |
575 | d->name[0] = FAT_DENTRY_ERASED; |
576 | b->dirty = true; /* need to sync block */ |
576 | b->dirty = true; /* need to sync block */ |
577 | block_put(b); |
577 | block_put(b); |
578 | 578 | ||
579 | /* remove the index structure from the position hash */ |
579 | /* remove the index structure from the position hash */ |
580 | fat_idx_hashout(childp->idx); |
580 | fat_idx_hashout(childp->idx); |
581 | /* clear position information */ |
581 | /* clear position information */ |
582 | childp->idx->pfc = FAT_CLST_RES0; |
582 | childp->idx->pfc = FAT_CLST_RES0; |
583 | childp->idx->pdi = 0; |
583 | childp->idx->pdi = 0; |
584 | fibril_mutex_unlock(&childp->idx->lock); |
584 | fibril_mutex_unlock(&childp->idx->lock); |
585 | childp->lnkcnt = 0; |
585 | childp->lnkcnt = 0; |
586 | childp->dirty = true; |
586 | childp->dirty = true; |
587 | fibril_mutex_unlock(&childp->lock); |
587 | fibril_mutex_unlock(&childp->lock); |
588 | fibril_mutex_unlock(&parentp->lock); |
588 | fibril_mutex_unlock(&parentp->lock); |
589 | 589 | ||
590 | return EOK; |
590 | return EOK; |
591 | } |
591 | } |
592 | 592 | ||
/** Look up a directory entry by name inside directory @a pfn.
 *
 * Scans the parent directory dentry by dentry, comparing each valid dentry's
 * name against @a component.
 *
 * @param pfn		Parent directory node.
 * @param component	Name (with optional extension) to look for.
 *
 * @return		The matching child node, or NULL if no such entry
 *			exists or if the index structure could not be
 *			allocated.
 */
fs_node_t *fat_match(fs_node_t *pfn, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = FAT_NODE(pfn);
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;

	fibril_mutex_lock(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	/* Walk all blocks of the parent directory. */
	for (i = 0; i < blocks; i++) {
		b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE);
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
			case FAT_DENTRY_FREE:
				continue;
			case FAT_DENTRY_LAST:
				/* No further dentries can follow; miss. */
				block_put(b);
				fibril_mutex_unlock(&parentp->idx->lock);
				return NULL;
			default:
			case FAT_DENTRY_VALID:
				fat_dentry_name_get(d, name);
				break;
			}
			if (fat_dentry_namecmp(name, component) == 0) {
				/* hit */
				fat_node_t *nodep;
				/*
				 * Assume tree hierarchy for locking.  We
				 * already have the parent and now we are going
				 * to lock the child.  Never lock in the
				 * opposite order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				fibril_mutex_unlock(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					block_put(b);
					return NULL;
				}
				nodep = fat_node_get_core(idx);
				/* fat_idx_get_by_pos() returned idx locked. */
				fibril_mutex_unlock(&idx->lock);
				block_put(b);
				return FS_NODE(nodep);
			}
		}
		block_put(b);
	}

	/* Exhausted the whole directory without a match. */
	fibril_mutex_unlock(&parentp->idx->lock);
	return NULL;
}
660 | 660 | ||
661 | fs_index_t fat_index_get(fs_node_t *fn) |
661 | fs_index_t fat_index_get(fs_node_t *fn) |
662 | { |
662 | { |
663 | return FAT_NODE(fn)->idx->index; |
663 | return FAT_NODE(fn)->idx->index; |
664 | } |
664 | } |
665 | 665 | ||
666 | size_t fat_size_get(fs_node_t *fn) |
666 | size_t fat_size_get(fs_node_t *fn) |
667 | { |
667 | { |
668 | return FAT_NODE(fn)->size; |
668 | return FAT_NODE(fn)->size; |
669 | } |
669 | } |
670 | 670 | ||
671 | unsigned fat_lnkcnt_get(fs_node_t *fn) |
671 | unsigned fat_lnkcnt_get(fs_node_t *fn) |
672 | { |
672 | { |
673 | return FAT_NODE(fn)->lnkcnt; |
673 | return FAT_NODE(fn)->lnkcnt; |
674 | } |
674 | } |
675 | 675 | ||
676 | bool fat_has_children(fs_node_t *fn) |
676 | bool fat_has_children(fs_node_t *fn) |
677 | { |
677 | { |
678 | fat_bs_t *bs; |
678 | fat_bs_t *bs; |
679 | fat_node_t *nodep = FAT_NODE(fn); |
679 | fat_node_t *nodep = FAT_NODE(fn); |
680 | unsigned bps; |
680 | unsigned bps; |
681 | unsigned dps; |
681 | unsigned dps; |
682 | unsigned blocks; |
682 | unsigned blocks; |
683 | block_t *b; |
683 | block_t *b; |
684 | unsigned i, j; |
684 | unsigned i, j; |
685 | 685 | ||
686 | if (nodep->type != FAT_DIRECTORY) |
686 | if (nodep->type != FAT_DIRECTORY) |
687 | return false; |
687 | return false; |
688 | 688 | ||
689 | fibril_mutex_lock(&nodep->idx->lock); |
689 | fibril_mutex_lock(&nodep->idx->lock); |
690 | bs = block_bb_get(nodep->idx->dev_handle); |
690 | bs = block_bb_get(nodep->idx->dev_handle); |
691 | bps = uint16_t_le2host(bs->bps); |
691 | bps = uint16_t_le2host(bs->bps); |
692 | dps = bps / sizeof(fat_dentry_t); |
692 | dps = bps / sizeof(fat_dentry_t); |
693 | 693 | ||
694 | blocks = nodep->size / bps; |
694 | blocks = nodep->size / bps; |
695 | 695 | ||
696 | for (i = 0; i < blocks; i++) { |
696 | for (i = 0; i < blocks; i++) { |
697 | fat_dentry_t *d; |
697 | fat_dentry_t *d; |
698 | 698 | ||
699 | b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE); |
699 | b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE); |
700 | for (j = 0; j < dps; j++) { |
700 | for (j = 0; j < dps; j++) { |
701 | d = ((fat_dentry_t *)b->data) + j; |
701 | d = ((fat_dentry_t *)b->data) + j; |
702 | switch (fat_classify_dentry(d)) { |
702 | switch (fat_classify_dentry(d)) { |
703 | case FAT_DENTRY_SKIP: |
703 | case FAT_DENTRY_SKIP: |
704 | case FAT_DENTRY_FREE: |
704 | case FAT_DENTRY_FREE: |
705 | continue; |
705 | continue; |
706 | case FAT_DENTRY_LAST: |
706 | case FAT_DENTRY_LAST: |
707 | block_put(b); |
707 | block_put(b); |
708 | fibril_mutex_unlock(&nodep->idx->lock); |
708 | fibril_mutex_unlock(&nodep->idx->lock); |
709 | return false; |
709 | return false; |
710 | default: |
710 | default: |
711 | case FAT_DENTRY_VALID: |
711 | case FAT_DENTRY_VALID: |
712 | block_put(b); |
712 | block_put(b); |
713 | fibril_mutex_unlock(&nodep->idx->lock); |
713 | fibril_mutex_unlock(&nodep->idx->lock); |
714 | return true; |
714 | return true; |
715 | } |
715 | } |
716 | block_put(b); |
716 | block_put(b); |
717 | fibril_mutex_unlock(&nodep->idx->lock); |
717 | fibril_mutex_unlock(&nodep->idx->lock); |
718 | return true; |
718 | return true; |
719 | } |
719 | } |
720 | block_put(b); |
720 | block_put(b); |
721 | } |
721 | } |
722 | 722 | ||
723 | fibril_mutex_unlock(&nodep->idx->lock); |
723 | fibril_mutex_unlock(&nodep->idx->lock); |
724 | return false; |
724 | return false; |
725 | } |
725 | } |
726 | 726 | ||
727 | fs_node_t *fat_root_get(dev_handle_t dev_handle) |
727 | fs_node_t *fat_root_get(dev_handle_t dev_handle) |
728 | { |
728 | { |
729 | return fat_node_get(dev_handle, 0); |
729 | return fat_node_get(dev_handle, 0); |
730 | } |
730 | } |
731 | 731 | ||
732 | char fat_plb_get_char(unsigned pos) |
732 | char fat_plb_get_char(unsigned pos) |
733 | { |
733 | { |
734 | return fat_reg.plb_ro[pos % PLB_SIZE]; |
734 | return fat_reg.plb_ro[pos % PLB_SIZE]; |
735 | } |
735 | } |
736 | 736 | ||
737 | bool fat_is_directory(fs_node_t *fn) |
737 | bool fat_is_directory(fs_node_t *fn) |
738 | { |
738 | { |
739 | return FAT_NODE(fn)->type == FAT_DIRECTORY; |
739 | return FAT_NODE(fn)->type == FAT_DIRECTORY; |
740 | } |
740 | } |
741 | 741 | ||
742 | bool fat_is_file(fs_node_t *fn) |
742 | bool fat_is_file(fs_node_t *fn) |
743 | { |
743 | { |
744 | return FAT_NODE(fn)->type == FAT_FILE; |
744 | return FAT_NODE(fn)->type == FAT_FILE; |
745 | } |
745 | } |
746 | 746 | ||
/** libfs operations
 *
 * Operation table handed over to libfs; it maps the generic libfs callbacks
 * onto the FAT-specific implementations defined above.
 */
libfs_ops_t fat_libfs_ops = {
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create_node,
	.destroy = fat_destroy_node,
	.link = fat_link,
	.unlink = fat_unlink,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.has_children = fat_has_children,
	.root_get = fat_root_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};
765 | 765 | ||
766 | /* |
766 | /* |
767 | * VFS operations. |
767 | * VFS operations. |
768 | */ |
768 | */ |
769 | 769 | ||
770 | void fat_mounted(ipc_callid_t rid, ipc_call_t *request) |
770 | void fat_mounted(ipc_callid_t rid, ipc_call_t *request) |
771 | { |
771 | { |
772 | dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request); |
772 | dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request); |
773 | enum cache_mode cmode; |
773 | enum cache_mode cmode; |
774 | fat_bs_t *bs; |
774 | fat_bs_t *bs; |
775 | uint16_t bps; |
775 | uint16_t bps; |
776 | uint16_t rde; |
776 | uint16_t rde; |
777 | int rc; |
777 | int rc; |
778 | 778 | ||
779 | /* accept the mount options */ |
779 | /* accept the mount options */ |
780 | ipc_callid_t callid; |
780 | ipc_callid_t callid; |
781 | size_t size; |
781 | size_t size; |
782 | if (!ipc_data_write_receive(&callid, &size)) { |
782 | if (!ipc_data_write_receive(&callid, &size)) { |
783 | ipc_answer_0(callid, EINVAL); |
783 | ipc_answer_0(callid, EINVAL); |
784 | ipc_answer_0(rid, EINVAL); |
784 | ipc_answer_0(rid, EINVAL); |
785 | return; |
785 | return; |
786 | } |
786 | } |
787 | char *opts = malloc(size + 1); |
787 | char *opts = malloc(size + 1); |
788 | if (!opts) { |
788 | if (!opts) { |
789 | ipc_answer_0(callid, ENOMEM); |
789 | ipc_answer_0(callid, ENOMEM); |
790 | ipc_answer_0(rid, ENOMEM); |
790 | ipc_answer_0(rid, ENOMEM); |
791 | return; |
791 | return; |
792 | } |
792 | } |
793 | ipcarg_t retval = ipc_data_write_finalize(callid, opts, size); |
793 | ipcarg_t retval = ipc_data_write_finalize(callid, opts, size); |
794 | if (retval != EOK) { |
794 | if (retval != EOK) { |
795 | ipc_answer_0(rid, retval); |
795 | ipc_answer_0(rid, retval); |
796 | free(opts); |
796 | free(opts); |
797 | return; |
797 | return; |
798 | } |
798 | } |
799 | opts[size] = '\0'; |
799 | opts[size] = '\0'; |
800 | 800 | ||
801 | /* Check for option enabling write through. */ |
801 | /* Check for option enabling write through. */ |
802 | if (str_cmp(opts, "wtcache") == 0) |
802 | if (str_cmp(opts, "wtcache") == 0) |
803 | cmode = CACHE_MODE_WT; |
803 | cmode = CACHE_MODE_WT; |
804 | else |
804 | else |
805 | cmode = CACHE_MODE_WB; |
805 | cmode = CACHE_MODE_WB; |
806 | 806 | ||
807 | /* initialize libblock */ |
807 | /* initialize libblock */ |
808 | rc = block_init(dev_handle, BS_SIZE); |
808 | rc = block_init(dev_handle, BS_SIZE); |
809 | if (rc != EOK) { |
809 | if (rc != EOK) { |
810 | ipc_answer_0(rid, rc); |
810 | ipc_answer_0(rid, rc); |
811 | return; |
811 | return; |
812 | } |
812 | } |
813 | 813 | ||
814 | /* prepare the boot block */ |
814 | /* prepare the boot block */ |
815 | rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE); |
815 | rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE); |
816 | if (rc != EOK) { |
816 | if (rc != EOK) { |
817 | block_fini(dev_handle); |
817 | block_fini(dev_handle); |
818 | ipc_answer_0(rid, rc); |
818 | ipc_answer_0(rid, rc); |
819 | return; |
819 | return; |
820 | } |
820 | } |
821 | 821 | ||
822 | /* get the buffer with the boot sector */ |
822 | /* get the buffer with the boot sector */ |
823 | bs = block_bb_get(dev_handle); |
823 | bs = block_bb_get(dev_handle); |
824 | 824 | ||
825 | /* Read the number of root directory entries. */ |
825 | /* Read the number of root directory entries. */ |
826 | bps = uint16_t_le2host(bs->bps); |
826 | bps = uint16_t_le2host(bs->bps); |
827 | rde = uint16_t_le2host(bs->root_ent_max); |
827 | rde = uint16_t_le2host(bs->root_ent_max); |
828 | 828 | ||
829 | if (bps != BS_SIZE) { |
829 | if (bps != BS_SIZE) { |
830 | block_fini(dev_handle); |
830 | block_fini(dev_handle); |
831 | ipc_answer_0(rid, ENOTSUP); |
831 | ipc_answer_0(rid, ENOTSUP); |
832 | return; |
832 | return; |
833 | } |
833 | } |
834 | 834 | ||
835 | /* Initialize the block cache */ |
835 | /* Initialize the block cache */ |
836 | rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode); |
836 | rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode); |
837 | if (rc != EOK) { |
837 | if (rc != EOK) { |
838 | block_fini(dev_handle); |
838 | block_fini(dev_handle); |
839 | ipc_answer_0(rid, rc); |
839 | ipc_answer_0(rid, rc); |
840 | return; |
840 | return; |
841 | } |
841 | } |
842 | 842 | ||
843 | rc = fat_idx_init_by_dev_handle(dev_handle); |
843 | rc = fat_idx_init_by_dev_handle(dev_handle); |
844 | if (rc != EOK) { |
844 | if (rc != EOK) { |
845 | block_fini(dev_handle); |
845 | block_fini(dev_handle); |
846 | ipc_answer_0(rid, rc); |
846 | ipc_answer_0(rid, rc); |
847 | return; |
847 | return; |
848 | } |
848 | } |
849 | 849 | ||
850 | /* Initialize the root node. */ |
850 | /* Initialize the root node. */ |
851 | fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t)); |
851 | fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t)); |
852 | if (!rfn) { |
852 | if (!rfn) { |
853 | block_fini(dev_handle); |
853 | block_fini(dev_handle); |
854 | fat_idx_fini_by_dev_handle(dev_handle); |
854 | fat_idx_fini_by_dev_handle(dev_handle); |
855 | ipc_answer_0(rid, ENOMEM); |
855 | ipc_answer_0(rid, ENOMEM); |
856 | return; |
856 | return; |
857 | } |
857 | } |
858 | fs_node_initialize(rfn); |
858 | fs_node_initialize(rfn); |
859 | fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t)); |
859 | fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t)); |
860 | if (!rootp) { |
860 | if (!rootp) { |
861 | free(rfn); |
861 | free(rfn); |
862 | block_fini(dev_handle); |
862 | block_fini(dev_handle); |
863 | fat_idx_fini_by_dev_handle(dev_handle); |
863 | fat_idx_fini_by_dev_handle(dev_handle); |
864 | ipc_answer_0(rid, ENOMEM); |
864 | ipc_answer_0(rid, ENOMEM); |
865 | return; |
865 | return; |
866 | } |
866 | } |
867 | fat_node_initialize(rootp); |
867 | fat_node_initialize(rootp); |
868 | 868 | ||
869 | fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0); |
869 | fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0); |
870 | if (!ridxp) { |
870 | if (!ridxp) { |
871 | free(rfn); |
871 | free(rfn); |
872 | free(rootp); |
872 | free(rootp); |
873 | block_fini(dev_handle); |
873 | block_fini(dev_handle); |
874 | fat_idx_fini_by_dev_handle(dev_handle); |
874 | fat_idx_fini_by_dev_handle(dev_handle); |
875 | ipc_answer_0(rid, ENOMEM); |
875 | ipc_answer_0(rid, ENOMEM); |
876 | return; |
876 | return; |
877 | } |
877 | } |
878 | assert(ridxp->index == 0); |
878 | assert(ridxp->index == 0); |
879 | /* ridxp->lock held */ |
879 | /* ridxp->lock held */ |
880 | 880 | ||
881 | rootp->type = FAT_DIRECTORY; |
881 | rootp->type = FAT_DIRECTORY; |
882 | rootp->firstc = FAT_CLST_ROOT; |
882 | rootp->firstc = FAT_CLST_ROOT; |
883 | rootp->refcnt = 1; |
883 | rootp->refcnt = 1; |
884 | rootp->lnkcnt = 0; /* FS root is not linked */ |
884 | rootp->lnkcnt = 0; /* FS root is not linked */ |
885 | rootp->size = rde * sizeof(fat_dentry_t); |
885 | rootp->size = rde * sizeof(fat_dentry_t); |
886 | rootp->idx = ridxp; |
886 | rootp->idx = ridxp; |
887 | ridxp->nodep = rootp; |
887 | ridxp->nodep = rootp; |
888 | rootp->bp = rfn; |
888 | rootp->bp = rfn; |
889 | rfn->data = rootp; |
889 | rfn->data = rootp; |
890 | 890 | ||
891 | fibril_mutex_unlock(&ridxp->lock); |
891 | fibril_mutex_unlock(&ridxp->lock); |
892 | 892 | ||
893 | ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt); |
893 | ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt); |
894 | } |
894 | } |
895 | 895 | ||
896 | void fat_mount(ipc_callid_t rid, ipc_call_t *request) |
896 | void fat_mount(ipc_callid_t rid, ipc_call_t *request) |
897 | { |
897 | { |
898 | libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request); |
898 | libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request); |
899 | } |
899 | } |
900 | 900 | ||
901 | void fat_lookup(ipc_callid_t rid, ipc_call_t *request) |
901 | void fat_lookup(ipc_callid_t rid, ipc_call_t *request) |
902 | { |
902 | { |
903 | libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request); |
903 | libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request); |
904 | } |
904 | } |
905 | 905 | ||
/** VFS_READ request handler.
 *
 * For regular files, reads at most one block worth of data and finalizes
 * the IPC data transfer.  For directories, returns the name of the next
 * valid dentry at or after the requested position (readdir semantics).
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_READ IPC request (dev_handle, index, pos).
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn = fat_node_get(dev_handle, index);
	fat_node_t *nodep;
	fat_bs_t *bs;
	uint16_t bps;
	size_t bytes;
	block_t *b;

	if (!fn) {
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	/* Receive the data-read request from the client. */
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary and to the file size. */
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			b = fat_block_get(bs, nodep, pos / bps,
			    BLOCK_FLAGS_NONE);
			(void) ipc_data_read_finalize(callid, b->data + pos % bps,
			    bytes);
			block_put(b);
		}
	} else {
		unsigned bnum;
		off_t spos = pos;	/* starting position, for bookkeeping */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
			/* Resume at the dentry offset within this block. */
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
				case FAT_DENTRY_FREE:
					continue;
				case FAT_DENTRY_LAST:
					/* No further entries can exist. */
					block_put(b);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					fat_dentry_name_get(d, name);
					block_put(b);
					goto hit;
				}
			}
			block_put(b);
			bnum++;
		}
miss:
		/* No valid dentry found at or after the given position. */
		fat_node_put(fn);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		(void) ipc_data_read_finalize(callid, name, str_size(name) + 1);
		/* Number of dentries consumed, including the one returned. */
		bytes = (pos - spos) + 1;
	}

	fat_node_put(fn);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
1010 | 1010 | ||
/** Handle the VFS_WRITE request on a FAT node.
 *
 * Reads the target (device handle, node index, file position) from the IPC
 * request, receives the client's data buffer and writes it into the node,
 * growing the node (and its cluster chain) as needed.
 *
 * At most one block worth of data is written per call; the client must be
 * prepared to retry with the remainder (see the comment in the body).
 *
 * @param rid		IPC request id to answer; answered with
 *			(EOK, bytes written, new node size) on success or an
 *			error code on failure.
 * @param request	IPC call carrying ARG1 = device handle,
 *			ARG2 = node index, ARG3 = file position.
 */
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fs_node_t *fn = fat_node_get(dev_handle, index);
	fat_node_t *nodep;
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	uint16_t bps;
	unsigned spc;
	unsigned bpc;		/* bytes per cluster */
	off_t boundary;
	int flags = BLOCK_FLAGS_NONE;

	if (!fn) {
		/* No such node; fail the request before receiving any data. */
		ipc_answer_0(rid, ENOENT);
		return;
	}
	nodep = FAT_NODE(fn);

	/* Receive the client's write buffer descriptor (callid + length). */
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_write_receive(&callid, &len)) {
		/* Protocol violation: answer both the data call and the request. */
		fat_node_put(fn);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	/* Read filesystem geometry from the boot sector (boot block). */
	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);	/* bytes per sector */
	spc = bs->spc;				/* sectors per cluster */
	bpc = bps * spc;

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signalizing a smaller number of bytes written.
	 */
	bytes = min(len, bps - pos % bps);
	if (bytes == bps)
		/* Whole block is overwritten; no need to read it in first. */
		flags |= BLOCK_FLAGS_NOREAD;

	/* First position past the last cluster currently owned by the node. */
	boundary = ROUND_UP(nodep->size, bpc);
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		b = fat_block_get(bs, nodep, pos / bps, flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true;	/* need to sync node */
		}
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		int status;
		unsigned nclsts;
		fat_cluster_t mcl, lcl;

		/* Number of new clusters needed to cover [boundary, pos + bytes). */
		nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
		/* create an independent chain of nclsts clusters in all FATs */
		status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
		if (status != EOK) {
			/* could not allocate a chain of nclsts clusters */
			fat_node_put(fn);
			ipc_answer_0(callid, status);
			ipc_answer_0(rid, status);
			return;
		}
		/* zero fill any gaps */
		fat_fill_gap(bs, nodep, mcl, pos);
		/* Write goes into the last cluster (lcl) of the new chain. */
		b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
		    flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		fat_append_clusters(bs, nodep, mcl);
		nodep->size = pos + bytes;
		nodep->dirty = true;		/* need to sync node */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(fn);
		return;
	}
}
1118 | 1118 | ||
1119 | void fat_truncate(ipc_callid_t rid, ipc_call_t *request) |
1119 | void fat_truncate(ipc_callid_t rid, ipc_call_t *request) |
1120 | { |
1120 | { |
1121 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
1121 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
1122 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
1122 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
1123 | size_t size = (off_t)IPC_GET_ARG3(*request); |
1123 | size_t size = (off_t)IPC_GET_ARG3(*request); |
1124 | fs_node_t *fn = fat_node_get(dev_handle, index); |
1124 | fs_node_t *fn = fat_node_get(dev_handle, index); |
1125 | fat_node_t *nodep; |
1125 | fat_node_t *nodep; |
1126 | fat_bs_t *bs; |
1126 | fat_bs_t *bs; |
1127 | uint16_t bps; |
1127 | uint16_t bps; |
1128 | uint8_t spc; |
1128 | uint8_t spc; |
1129 | unsigned bpc; /* bytes per cluster */ |
1129 | unsigned bpc; /* bytes per cluster */ |
1130 | int rc; |
1130 | int rc; |
1131 | 1131 | ||
1132 | if (!fn) { |
1132 | if (!fn) { |
1133 | ipc_answer_0(rid, ENOENT); |
1133 | ipc_answer_0(rid, ENOENT); |
1134 | return; |
1134 | return; |
1135 | } |
1135 | } |
1136 | nodep = FAT_NODE(fn); |
1136 | nodep = FAT_NODE(fn); |
1137 | 1137 | ||
1138 | bs = block_bb_get(dev_handle); |
1138 | bs = block_bb_get(dev_handle); |
1139 | bps = uint16_t_le2host(bs->bps); |
1139 | bps = uint16_t_le2host(bs->bps); |
1140 | spc = bs->spc; |
1140 | spc = bs->spc; |
1141 | bpc = bps * spc; |
1141 | bpc = bps * spc; |
1142 | 1142 | ||
1143 | if (nodep->size == size) { |
1143 | if (nodep->size == size) { |
1144 | rc = EOK; |
1144 | rc = EOK; |
1145 | } else if (nodep->size < size) { |
1145 | } else if (nodep->size < size) { |
1146 | /* |
1146 | /* |
1147 | * The standard says we have the freedom to grow the node. |
1147 | * The standard says we have the freedom to grow the node. |
1148 | * For now, we simply return an error. |
1148 | * For now, we simply return an error. |
1149 | */ |
1149 | */ |
1150 | rc = EINVAL; |
1150 | rc = EINVAL; |
1151 | } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) { |
1151 | } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) { |
1152 | /* |
1152 | /* |
1153 | * The node will be shrunk, but no clusters will be deallocated. |
1153 | * The node will be shrunk, but no clusters will be deallocated. |
1154 | */ |
1154 | */ |
1155 | nodep->size = size; |
1155 | nodep->size = size; |
1156 | nodep->dirty = true; /* need to sync node */ |
1156 | nodep->dirty = true; /* need to sync node */ |
1157 | rc = EOK; |
1157 | rc = EOK; |
1158 | } else { |
1158 | } else { |
1159 | /* |
1159 | /* |
1160 | * The node will be shrunk, clusters will be deallocated. |
1160 | * The node will be shrunk, clusters will be deallocated. |
1161 | */ |
1161 | */ |
1162 | if (size == 0) { |
1162 | if (size == 0) { |
1163 | fat_chop_clusters(bs, nodep, FAT_CLST_RES0); |
1163 | fat_chop_clusters(bs, nodep, FAT_CLST_RES0); |
1164 | } else { |
1164 | } else { |
1165 | fat_cluster_t lastc; |
1165 | fat_cluster_t lastc; |
1166 | (void) fat_cluster_walk(bs, dev_handle, nodep->firstc, |
1166 | (void) fat_cluster_walk(bs, dev_handle, nodep->firstc, |
1167 | &lastc, (size - 1) / bpc); |
1167 | &lastc, (size - 1) / bpc); |
1168 | fat_chop_clusters(bs, nodep, lastc); |
1168 | fat_chop_clusters(bs, nodep, lastc); |
1169 | } |
1169 | } |
1170 | nodep->size = size; |
1170 | nodep->size = size; |
1171 | nodep->dirty = true; /* need to sync node */ |
1171 | nodep->dirty = true; /* need to sync node */ |
1172 | rc = EOK; |
1172 | rc = EOK; |
1173 | } |
1173 | } |
1174 | fat_node_put(fn); |
1174 | fat_node_put(fn); |
1175 | ipc_answer_0(rid, rc); |
1175 | ipc_answer_0(rid, rc); |
1176 | return; |
1176 | return; |
1177 | } |
1177 | } |
1178 | 1178 | ||
1179 | void fat_close(ipc_callid_t rid, ipc_call_t *request) |
1179 | void fat_close(ipc_callid_t rid, ipc_call_t *request) |
1180 | { |
1180 | { |
1181 | ipc_answer_0(rid, EOK); |
1181 | ipc_answer_0(rid, EOK); |
1182 | } |
1182 | } |
1183 | 1183 | ||
1184 | void fat_destroy(ipc_callid_t rid, ipc_call_t *request) |
1184 | void fat_destroy(ipc_callid_t rid, ipc_call_t *request) |
1185 | { |
1185 | { |
1186 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
1186 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
1187 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
1187 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
1188 | int rc; |
1188 | int rc; |
1189 | 1189 | ||
1190 | fs_node_t *fn = fat_node_get(dev_handle, index); |
1190 | fs_node_t *fn = fat_node_get(dev_handle, index); |
1191 | if (!fn) { |
1191 | if (!fn) { |
1192 | ipc_answer_0(rid, ENOENT); |
1192 | ipc_answer_0(rid, ENOENT); |
1193 | return; |
1193 | return; |
1194 | } |
1194 | } |
1195 | 1195 | ||
1196 | rc = fat_destroy_node(fn); |
1196 | rc = fat_destroy_node(fn); |
1197 | ipc_answer_0(rid, rc); |
1197 | ipc_answer_0(rid, rc); |
1198 | } |
1198 | } |
1199 | 1199 | ||
/** Handle the VFS_OPEN_NODE request.
 *
 * Delegates entirely to the shared libfs implementation, passing the FAT
 * operation table and this server's registered filesystem handle.
 *
 * @param rid		IPC request id; answered by libfs_open_node().
 * @param request	Forwarded verbatim to libfs_open_node().
 */
void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1204 | 1204 | ||
/** Handle the VFS_STAT request.
 *
 * Delegates entirely to the shared libfs implementation, passing the FAT
 * operation table and this server's registered filesystem handle.
 *
 * @param rid		IPC request id; answered by libfs_stat().
 * @param request	Forwarded verbatim to libfs_stat().
 */
void fat_stat(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
1209 | 1209 | ||
1210 | void fat_sync(ipc_callid_t rid, ipc_call_t *request) |
1210 | void fat_sync(ipc_callid_t rid, ipc_call_t *request) |
1211 | { |
1211 | { |
1212 | /* Dummy implementation */ |
1212 | /* Dummy implementation */ |
1213 | ipc_answer_0(rid, EOK); |
1213 | ipc_answer_0(rid, EOK); |
1214 | } |
1214 | } |
1215 | 1215 | ||
1216 | /** |
1216 | /** |
1217 | * @} |
1217 | * @} |
1218 | */ |
1218 | */ |
1219 | 1219 |