Rev 3561 | Rev 3598 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3561 | Rev 3588 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2008 Jakub Jermar |
2 | * Copyright (c) 2008 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup fs |
29 | /** @addtogroup fs |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | 32 | ||
33 | /** |
33 | /** |
34 | * @file fat_ops.c |
34 | * @file fat_ops.c |
35 | * @brief Implementation of VFS operations for the FAT file system server. |
35 | * @brief Implementation of VFS operations for the FAT file system server. |
36 | */ |
36 | */ |
37 | 37 | ||
38 | #include "fat.h" |
38 | #include "fat.h" |
39 | #include "fat_dentry.h" |
39 | #include "fat_dentry.h" |
40 | #include "fat_fat.h" |
40 | #include "fat_fat.h" |
41 | #include "../../vfs/vfs.h" |
41 | #include "../../vfs/vfs.h" |
42 | #include <libfs.h> |
42 | #include <libfs.h> |
43 | #include <libblock.h> |
43 | #include <libblock.h> |
44 | #include <ipc/ipc.h> |
44 | #include <ipc/ipc.h> |
45 | #include <ipc/services.h> |
45 | #include <ipc/services.h> |
46 | #include <ipc/devmap.h> |
46 | #include <ipc/devmap.h> |
47 | #include <async.h> |
47 | #include <async.h> |
48 | #include <errno.h> |
48 | #include <errno.h> |
49 | #include <string.h> |
49 | #include <string.h> |
50 | #include <byteorder.h> |
50 | #include <byteorder.h> |
51 | #include <libadt/hash_table.h> |
51 | #include <libadt/hash_table.h> |
52 | #include <libadt/list.h> |
52 | #include <libadt/list.h> |
53 | #include <assert.h> |
53 | #include <assert.h> |
54 | #include <futex.h> |
54 | #include <futex.h> |
55 | #include <sys/mman.h> |
55 | #include <sys/mman.h> |
56 | #include <align.h> |
56 | #include <align.h> |
57 | 57 | ||
/** Futex protecting the list of cached free FAT nodes. */
static futex_t ffn_futex = FUTEX_INITIALIZER;

/**
 * List of cached free FAT nodes.
 * Nodes whose reference count drops to zero are appended here (see
 * fat_node_put()) and may later be recycled instead of allocated anew.
 */
static LIST_INITIALIZE(ffn_head);
63 | 63 | ||
64 | static void fat_node_initialize(fat_node_t *node) |
64 | static void fat_node_initialize(fat_node_t *node) |
65 | { |
65 | { |
66 | futex_initialize(&node->lock, 1); |
66 | futex_initialize(&node->lock, 1); |
67 | node->idx = NULL; |
67 | node->idx = NULL; |
68 | node->type = 0; |
68 | node->type = 0; |
69 | link_initialize(&node->ffn_link); |
69 | link_initialize(&node->ffn_link); |
70 | node->size = 0; |
70 | node->size = 0; |
71 | node->lnkcnt = 0; |
71 | node->lnkcnt = 0; |
72 | node->refcnt = 0; |
72 | node->refcnt = 0; |
73 | node->dirty = false; |
73 | node->dirty = false; |
74 | } |
74 | } |
75 | 75 | ||
/** Write the in-core metadata of a dirty node back to its on-disk dentry.
 *
 * Fetches the block containing the node's parent-directory entry, updates
 * the first-cluster field and (for regular files) the size field, and marks
 * the block dirty so the block cache writes it back.
 *
 * @param node Node to synchronize; node->dirty must be true.
 */
static void fat_node_sync(fat_node_t *node)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	uint16_t bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */

	assert(node->dirty);

	bs = block_bb_get(node->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc,
	    (node->idx->pdi * sizeof(fat_dentry_t)) / bps);

	d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

	/* On-disk dentry fields are stored little-endian. */
	d->firstc = host2uint16_t_le(node->firstc);
	if (node->type == FAT_FILE)
		d->size = host2uint32_t_le(node->size);
	/* TODO: update other fields? (e.g time fields, attr field) */

	b->dirty = true;		/* need to sync block */
	block_put(b);
}
104 | 104 | ||
/** Obtain an initialized FAT in-core node structure.
 *
 * First tries to recycle a node from the list of cached free nodes: the
 * candidate is acquired with non-blocking trydowns (falling back to heap
 * allocation if either the node or its index structure is busy), synced to
 * disk if dirty, and detached from its old index structure.  Otherwise a
 * fresh structure is allocated from the heap.
 *
 * @return Initialized node, or NULL if out of memory.
 */
static fat_node_t *fat_node_get_new(void)
{
	fat_node_t *nodep;

	futex_down(&ffn_futex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
			futex_up(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		futex_up(&ffn_futex);
		if (nodep->dirty)
			fat_node_sync(nodep);
		/* Detach the recycled node from its old index structure. */
		idxp_tmp->nodep = NULL;
		futex_up(&nodep->lock);
		futex_up(&idxp_tmp->lock);
	} else {
skip_cache:
		/*
		 * Try to allocate a new node structure.
		 * Note: the goto above jumps here from the if-branch; labels
		 * have function scope, and ffn_futex is still held on entry.
		 */
		futex_up(&ffn_futex);
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep)
			return NULL;
	}
	fat_node_initialize(nodep);

	return nodep;
}
- | 140 | ||
/** Internal version of fat_node_get().
 *
 * If the node is already instantiated, it only gains a reference (and is
 * removed from the free list if this was the first reference).  Otherwise
 * the node is instantiated from its on-disk dentry.
 *
 * @param idxp Locked index structure.
 *
 * @return FAT in-core node (fat_node_t *), or NULL if out of memory.
 */
static void *fat_node_get_core(fat_idx_t *idxp)
{
	block_t *b;
	fat_bs_t *bs;
	fat_dentry_t *d;
	fat_node_t *nodep = NULL;
	unsigned bps;		/* bytes per sector */
	unsigned spc;		/* sectors per cluster */
	unsigned dps;		/* dentries per sector */

	if (idxp->nodep) {
		/*
		 * We are lucky.
		 * The node is already instantiated in memory.
		 */
		futex_down(&idxp->nodep->lock);
		if (!idxp->nodep->refcnt++)
			list_remove(&idxp->nodep->ffn_link);
		futex_up(&idxp->nodep->lock);
		return idxp->nodep;
	}

	/*
	 * We must instantiate the node from the file system.
	 */

	assert(idxp->pfc);

	nodep = fat_node_get_new();
	if (!nodep)
		return NULL;

	bs = block_bb_get(idxp->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	dps = bps / sizeof(fat_dentry_t);

	/* Read the block that contains the dentry of interest. */
	b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc,
	    (idxp->pdi * sizeof(fat_dentry_t)) / bps);
	assert(b);

	d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
	if (d->attr & FAT_ATTR_SUBDIR) {
		/*
		 * The only directory which does not have this bit set is the
		 * root directory itself. The root directory node is handled
		 * and initialized elsewhere.
		 */
		nodep->type = FAT_DIRECTORY;
		/*
		 * Unfortunately, the 'size' field of the FAT dentry is not
		 * defined for the directory entry type. We must determine the
		 * size of the directory by walking the FAT.
		 */
		nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle,
		    uint16_t_le2host(d->firstc));
	} else {
		nodep->type = FAT_FILE;
		nodep->size = uint32_t_le2host(d->size);
	}
	nodep->firstc = uint16_t_le2host(d->firstc);
	nodep->lnkcnt = 1;
	nodep->refcnt = 1;

	block_put(b);

	/* Link the idx structure with the node structure. */
	nodep->idx = idxp;
	idxp->nodep = nodep;

	return nodep;
}
207 | 218 | ||
208 | /** Instantiate a FAT in-core node. */ |
219 | /** Instantiate a FAT in-core node. */ |
209 | static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
220 | static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
210 | { |
221 | { |
211 | void *node; |
222 | void *node; |
212 | fat_idx_t *idxp; |
223 | fat_idx_t *idxp; |
213 | 224 | ||
214 | idxp = fat_idx_get_by_index(dev_handle, index); |
225 | idxp = fat_idx_get_by_index(dev_handle, index); |
215 | if (!idxp) |
226 | if (!idxp) |
216 | return NULL; |
227 | return NULL; |
217 | /* idxp->lock held */ |
228 | /* idxp->lock held */ |
218 | node = fat_node_get_core(idxp); |
229 | node = fat_node_get_core(idxp); |
219 | futex_up(&idxp->lock); |
230 | futex_up(&idxp->lock); |
220 | return node; |
231 | return node; |
221 | } |
232 | } |
222 | 233 | ||
/** Drop a reference to a node.
 *
 * When the last reference is dropped, the node is appended to the list of
 * cached free nodes, from where it may later be recycled or reclaimed.
 *
 * @param node Node to release (fat_node_t *).
 */
static void fat_node_put(void *node)
{
	fat_node_t *nodep = (fat_node_t *)node;

	futex_down(&nodep->lock);
	if (!--nodep->refcnt) {
		futex_down(&ffn_futex);
		list_append(&nodep->ffn_link, &ffn_head);
		futex_up(&ffn_futex);
	}
	futex_up(&nodep->lock);
}
235 | 246 | ||
/** Create a new node (not implemented yet; always fails).
 *
 * @return NULL, signalling failure to the caller.
 */
static void *fat_create(dev_handle_t dev_handle, int flags)
{
	return NULL;	/* not supported at the moment */
}
240 | 251 | ||
/** Destroy a node (not implemented yet; always fails).
 *
 * @return ENOTSUP.
 */
static int fat_destroy(void *node)
{
	return ENOTSUP;	/* not supported at the moment */
}
245 | 256 | ||
/** Link a child node into a parent directory (not implemented yet).
 *
 * @return false, signalling failure to the caller.
 */
static bool fat_link(void *prnt, void *chld, const char *name)
{
	return false;	/* not supported at the moment */
}
250 | 261 | ||
/** Unlink a child node from a parent directory (not implemented yet).
 *
 * @return ENOTSUP.
 */
static int fat_unlink(void *prnt, void *chld)
{
	return ENOTSUP;	/* not supported at the moment */
}
255 | 266 | ||
/** Look up a directory entry by name.
 *
 * Scans the parent directory block by block, canonifying each valid dentry
 * name and comparing it case-insensitively against the component.
 *
 * @param prnt		Parent directory node (fat_node_t *).
 * @param component	Name to look for.
 *
 * @return Referenced child node, or NULL if not found or on error.
 */
static void *fat_match(void *prnt, const char *component)
{
	fat_bs_t *bs;
	fat_node_t *parentp = (fat_node_t *)prnt;
	char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
	unsigned i, j;
	unsigned bps;		/* bytes per sector */
	unsigned dps;		/* dentries per sector */
	unsigned blocks;
	fat_dentry_t *d;
	block_t *b;

	futex_down(&parentp->idx->lock);
	bs = block_bb_get(parentp->idx->dev_handle);
	bps = uint16_t_le2host(bs->bps);
	dps = bps / sizeof(fat_dentry_t);
	blocks = parentp->size / bps;
	for (i = 0; i < blocks; i++) {
		b = fat_block_get(bs, parentp, i);
		for (j = 0; j < dps; j++) {
			d = ((fat_dentry_t *)b->data) + j;
			switch (fat_classify_dentry(d)) {
			case FAT_DENTRY_SKIP:
				continue;
			case FAT_DENTRY_LAST:
				/* No more dentries in this directory. */
				block_put(b);
				futex_up(&parentp->idx->lock);
				return NULL;
			default:
			case FAT_DENTRY_VALID:
				dentry_name_canonify(d, name);
				break;
			}
			if (stricmp(name, component) == 0) {
				/* hit */
				void *node;
				/*
				 * Assume tree hierarchy for locking.  We
				 * already have the parent and now we are going
				 * to lock the child.  Never lock in the
				 * opposite order.
				 */
				fat_idx_t *idx = fat_idx_get_by_pos(
				    parentp->idx->dev_handle, parentp->firstc,
				    i * dps + j);
				futex_up(&parentp->idx->lock);
				if (!idx) {
					/*
					 * Can happen if memory is low or if we
					 * run out of 32-bit indices.
					 */
					block_put(b);
					return NULL;
				}
				node = fat_node_get_core(idx);
				futex_up(&idx->lock);
				block_put(b);
				return node;
			}
		}
		block_put(b);
	}

	futex_up(&parentp->idx->lock);
	return NULL;
}
322 | 333 | ||
323 | static fs_index_t fat_index_get(void *node) |
334 | static fs_index_t fat_index_get(void *node) |
324 | { |
335 | { |
325 | fat_node_t *fnodep = (fat_node_t *)node; |
336 | fat_node_t *fnodep = (fat_node_t *)node; |
326 | if (!fnodep) |
337 | if (!fnodep) |
327 | return 0; |
338 | return 0; |
328 | return fnodep->idx->index; |
339 | return fnodep->idx->index; |
329 | } |
340 | } |
330 | 341 | ||
331 | static size_t fat_size_get(void *node) |
342 | static size_t fat_size_get(void *node) |
332 | { |
343 | { |
333 | return ((fat_node_t *)node)->size; |
344 | return ((fat_node_t *)node)->size; |
334 | } |
345 | } |
335 | 346 | ||
336 | static unsigned fat_lnkcnt_get(void *node) |
347 | static unsigned fat_lnkcnt_get(void *node) |
337 | { |
348 | { |
338 | return ((fat_node_t *)node)->lnkcnt; |
349 | return ((fat_node_t *)node)->lnkcnt; |
339 | } |
350 | } |
340 | 351 | ||
341 | static bool fat_has_children(void *node) |
352 | static bool fat_has_children(void *node) |
342 | { |
353 | { |
343 | fat_bs_t *bs; |
354 | fat_bs_t *bs; |
344 | fat_node_t *nodep = (fat_node_t *)node; |
355 | fat_node_t *nodep = (fat_node_t *)node; |
345 | unsigned bps; |
356 | unsigned bps; |
346 | unsigned dps; |
357 | unsigned dps; |
347 | unsigned blocks; |
358 | unsigned blocks; |
348 | block_t *b; |
359 | block_t *b; |
349 | unsigned i, j; |
360 | unsigned i, j; |
350 | 361 | ||
351 | if (nodep->type != FAT_DIRECTORY) |
362 | if (nodep->type != FAT_DIRECTORY) |
352 | return false; |
363 | return false; |
353 | 364 | ||
354 | futex_down(&nodep->idx->lock); |
365 | futex_down(&nodep->idx->lock); |
355 | bs = block_bb_get(nodep->idx->dev_handle); |
366 | bs = block_bb_get(nodep->idx->dev_handle); |
356 | bps = uint16_t_le2host(bs->bps); |
367 | bps = uint16_t_le2host(bs->bps); |
357 | dps = bps / sizeof(fat_dentry_t); |
368 | dps = bps / sizeof(fat_dentry_t); |
358 | 369 | ||
359 | blocks = nodep->size / bps; |
370 | blocks = nodep->size / bps; |
360 | 371 | ||
361 | for (i = 0; i < blocks; i++) { |
372 | for (i = 0; i < blocks; i++) { |
362 | fat_dentry_t *d; |
373 | fat_dentry_t *d; |
363 | 374 | ||
364 | b = fat_block_get(bs, nodep, i); |
375 | b = fat_block_get(bs, nodep, i); |
365 | for (j = 0; j < dps; j++) { |
376 | for (j = 0; j < dps; j++) { |
366 | d = ((fat_dentry_t *)b->data) + j; |
377 | d = ((fat_dentry_t *)b->data) + j; |
367 | switch (fat_classify_dentry(d)) { |
378 | switch (fat_classify_dentry(d)) { |
368 | case FAT_DENTRY_SKIP: |
379 | case FAT_DENTRY_SKIP: |
369 | continue; |
380 | continue; |
370 | case FAT_DENTRY_LAST: |
381 | case FAT_DENTRY_LAST: |
371 | block_put(b); |
382 | block_put(b); |
372 | futex_up(&nodep->idx->lock); |
383 | futex_up(&nodep->idx->lock); |
373 | return false; |
384 | return false; |
374 | default: |
385 | default: |
375 | case FAT_DENTRY_VALID: |
386 | case FAT_DENTRY_VALID: |
376 | block_put(b); |
387 | block_put(b); |
377 | futex_up(&nodep->idx->lock); |
388 | futex_up(&nodep->idx->lock); |
378 | return true; |
389 | return true; |
379 | } |
390 | } |
380 | block_put(b); |
391 | block_put(b); |
381 | futex_up(&nodep->idx->lock); |
392 | futex_up(&nodep->idx->lock); |
382 | return true; |
393 | return true; |
383 | } |
394 | } |
384 | block_put(b); |
395 | block_put(b); |
385 | } |
396 | } |
386 | 397 | ||
387 | futex_up(&nodep->idx->lock); |
398 | futex_up(&nodep->idx->lock); |
388 | return false; |
399 | return false; |
389 | } |
400 | } |
390 | 401 | ||
391 | static void *fat_root_get(dev_handle_t dev_handle) |
402 | static void *fat_root_get(dev_handle_t dev_handle) |
392 | { |
403 | { |
393 | return fat_node_get(dev_handle, 0); |
404 | return fat_node_get(dev_handle, 0); |
394 | } |
405 | } |
395 | 406 | ||
396 | static char fat_plb_get_char(unsigned pos) |
407 | static char fat_plb_get_char(unsigned pos) |
397 | { |
408 | { |
398 | return fat_reg.plb_ro[pos % PLB_SIZE]; |
409 | return fat_reg.plb_ro[pos % PLB_SIZE]; |
399 | } |
410 | } |
400 | 411 | ||
401 | static bool fat_is_directory(void *node) |
412 | static bool fat_is_directory(void *node) |
402 | { |
413 | { |
403 | return ((fat_node_t *)node)->type == FAT_DIRECTORY; |
414 | return ((fat_node_t *)node)->type == FAT_DIRECTORY; |
404 | } |
415 | } |
405 | 416 | ||
406 | static bool fat_is_file(void *node) |
417 | static bool fat_is_file(void *node) |
407 | { |
418 | { |
408 | return ((fat_node_t *)node)->type == FAT_FILE; |
419 | return ((fat_node_t *)node)->type == FAT_FILE; |
409 | } |
420 | } |
410 | 421 | ||
/** libfs operations vector: hooks this FAT server exports to libfs. */
libfs_ops_t fat_libfs_ops = {
	.match = fat_match,
	.node_get = fat_node_get,
	.node_put = fat_node_put,
	.create = fat_create,
	.destroy = fat_destroy,
	.link = fat_link,
	.unlink = fat_unlink,
	.index_get = fat_index_get,
	.size_get = fat_size_get,
	.lnkcnt_get = fat_lnkcnt_get,
	.has_children = fat_has_children,
	.root_get = fat_root_get,
	.plb_get_char = fat_plb_get_char,
	.is_directory = fat_is_directory,
	.is_file = fat_is_file
};
429 | 440 | ||
/** Handle the VFS_MOUNTED request: prepare a device for serving FAT.
 *
 * Initializes libblock for the device, reads and validates the boot
 * sector, sets up the block cache and the per-device index structures,
 * and instantiates the in-core root directory node.  Answers the request
 * with EOK and the root node's (index, size, lnkcnt) on success, or with
 * an error code (each error path undoes the initialization done so far).
 *
 * @param rid		Request ID used for answering.
 * @param request	IPC request; ARG1 carries the device handle.
 */
void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
	fat_bs_t *bs;
	uint16_t bps;
	uint16_t rde;
	int rc;

	/* initialize libblock */
	rc = block_init(dev_handle, BS_SIZE);
	if (rc != EOK) {
		ipc_answer_0(rid, rc);
		return;
	}

	/* prepare the boot block */
	rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* get the buffer with the boot sector */
	bs = block_bb_get(dev_handle);

	/* Read the number of root directory entries. */
	bps = uint16_t_le2host(bs->bps);
	rde = uint16_t_le2host(bs->root_ent_max);

	/* Only sectors of exactly BS_SIZE bytes are supported. */
	if (bps != BS_SIZE) {
		block_fini(dev_handle);
		ipc_answer_0(rid, ENOTSUP);
		return;
	}

	/* Initialize the block cache */
	rc = block_cache_init(dev_handle, bps, 0 /* XXX */);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	rc = fat_idx_init_by_dev_handle(dev_handle);
	if (rc != EOK) {
		block_fini(dev_handle);
		ipc_answer_0(rid, rc);
		return;
	}

	/* Initialize the root node. */
	fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
	if (!rootp) {
		block_fini(dev_handle);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	fat_node_initialize(rootp);

	fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
	if (!ridxp) {
		/*
		 * Can happen if memory is low or if we run out of
		 * 32-bit indices.
		 */
		block_fini(dev_handle);
		free(rootp);
		fat_idx_fini_by_dev_handle(dev_handle);
		ipc_answer_0(rid, ENOMEM);
		return;
	}
	assert(ridxp->index == 0);
	/* ridxp->lock held */

	rootp->type = FAT_DIRECTORY;
	rootp->firstc = FAT_CLST_ROOT;
	rootp->refcnt = 1;
	rootp->lnkcnt = 0;	/* FS root is not linked */
	rootp->size = rde * sizeof(fat_dentry_t);
	rootp->idx = ridxp;
	ridxp->nodep = rootp;

	futex_up(&ridxp->lock);

	ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
}
514 | 525 | ||
/** Handle the VFS_MOUNT request.
 *
 * Mounting another file system underneath a FAT node is not supported
 * yet, so the request is refused outright.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_MOUNT IPC request (unused).
 */
void fat_mount(ipc_callid_t rid, ipc_call_t *request)
{
	ipc_answer_0(rid, ENOTSUP);
}
519 | 530 | ||
/** Handle the VFS_LOOKUP request.
 *
 * All lookup logic is generic and lives in libfs; this merely forwards
 * the request together with the FAT-specific operations table.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_LOOKUP IPC request.
 */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
524 | 535 | ||
/** Handle the VFS_READ request.
 *
 * For regular files, at most one block worth of data is returned per
 * call. For directories, the position argument is interpreted as an
 * index into the flat array of directory entries and one canonified
 * entry name is returned per call.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_READ IPC request carrying the device
 *			handle (ARG1), node index (ARG2) and position
 *			(ARG3).
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	uint16_t bps;
	size_t bytes;
	block_t *b;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	/* Receive the data transfer phase of the read from the client. */
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(nodep);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	/* Bytes-per-sector comes from the boot block, stored little-endian. */
	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the end of the current block and to EOF. */
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			b = fat_block_get(bs, nodep, pos / bps);
			(void) ipc_data_read_finalize(callid, b->data + pos % bps,
			    bytes);
			block_put(b);
		}
	} else {
		unsigned bnum;
		off_t spos = pos;	/* starting position, for the return value */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			b = fat_block_get(bs, nodep, bnum);
			/* Scan dentries in this block, starting at 'pos'. */
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
					/* free/erased slot; keep scanning */
					continue;
				case FAT_DENTRY_LAST:
					/* no valid dentries follow */
					block_put(b);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					dentry_name_canonify(d, name);
					block_put(b);
					goto hit;
				}
			}
			block_put(b);
			bnum++;
		}
miss:
		/* Ran out of dentries without finding a valid one. */
		fat_node_put(nodep);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		/* Hand the NUL-terminated name back to the client. */
		(void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
		/* Report how many dentry positions were consumed. */
		bytes = (pos - spos) + 1;
	}

	fat_node_put(nodep);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
625 | 636 | ||
/** Handle the VFS_WRITE request.
 *
 * At most one block worth of data is written per call. Writes within
 * the space already covered by the node's cluster chain merely fill
 * gaps and update the size; writes past the last allocated cluster
 * first allocate and append a new chain of clusters.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_WRITE IPC request carrying the device
 *			handle (ARG1), node index (ARG2) and position
 *			(ARG3).
 */
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	size_t bytes;
	block_t *b;
	uint16_t bps;
	unsigned spc;
	unsigned bpc;		/* bytes per cluster */
	off_t boundary;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	/* Receive the data transfer phase of the write from the client. */
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_write_receive(&callid, &len)) {
		fat_node_put(nodep);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	/* Geometry from the boot block: sector size and cluster size. */
	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signalizing a smaller number of bytes written.
	 */
	bytes = min(len, bps - pos % bps);

	/* End of the space covered by the currently allocated clusters. */
	boundary = ROUND_UP(nodep->size, bpc);
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		b = fat_block_get(bs, nodep, pos / bps);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true;	/* need to sync node */
		}
		/* Return both the bytes written and the new node size. */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(nodep);
		return;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		int status;
		unsigned nclsts;
		fat_cluster_t mcl, lcl;

		nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
		/* create an independent chain of nclsts clusters in all FATs */
		status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
		if (status != EOK) {
			/* could not allocate a chain of nclsts clusters */
			fat_node_put(nodep);
			ipc_answer_0(callid, status);
			ipc_answer_0(rid, status);
			return;
		}
		/* zero fill any gaps */
		fat_fill_gap(bs, nodep, mcl, pos);
		/* The target block lives in the last cluster of the new chain. */
		b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		fat_append_clusters(bs, nodep, mcl);
		nodep->size = pos + bytes;
		nodep->dirty = true;		/* need to sync node */
		/* Return both the bytes written and the new node size. */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(nodep);
		return;
	}
}
727 | 738 | ||
728 | void fat_truncate(ipc_callid_t rid, ipc_call_t *request) |
739 | void fat_truncate(ipc_callid_t rid, ipc_call_t *request) |
729 | { |
740 | { |
730 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
741 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
731 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
742 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
732 | size_t size = (off_t)IPC_GET_ARG3(*request); |
743 | size_t size = (off_t)IPC_GET_ARG3(*request); |
733 | fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index); |
744 | fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index); |
- | 745 | fat_bs_t *bs; |
|
- | 746 | uint16_t bps; |
|
- | 747 | uint8_t spc; |
|
- | 748 | unsigned bpc; /* bytes per cluster */ |
|
734 | int rc; |
749 | int rc; |
735 | 750 | ||
736 | if (!nodep) { |
751 | if (!nodep) { |
737 | ipc_answer_0(rid, ENOENT); |
752 | ipc_answer_0(rid, ENOENT); |
738 | return; |
753 | return; |
739 | } |
754 | } |
740 | 755 | ||
- | 756 | bs = block_bb_get(dev_handle); |
|
- | 757 | bps = uint16_t_le2host(bs->bps); |
|
- | 758 | spc = bs->spc; |
|
- | 759 | bpc = bps * spc; |
|
- | 760 | ||
741 | if (nodep->size == size) { |
761 | if (nodep->size == size) { |
742 | rc = EOK; |
762 | rc = EOK; |
743 | } else if (nodep->size < size) { |
763 | } else if (nodep->size < size) { |
744 | /* |
764 | /* |
745 | * TODO: the standard says we have the freedom to grow the file. |
765 | * The standard says we have the freedom to grow the node. |
746 | * For now, we simply return an error. |
766 | * For now, we simply return an error. |
747 | */ |
767 | */ |
748 | rc = EINVAL; |
768 | rc = EINVAL; |
- | 769 | } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) { |
|
- | 770 | /* |
|
- | 771 | * The node will be shrunk, but no clusters will be deallocated. |
|
- | 772 | */ |
|
- | 773 | nodep->size = size; |
|
- | 774 | nodep->dirty = true; /* need to sync node */ |
|
- | 775 | rc = EOK; |
|
749 | } else { |
776 | } else { |
750 | /* |
777 | /* |
751 | * The file is to be shrunk. |
778 | * The node will be shrunk, clusters will be deallocated. |
752 | */ |
779 | */ |
- | 780 | if (size == 0) { |
|
- | 781 | fat_chop_clusters(bs, nodep, FAT_CLST_RES0); |
|
- | 782 | } else { |
|
- | 783 | fat_cluster_t lastc; |
|
- | 784 | (void) fat_cluster_walk(bs, dev_handle, nodep->firstc, |
|
- | 785 | &lastc, (size - 1) / bpc); |
|
- | 786 | fat_chop_clusters(bs, nodep, lastc); |
|
- | 787 | } |
|
- | 788 | nodep->size = size; |
|
- | 789 | nodep->dirty = true; /* need to sync node */ |
|
753 | rc = ENOTSUP; /* XXX */ |
790 | rc = EOK; |
754 | } |
791 | } |
755 | fat_node_put(nodep); |
792 | fat_node_put(nodep); |
756 | ipc_answer_0(rid, rc); |
793 | ipc_answer_0(rid, rc); |
757 | return; |
794 | return; |
758 | - | ||
759 | } |
795 | } |
760 | 796 | ||
761 | /** |
797 | /** |
762 | * @} |
798 | * @} |
763 | */ |
799 | */ |
764 | 800 |