Rev 3536 | Rev 3612 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3536 | Rev 3597 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (c) 2008 Jakub Jermar |
2 | * Copyright (c) 2008 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup fs |
29 | /** @addtogroup fs |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | 32 | ||
33 | /** |
33 | /** |
34 | * @file fat_ops.c |
34 | * @file fat_ops.c |
35 | * @brief Implementation of VFS operations for the FAT file system server. |
35 | * @brief Implementation of VFS operations for the FAT file system server. |
36 | */ |
36 | */ |
37 | 37 | ||
38 | #include "fat.h" |
38 | #include "fat.h" |
39 | #include "fat_dentry.h" |
39 | #include "fat_dentry.h" |
40 | #include "fat_fat.h" |
40 | #include "fat_fat.h" |
41 | #include "../../vfs/vfs.h" |
41 | #include "../../vfs/vfs.h" |
42 | #include <libfs.h> |
42 | #include <libfs.h> |
43 | #include <libblock.h> |
43 | #include <libblock.h> |
44 | #include <ipc/ipc.h> |
44 | #include <ipc/ipc.h> |
45 | #include <ipc/services.h> |
45 | #include <ipc/services.h> |
46 | #include <ipc/devmap.h> |
46 | #include <ipc/devmap.h> |
47 | #include <async.h> |
47 | #include <async.h> |
48 | #include <errno.h> |
48 | #include <errno.h> |
49 | #include <string.h> |
49 | #include <string.h> |
50 | #include <byteorder.h> |
50 | #include <byteorder.h> |
51 | #include <libadt/hash_table.h> |
51 | #include <libadt/hash_table.h> |
52 | #include <libadt/list.h> |
52 | #include <libadt/list.h> |
53 | #include <assert.h> |
53 | #include <assert.h> |
54 | #include <futex.h> |
54 | #include <futex.h> |
55 | #include <sys/mman.h> |
55 | #include <sys/mman.h> |
56 | #include <align.h> |
56 | #include <align.h> |
57 | 57 | ||
58 | /** Futex protecting the list of cached free FAT nodes. */ |
58 | /** Futex protecting the list of cached free FAT nodes. */ |
59 | static futex_t ffn_futex = FUTEX_INITIALIZER; |
59 | static futex_t ffn_futex = FUTEX_INITIALIZER; |
60 | 60 | ||
61 | /** List of cached free FAT nodes. */ |
61 | /** List of cached free FAT nodes. */ |
62 | static LIST_INITIALIZE(ffn_head); |
62 | static LIST_INITIALIZE(ffn_head); |
63 | 63 | ||
64 | static void fat_node_initialize(fat_node_t *node) |
64 | static void fat_node_initialize(fat_node_t *node) |
65 | { |
65 | { |
66 | futex_initialize(&node->lock, 1); |
66 | futex_initialize(&node->lock, 1); |
67 | node->idx = NULL; |
67 | node->idx = NULL; |
68 | node->type = 0; |
68 | node->type = 0; |
69 | link_initialize(&node->ffn_link); |
69 | link_initialize(&node->ffn_link); |
70 | node->size = 0; |
70 | node->size = 0; |
71 | node->lnkcnt = 0; |
71 | node->lnkcnt = 0; |
72 | node->refcnt = 0; |
72 | node->refcnt = 0; |
73 | node->dirty = false; |
73 | node->dirty = false; |
74 | } |
74 | } |
75 | 75 | ||
76 | static void fat_node_sync(fat_node_t *node) |
76 | static void fat_node_sync(fat_node_t *node) |
77 | { |
77 | { |
78 | block_t *b; |
78 | block_t *b; |
79 | fat_bs_t *bs; |
79 | fat_bs_t *bs; |
80 | fat_dentry_t *d; |
80 | fat_dentry_t *d; |
81 | uint16_t bps; |
81 | uint16_t bps; |
82 | unsigned dps; |
82 | unsigned dps; |
83 | 83 | ||
84 | assert(node->dirty); |
84 | assert(node->dirty); |
85 | 85 | ||
86 | bs = block_bb_get(node->idx->dev_handle); |
86 | bs = block_bb_get(node->idx->dev_handle); |
87 | bps = uint16_t_le2host(bs->bps); |
87 | bps = uint16_t_le2host(bs->bps); |
88 | dps = bps / sizeof(fat_dentry_t); |
88 | dps = bps / sizeof(fat_dentry_t); |
89 | 89 | ||
90 | /* Read the block that contains the dentry of interest. */ |
90 | /* Read the block that contains the dentry of interest. */ |
91 | b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc, |
91 | b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc, |
92 | (node->idx->pdi * sizeof(fat_dentry_t)) / bps); |
92 | (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
93 | 93 | ||
94 | d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps); |
94 | d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps); |
95 | 95 | ||
96 | d->firstc = host2uint16_t_le(node->firstc); |
96 | d->firstc = host2uint16_t_le(node->firstc); |
97 | if (node->type == FAT_FILE) |
97 | if (node->type == FAT_FILE) |
98 | d->size = host2uint32_t_le(node->size); |
98 | d->size = host2uint32_t_le(node->size); |
99 | /* TODO: update other fields? (e.g time fields, attr field) */ |
99 | /* TODO: update other fields? (e.g time fields, attr field) */ |
100 | 100 | ||
101 | b->dirty = true; /* need to sync block */ |
101 | b->dirty = true; /* need to sync block */ |
102 | block_put(b); |
102 | block_put(b); |
103 | } |
103 | } |
104 | 104 | ||
- | 105 | static fat_node_t *fat_node_get_new(void) |
|
- | 106 | { |
|
- | 107 | fat_node_t *nodep; |
|
- | 108 | ||
- | 109 | futex_down(&ffn_futex); |
|
- | 110 | if (!list_empty(&ffn_head)) { |
|
- | 111 | /* Try to use a cached free node structure. */ |
|
- | 112 | fat_idx_t *idxp_tmp; |
|
- | 113 | nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link); |
|
- | 114 | if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK) |
|
- | 115 | goto skip_cache; |
|
- | 116 | idxp_tmp = nodep->idx; |
|
- | 117 | if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) { |
|
- | 118 | futex_up(&nodep->lock); |
|
- | 119 | goto skip_cache; |
|
- | 120 | } |
|
- | 121 | list_remove(&nodep->ffn_link); |
|
- | 122 | futex_up(&ffn_futex); |
|
- | 123 | if (nodep->dirty) |
|
- | 124 | fat_node_sync(nodep); |
|
- | 125 | idxp_tmp->nodep = NULL; |
|
- | 126 | futex_up(&nodep->lock); |
|
- | 127 | futex_up(&idxp_tmp->lock); |
|
- | 128 | } else { |
|
- | 129 | skip_cache: |
|
- | 130 | /* Try to allocate a new node structure. */ |
|
- | 131 | futex_up(&ffn_futex); |
|
- | 132 | nodep = (fat_node_t *)malloc(sizeof(fat_node_t)); |
|
- | 133 | if (!nodep) |
|
- | 134 | return NULL; |
|
- | 135 | } |
|
- | 136 | fat_node_initialize(nodep); |
|
- | 137 | ||
- | 138 | return nodep; |
|
- | 139 | } |
|
- | 140 | ||
105 | /** Internal version of fat_node_get(). |
141 | /** Internal version of fat_node_get(). |
106 | * |
142 | * |
107 | * @param idxp Locked index structure. |
143 | * @param idxp Locked index structure. |
108 | */ |
144 | */ |
109 | static void *fat_node_get_core(fat_idx_t *idxp) |
145 | static void *fat_node_get_core(fat_idx_t *idxp) |
110 | { |
146 | { |
111 | block_t *b; |
147 | block_t *b; |
112 | fat_bs_t *bs; |
148 | fat_bs_t *bs; |
113 | fat_dentry_t *d; |
149 | fat_dentry_t *d; |
114 | fat_node_t *nodep = NULL; |
150 | fat_node_t *nodep = NULL; |
115 | unsigned bps; |
151 | unsigned bps; |
- | 152 | unsigned spc; |
|
116 | unsigned dps; |
153 | unsigned dps; |
117 | 154 | ||
118 | if (idxp->nodep) { |
155 | if (idxp->nodep) { |
119 | /* |
156 | /* |
120 | * We are lucky. |
157 | * We are lucky. |
121 | * The node is already instantiated in memory. |
158 | * The node is already instantiated in memory. |
122 | */ |
159 | */ |
123 | futex_down(&idxp->nodep->lock); |
160 | futex_down(&idxp->nodep->lock); |
124 | if (!idxp->nodep->refcnt++) |
161 | if (!idxp->nodep->refcnt++) |
125 | list_remove(&idxp->nodep->ffn_link); |
162 | list_remove(&idxp->nodep->ffn_link); |
126 | futex_up(&idxp->nodep->lock); |
163 | futex_up(&idxp->nodep->lock); |
127 | return idxp->nodep; |
164 | return idxp->nodep; |
128 | } |
165 | } |
129 | 166 | ||
130 | /* |
167 | /* |
131 | * We must instantiate the node from the file system. |
168 | * We must instantiate the node from the file system. |
132 | */ |
169 | */ |
133 | 170 | ||
134 | assert(idxp->pfc); |
171 | assert(idxp->pfc); |
135 | 172 | ||
136 | futex_down(&ffn_futex); |
- | |
137 | if (!list_empty(&ffn_head)) { |
- | |
138 | /* Try to use a cached free node structure. */ |
- | |
139 | fat_idx_t *idxp_tmp; |
- | |
140 | nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link); |
- | |
141 | if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK) |
- | |
142 | goto skip_cache; |
- | |
143 | idxp_tmp = nodep->idx; |
- | |
144 | if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) { |
- | |
145 | futex_up(&nodep->lock); |
- | |
146 | goto skip_cache; |
- | |
147 | } |
- | |
148 | list_remove(&nodep->ffn_link); |
- | |
149 | futex_up(&ffn_futex); |
- | |
150 | if (nodep->dirty) |
- | |
151 | fat_node_sync(nodep); |
173 | nodep = fat_node_get_new(); |
152 | idxp_tmp->nodep = NULL; |
- | |
153 | futex_up(&nodep->lock); |
- | |
154 | futex_up(&idxp_tmp->lock); |
- | |
155 | } else { |
- | |
156 | skip_cache: |
- | |
157 | /* Try to allocate a new node structure. */ |
- | |
158 | futex_up(&ffn_futex); |
- | |
159 | nodep = (fat_node_t *)malloc(sizeof(fat_node_t)); |
- | |
160 | if (!nodep) |
174 | if (!nodep) |
161 | return NULL; |
175 | return NULL; |
162 | } |
- | |
163 | fat_node_initialize(nodep); |
- | |
164 | 176 | ||
165 | bs = block_bb_get(idxp->dev_handle); |
177 | bs = block_bb_get(idxp->dev_handle); |
166 | bps = uint16_t_le2host(bs->bps); |
178 | bps = uint16_t_le2host(bs->bps); |
- | 179 | spc = bs->spc; |
|
167 | dps = bps / sizeof(fat_dentry_t); |
180 | dps = bps / sizeof(fat_dentry_t); |
168 | 181 | ||
169 | /* Read the block that contains the dentry of interest. */ |
182 | /* Read the block that contains the dentry of interest. */ |
170 | b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc, |
183 | b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc, |
171 | (idxp->pdi * sizeof(fat_dentry_t)) / bps); |
184 | (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
172 | assert(b); |
185 | assert(b); |
173 | 186 | ||
174 | d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); |
187 | d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); |
175 | if (d->attr & FAT_ATTR_SUBDIR) { |
188 | if (d->attr & FAT_ATTR_SUBDIR) { |
176 | /* |
189 | /* |
177 | * The only directory which does not have this bit set is the |
190 | * The only directory which does not have this bit set is the |
178 | * root directory itself. The root directory node is handled |
191 | * root directory itself. The root directory node is handled |
179 | * and initialized elsewhere. |
192 | * and initialized elsewhere. |
180 | */ |
193 | */ |
181 | nodep->type = FAT_DIRECTORY; |
194 | nodep->type = FAT_DIRECTORY; |
182 | /* |
195 | /* |
183 | * Unfortunately, the 'size' field of the FAT dentry is not |
196 | * Unfortunately, the 'size' field of the FAT dentry is not |
184 | * defined for the directory entry type. We must determine the |
197 | * defined for the directory entry type. We must determine the |
185 | * size of the directory by walking the FAT. |
198 | * size of the directory by walking the FAT. |
186 | */ |
199 | */ |
187 | nodep->size = bps * _fat_blcks_get(bs, idxp->dev_handle, |
200 | nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle, |
188 | uint16_t_le2host(d->firstc), NULL); |
201 | uint16_t_le2host(d->firstc)); |
189 | } else { |
202 | } else { |
190 | nodep->type = FAT_FILE; |
203 | nodep->type = FAT_FILE; |
191 | nodep->size = uint32_t_le2host(d->size); |
204 | nodep->size = uint32_t_le2host(d->size); |
192 | } |
205 | } |
193 | nodep->firstc = uint16_t_le2host(d->firstc); |
206 | nodep->firstc = uint16_t_le2host(d->firstc); |
194 | nodep->lnkcnt = 1; |
207 | nodep->lnkcnt = 1; |
195 | nodep->refcnt = 1; |
208 | nodep->refcnt = 1; |
196 | 209 | ||
197 | block_put(b); |
210 | block_put(b); |
198 | 211 | ||
199 | /* Link the idx structure with the node structure. */ |
212 | /* Link the idx structure with the node structure. */ |
200 | nodep->idx = idxp; |
213 | nodep->idx = idxp; |
201 | idxp->nodep = nodep; |
214 | idxp->nodep = nodep; |
202 | 215 | ||
203 | return nodep; |
216 | return nodep; |
204 | } |
217 | } |
205 | 218 | ||
206 | /** Instantiate a FAT in-core node. */ |
219 | /** Instantiate a FAT in-core node. */ |
207 | static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
220 | static void *fat_node_get(dev_handle_t dev_handle, fs_index_t index) |
208 | { |
221 | { |
209 | void *node; |
222 | void *node; |
210 | fat_idx_t *idxp; |
223 | fat_idx_t *idxp; |
211 | 224 | ||
212 | idxp = fat_idx_get_by_index(dev_handle, index); |
225 | idxp = fat_idx_get_by_index(dev_handle, index); |
213 | if (!idxp) |
226 | if (!idxp) |
214 | return NULL; |
227 | return NULL; |
215 | /* idxp->lock held */ |
228 | /* idxp->lock held */ |
216 | node = fat_node_get_core(idxp); |
229 | node = fat_node_get_core(idxp); |
217 | futex_up(&idxp->lock); |
230 | futex_up(&idxp->lock); |
218 | return node; |
231 | return node; |
219 | } |
232 | } |
220 | 233 | ||
221 | static void fat_node_put(void *node) |
234 | static void fat_node_put(void *node) |
222 | { |
235 | { |
223 | fat_node_t *nodep = (fat_node_t *)node; |
236 | fat_node_t *nodep = (fat_node_t *)node; |
224 | 237 | ||
225 | futex_down(&nodep->lock); |
238 | futex_down(&nodep->lock); |
226 | if (!--nodep->refcnt) { |
239 | if (!--nodep->refcnt) { |
227 | futex_down(&ffn_futex); |
240 | futex_down(&ffn_futex); |
228 | list_append(&nodep->ffn_link, &ffn_head); |
241 | list_append(&nodep->ffn_link, &ffn_head); |
229 | futex_up(&ffn_futex); |
242 | futex_up(&ffn_futex); |
230 | } |
243 | } |
231 | futex_up(&nodep->lock); |
244 | futex_up(&nodep->lock); |
232 | } |
245 | } |
233 | 246 | ||
234 | static void *fat_create(int flags) |
247 | static void *fat_create(dev_handle_t dev_handle, int flags) |
235 | { |
248 | { |
236 | return NULL; /* not supported at the moment */ |
249 | return NULL; /* not supported at the moment */ |
237 | } |
250 | } |
238 | 251 | ||
239 | static int fat_destroy(void *node) |
252 | static int fat_destroy(void *node) |
240 | { |
253 | { |
241 | return ENOTSUP; /* not supported at the moment */ |
254 | return ENOTSUP; /* not supported at the moment */ |
242 | } |
255 | } |
243 | 256 | ||
/** Link a child node under a parent with the given name (libfs link op). */
static bool fat_link(void *prnt, void *chld, const char *name)
{
	return false;	/* not supported at the moment */
}
248 | 261 | ||
249 | static int fat_unlink(void *prnt, void *chld) |
262 | static int fat_unlink(void *prnt, void *chld) |
250 | { |
263 | { |
251 | return ENOTSUP; /* not supported at the moment */ |
264 | return ENOTSUP; /* not supported at the moment */ |
252 | } |
265 | } |
253 | 266 | ||
254 | static void *fat_match(void *prnt, const char *component) |
267 | static void *fat_match(void *prnt, const char *component) |
255 | { |
268 | { |
256 | fat_bs_t *bs; |
269 | fat_bs_t *bs; |
257 | fat_node_t *parentp = (fat_node_t *)prnt; |
270 | fat_node_t *parentp = (fat_node_t *)prnt; |
258 | char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1]; |
271 | char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1]; |
259 | unsigned i, j; |
272 | unsigned i, j; |
260 | unsigned bps; /* bytes per sector */ |
273 | unsigned bps; /* bytes per sector */ |
261 | unsigned dps; /* dentries per sector */ |
274 | unsigned dps; /* dentries per sector */ |
262 | unsigned blocks; |
275 | unsigned blocks; |
263 | fat_dentry_t *d; |
276 | fat_dentry_t *d; |
264 | block_t *b; |
277 | block_t *b; |
265 | 278 | ||
266 | futex_down(&parentp->idx->lock); |
279 | futex_down(&parentp->idx->lock); |
267 | bs = block_bb_get(parentp->idx->dev_handle); |
280 | bs = block_bb_get(parentp->idx->dev_handle); |
268 | bps = uint16_t_le2host(bs->bps); |
281 | bps = uint16_t_le2host(bs->bps); |
269 | dps = bps / sizeof(fat_dentry_t); |
282 | dps = bps / sizeof(fat_dentry_t); |
270 | blocks = parentp->size / bps; |
283 | blocks = parentp->size / bps; |
271 | for (i = 0; i < blocks; i++) { |
284 | for (i = 0; i < blocks; i++) { |
272 | b = fat_block_get(bs, parentp, i); |
285 | b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); |
273 | for (j = 0; j < dps; j++) { |
286 | for (j = 0; j < dps; j++) { |
274 | d = ((fat_dentry_t *)b->data) + j; |
287 | d = ((fat_dentry_t *)b->data) + j; |
275 | switch (fat_classify_dentry(d)) { |
288 | switch (fat_classify_dentry(d)) { |
276 | case FAT_DENTRY_SKIP: |
289 | case FAT_DENTRY_SKIP: |
277 | continue; |
290 | continue; |
278 | case FAT_DENTRY_LAST: |
291 | case FAT_DENTRY_LAST: |
279 | block_put(b); |
292 | block_put(b); |
280 | futex_up(&parentp->idx->lock); |
293 | futex_up(&parentp->idx->lock); |
281 | return NULL; |
294 | return NULL; |
282 | default: |
295 | default: |
283 | case FAT_DENTRY_VALID: |
296 | case FAT_DENTRY_VALID: |
284 | dentry_name_canonify(d, name); |
297 | dentry_name_canonify(d, name); |
285 | break; |
298 | break; |
286 | } |
299 | } |
287 | if (stricmp(name, component) == 0) { |
300 | if (stricmp(name, component) == 0) { |
288 | /* hit */ |
301 | /* hit */ |
289 | void *node; |
302 | void *node; |
290 | /* |
303 | /* |
291 | * Assume tree hierarchy for locking. We |
304 | * Assume tree hierarchy for locking. We |
292 | * already have the parent and now we are going |
305 | * already have the parent and now we are going |
293 | * to lock the child. Never lock in the oposite |
306 | * to lock the child. Never lock in the oposite |
294 | * order. |
307 | * order. |
295 | */ |
308 | */ |
296 | fat_idx_t *idx = fat_idx_get_by_pos( |
309 | fat_idx_t *idx = fat_idx_get_by_pos( |
297 | parentp->idx->dev_handle, parentp->firstc, |
310 | parentp->idx->dev_handle, parentp->firstc, |
298 | i * dps + j); |
311 | i * dps + j); |
299 | futex_up(&parentp->idx->lock); |
312 | futex_up(&parentp->idx->lock); |
300 | if (!idx) { |
313 | if (!idx) { |
301 | /* |
314 | /* |
302 | * Can happen if memory is low or if we |
315 | * Can happen if memory is low or if we |
303 | * run out of 32-bit indices. |
316 | * run out of 32-bit indices. |
304 | */ |
317 | */ |
305 | block_put(b); |
318 | block_put(b); |
306 | return NULL; |
319 | return NULL; |
307 | } |
320 | } |
308 | node = fat_node_get_core(idx); |
321 | node = fat_node_get_core(idx); |
309 | futex_up(&idx->lock); |
322 | futex_up(&idx->lock); |
310 | block_put(b); |
323 | block_put(b); |
311 | return node; |
324 | return node; |
312 | } |
325 | } |
313 | } |
326 | } |
314 | block_put(b); |
327 | block_put(b); |
315 | } |
328 | } |
316 | 329 | ||
317 | futex_up(&parentp->idx->lock); |
330 | futex_up(&parentp->idx->lock); |
318 | return NULL; |
331 | return NULL; |
319 | } |
332 | } |
320 | 333 | ||
321 | static fs_index_t fat_index_get(void *node) |
334 | static fs_index_t fat_index_get(void *node) |
322 | { |
335 | { |
323 | fat_node_t *fnodep = (fat_node_t *)node; |
336 | fat_node_t *fnodep = (fat_node_t *)node; |
324 | if (!fnodep) |
337 | if (!fnodep) |
325 | return 0; |
338 | return 0; |
326 | return fnodep->idx->index; |
339 | return fnodep->idx->index; |
327 | } |
340 | } |
328 | 341 | ||
329 | static size_t fat_size_get(void *node) |
342 | static size_t fat_size_get(void *node) |
330 | { |
343 | { |
331 | return ((fat_node_t *)node)->size; |
344 | return ((fat_node_t *)node)->size; |
332 | } |
345 | } |
333 | 346 | ||
334 | static unsigned fat_lnkcnt_get(void *node) |
347 | static unsigned fat_lnkcnt_get(void *node) |
335 | { |
348 | { |
336 | return ((fat_node_t *)node)->lnkcnt; |
349 | return ((fat_node_t *)node)->lnkcnt; |
337 | } |
350 | } |
338 | 351 | ||
339 | static bool fat_has_children(void *node) |
352 | static bool fat_has_children(void *node) |
340 | { |
353 | { |
341 | fat_bs_t *bs; |
354 | fat_bs_t *bs; |
342 | fat_node_t *nodep = (fat_node_t *)node; |
355 | fat_node_t *nodep = (fat_node_t *)node; |
343 | unsigned bps; |
356 | unsigned bps; |
344 | unsigned dps; |
357 | unsigned dps; |
345 | unsigned blocks; |
358 | unsigned blocks; |
346 | block_t *b; |
359 | block_t *b; |
347 | unsigned i, j; |
360 | unsigned i, j; |
348 | 361 | ||
349 | if (nodep->type != FAT_DIRECTORY) |
362 | if (nodep->type != FAT_DIRECTORY) |
350 | return false; |
363 | return false; |
351 | 364 | ||
352 | futex_down(&nodep->idx->lock); |
365 | futex_down(&nodep->idx->lock); |
353 | bs = block_bb_get(nodep->idx->dev_handle); |
366 | bs = block_bb_get(nodep->idx->dev_handle); |
354 | bps = uint16_t_le2host(bs->bps); |
367 | bps = uint16_t_le2host(bs->bps); |
355 | dps = bps / sizeof(fat_dentry_t); |
368 | dps = bps / sizeof(fat_dentry_t); |
356 | 369 | ||
357 | blocks = nodep->size / bps; |
370 | blocks = nodep->size / bps; |
358 | 371 | ||
359 | for (i = 0; i < blocks; i++) { |
372 | for (i = 0; i < blocks; i++) { |
360 | fat_dentry_t *d; |
373 | fat_dentry_t *d; |
361 | 374 | ||
362 | b = fat_block_get(bs, nodep, i); |
375 | b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE); |
363 | for (j = 0; j < dps; j++) { |
376 | for (j = 0; j < dps; j++) { |
364 | d = ((fat_dentry_t *)b->data) + j; |
377 | d = ((fat_dentry_t *)b->data) + j; |
365 | switch (fat_classify_dentry(d)) { |
378 | switch (fat_classify_dentry(d)) { |
366 | case FAT_DENTRY_SKIP: |
379 | case FAT_DENTRY_SKIP: |
367 | continue; |
380 | continue; |
368 | case FAT_DENTRY_LAST: |
381 | case FAT_DENTRY_LAST: |
369 | block_put(b); |
382 | block_put(b); |
370 | futex_up(&nodep->idx->lock); |
383 | futex_up(&nodep->idx->lock); |
371 | return false; |
384 | return false; |
372 | default: |
385 | default: |
373 | case FAT_DENTRY_VALID: |
386 | case FAT_DENTRY_VALID: |
374 | block_put(b); |
387 | block_put(b); |
375 | futex_up(&nodep->idx->lock); |
388 | futex_up(&nodep->idx->lock); |
376 | return true; |
389 | return true; |
377 | } |
390 | } |
378 | block_put(b); |
391 | block_put(b); |
379 | futex_up(&nodep->idx->lock); |
392 | futex_up(&nodep->idx->lock); |
380 | return true; |
393 | return true; |
381 | } |
394 | } |
382 | block_put(b); |
395 | block_put(b); |
383 | } |
396 | } |
384 | 397 | ||
385 | futex_up(&nodep->idx->lock); |
398 | futex_up(&nodep->idx->lock); |
386 | return false; |
399 | return false; |
387 | } |
400 | } |
388 | 401 | ||
389 | static void *fat_root_get(dev_handle_t dev_handle) |
402 | static void *fat_root_get(dev_handle_t dev_handle) |
390 | { |
403 | { |
391 | return fat_node_get(dev_handle, 0); |
404 | return fat_node_get(dev_handle, 0); |
392 | } |
405 | } |
393 | 406 | ||
394 | static char fat_plb_get_char(unsigned pos) |
407 | static char fat_plb_get_char(unsigned pos) |
395 | { |
408 | { |
396 | return fat_reg.plb_ro[pos % PLB_SIZE]; |
409 | return fat_reg.plb_ro[pos % PLB_SIZE]; |
397 | } |
410 | } |
398 | 411 | ||
399 | static bool fat_is_directory(void *node) |
412 | static bool fat_is_directory(void *node) |
400 | { |
413 | { |
401 | return ((fat_node_t *)node)->type == FAT_DIRECTORY; |
414 | return ((fat_node_t *)node)->type == FAT_DIRECTORY; |
402 | } |
415 | } |
403 | 416 | ||
404 | static bool fat_is_file(void *node) |
417 | static bool fat_is_file(void *node) |
405 | { |
418 | { |
406 | return ((fat_node_t *)node)->type == FAT_FILE; |
419 | return ((fat_node_t *)node)->type == FAT_FILE; |
407 | } |
420 | } |
408 | 421 | ||
409 | /** libfs operations */ |
422 | /** libfs operations */ |
410 | libfs_ops_t fat_libfs_ops = { |
423 | libfs_ops_t fat_libfs_ops = { |
411 | .match = fat_match, |
424 | .match = fat_match, |
412 | .node_get = fat_node_get, |
425 | .node_get = fat_node_get, |
413 | .node_put = fat_node_put, |
426 | .node_put = fat_node_put, |
414 | .create = fat_create, |
427 | .create = fat_create, |
415 | .destroy = fat_destroy, |
428 | .destroy = fat_destroy, |
416 | .link = fat_link, |
429 | .link = fat_link, |
417 | .unlink = fat_unlink, |
430 | .unlink = fat_unlink, |
418 | .index_get = fat_index_get, |
431 | .index_get = fat_index_get, |
419 | .size_get = fat_size_get, |
432 | .size_get = fat_size_get, |
420 | .lnkcnt_get = fat_lnkcnt_get, |
433 | .lnkcnt_get = fat_lnkcnt_get, |
421 | .has_children = fat_has_children, |
434 | .has_children = fat_has_children, |
422 | .root_get = fat_root_get, |
435 | .root_get = fat_root_get, |
423 | .plb_get_char = fat_plb_get_char, |
436 | .plb_get_char = fat_plb_get_char, |
424 | .is_directory = fat_is_directory, |
437 | .is_directory = fat_is_directory, |
425 | .is_file = fat_is_file |
438 | .is_file = fat_is_file |
426 | }; |
439 | }; |
427 | 440 | ||
428 | void fat_mounted(ipc_callid_t rid, ipc_call_t *request) |
441 | void fat_mounted(ipc_callid_t rid, ipc_call_t *request) |
429 | { |
442 | { |
430 | dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request); |
443 | dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request); |
431 | fat_bs_t *bs; |
444 | fat_bs_t *bs; |
432 | uint16_t bps; |
445 | uint16_t bps; |
433 | uint16_t rde; |
446 | uint16_t rde; |
434 | int rc; |
447 | int rc; |
435 | 448 | ||
436 | /* initialize libblock */ |
449 | /* initialize libblock */ |
- | 450 | rc = block_init(dev_handle, BS_SIZE); |
|
- | 451 | if (rc != EOK) { |
|
- | 452 | ipc_answer_0(rid, rc); |
|
- | 453 | return; |
|
- | 454 | } |
|
- | 455 | ||
- | 456 | /* prepare the boot block */ |
|
437 | rc = block_init(dev_handle, BS_SIZE, BS_BLOCK * BS_SIZE, BS_SIZE); |
457 | rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE); |
438 | if (rc != EOK) { |
458 | if (rc != EOK) { |
- | 459 | block_fini(dev_handle); |
|
439 | ipc_answer_0(rid, 0); |
460 | ipc_answer_0(rid, rc); |
440 | return; |
461 | return; |
441 | } |
462 | } |
442 | 463 | ||
443 | /* get the buffer with the boot sector */ |
464 | /* get the buffer with the boot sector */ |
444 | bs = block_bb_get(dev_handle); |
465 | bs = block_bb_get(dev_handle); |
445 | 466 | ||
446 | /* Read the number of root directory entries. */ |
467 | /* Read the number of root directory entries. */ |
447 | bps = uint16_t_le2host(bs->bps); |
468 | bps = uint16_t_le2host(bs->bps); |
448 | rde = uint16_t_le2host(bs->root_ent_max); |
469 | rde = uint16_t_le2host(bs->root_ent_max); |
449 | 470 | ||
450 | if (bps != BS_SIZE) { |
471 | if (bps != BS_SIZE) { |
451 | block_fini(dev_handle); |
472 | block_fini(dev_handle); |
452 | ipc_answer_0(rid, ENOTSUP); |
473 | ipc_answer_0(rid, ENOTSUP); |
453 | return; |
474 | return; |
454 | } |
475 | } |
455 | 476 | ||
- | 477 | /* Initialize the block cache */ |
|
- | 478 | rc = block_cache_init(dev_handle, bps, 0 /* XXX */); |
|
- | 479 | if (rc != EOK) { |
|
- | 480 | block_fini(dev_handle); |
|
- | 481 | ipc_answer_0(rid, rc); |
|
- | 482 | return; |
|
- | 483 | } |
|
- | 484 | ||
456 | rc = fat_idx_init_by_dev_handle(dev_handle); |
485 | rc = fat_idx_init_by_dev_handle(dev_handle); |
457 | if (rc != EOK) { |
486 | if (rc != EOK) { |
458 | block_fini(dev_handle); |
487 | block_fini(dev_handle); |
459 | ipc_answer_0(rid, rc); |
488 | ipc_answer_0(rid, rc); |
460 | return; |
489 | return; |
461 | } |
490 | } |
462 | 491 | ||
463 | /* Initialize the root node. */ |
492 | /* Initialize the root node. */ |
464 | fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t)); |
493 | fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t)); |
465 | if (!rootp) { |
494 | if (!rootp) { |
466 | block_fini(dev_handle); |
495 | block_fini(dev_handle); |
467 | fat_idx_fini_by_dev_handle(dev_handle); |
496 | fat_idx_fini_by_dev_handle(dev_handle); |
468 | ipc_answer_0(rid, ENOMEM); |
497 | ipc_answer_0(rid, ENOMEM); |
469 | return; |
498 | return; |
470 | } |
499 | } |
471 | fat_node_initialize(rootp); |
500 | fat_node_initialize(rootp); |
472 | 501 | ||
473 | fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0); |
502 | fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0); |
474 | if (!ridxp) { |
503 | if (!ridxp) { |
475 | block_fini(dev_handle); |
504 | block_fini(dev_handle); |
476 | free(rootp); |
505 | free(rootp); |
477 | fat_idx_fini_by_dev_handle(dev_handle); |
506 | fat_idx_fini_by_dev_handle(dev_handle); |
478 | ipc_answer_0(rid, ENOMEM); |
507 | ipc_answer_0(rid, ENOMEM); |
479 | return; |
508 | return; |
480 | } |
509 | } |
481 | assert(ridxp->index == 0); |
510 | assert(ridxp->index == 0); |
482 | /* ridxp->lock held */ |
511 | /* ridxp->lock held */ |
483 | 512 | ||
484 | rootp->type = FAT_DIRECTORY; |
513 | rootp->type = FAT_DIRECTORY; |
485 | rootp->firstc = FAT_CLST_ROOT; |
514 | rootp->firstc = FAT_CLST_ROOT; |
486 | rootp->refcnt = 1; |
515 | rootp->refcnt = 1; |
487 | rootp->lnkcnt = 0; /* FS root is not linked */ |
516 | rootp->lnkcnt = 0; /* FS root is not linked */ |
488 | rootp->size = rde * sizeof(fat_dentry_t); |
517 | rootp->size = rde * sizeof(fat_dentry_t); |
489 | rootp->idx = ridxp; |
518 | rootp->idx = ridxp; |
490 | ridxp->nodep = rootp; |
519 | ridxp->nodep = rootp; |
491 | 520 | ||
492 | futex_up(&ridxp->lock); |
521 | futex_up(&ridxp->lock); |
493 | 522 | ||
494 | ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt); |
523 | ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt); |
495 | } |
524 | } |
496 | 525 | ||
497 | void fat_mount(ipc_callid_t rid, ipc_call_t *request) |
526 | void fat_mount(ipc_callid_t rid, ipc_call_t *request) |
498 | { |
527 | { |
499 | ipc_answer_0(rid, ENOTSUP); |
528 | ipc_answer_0(rid, ENOTSUP); |
500 | } |
529 | } |
501 | 530 | ||
/** Handle the VFS_LOOKUP request.
 *
 * All path-lookup logic lives in the shared libfs code; this merely
 * dispatches the request to libfs_lookup() together with the FAT
 * operations vector and this server's registered FS handle.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_LOOKUP request call.
 */
void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
{
	libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
}
506 | 535 | ||
/** Handle the VFS_READ request.
 *
 * For regular files, at most one block worth of data is returned per call;
 * the client is expected to cope with short reads.  For directories, the
 * file position is interpreted as an index into the array of directory
 * entries and one canonified entry name is returned per call.
 *
 * IPC protocol: the data transfer is answered via @a callid
 * (ipc_data_read_finalize), the request itself via @a rid; both must be
 * answered on every path out of this function.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_READ request call carrying
 *			(dev_handle, index, pos) in ARG1..ARG3.
 */
void fat_read(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	uint16_t bps;		/* bytes per sector, from the boot sector */
	size_t bytes;		/* number of bytes actually transferred */
	block_t *b;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	/* Receive the client's read buffer description (size in len). */
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_read_receive(&callid, &len)) {
		fat_node_put(nodep);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);	/* on-disk value is little-endian */

	if (nodep->type == FAT_FILE) {
		/*
		 * Our strategy for regular file reads is to read one block at
		 * most and make use of the possibility to return less data than
		 * requested. This keeps the code very simple.
		 */
		if (pos >= nodep->size) {
			/* reading beyond the EOF */
			bytes = 0;
			(void) ipc_data_read_finalize(callid, NULL, 0);
		} else {
			/* Clamp to the block boundary, then to EOF. */
			bytes = min(len, bps - pos % bps);
			bytes = min(bytes, nodep->size - pos);
			b = fat_block_get(bs, nodep, pos / bps,
			    BLOCK_FLAGS_NONE);
			(void) ipc_data_read_finalize(callid, b->data + pos % bps,
			    bytes);
			block_put(b);
		}
	} else {
		unsigned bnum;		/* block number within the directory */
		off_t spos = pos;	/* starting position, for bytes below */
		char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
		fat_dentry_t *d;

		assert(nodep->type == FAT_DIRECTORY);
		assert(nodep->size % bps == 0);
		assert(bps % sizeof(fat_dentry_t) == 0);

		/*
		 * Our strategy for readdir() is to use the position pointer as
		 * an index into the array of all dentries. On entry, it points
		 * to the first unread dentry. If we skip any dentries, we bump
		 * the position pointer accordingly.
		 */
		bnum = (pos * sizeof(fat_dentry_t)) / bps;
		while (bnum < nodep->size / bps) {
			off_t o;

			b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE);
			for (o = pos % (bps / sizeof(fat_dentry_t));
			    o < bps / sizeof(fat_dentry_t);
			    o++, pos++) {
				d = ((fat_dentry_t *)b->data) + o;
				switch (fat_classify_dentry(d)) {
				case FAT_DENTRY_SKIP:
					/* free/erased entry; continue the for loop */
					continue;
				case FAT_DENTRY_LAST:
					/* no more entries follow in this directory */
					block_put(b);
					goto miss;
				default:
				case FAT_DENTRY_VALID:
					dentry_name_canonify(d, name);
					block_put(b);
					goto hit;
				}
			}
			block_put(b);
			bnum++;
		}
miss:
		/* Position is past the last entry: answer ENOENT, 0 bytes. */
		fat_node_put(nodep);
		ipc_answer_0(callid, ENOENT);
		ipc_answer_1(rid, ENOENT, 0);
		return;
hit:
		(void) ipc_data_read_finalize(callid, name, strlen(name) + 1);
		/* Report how many dentry slots the position advanced over. */
		bytes = (pos - spos) + 1;
	}

	fat_node_put(nodep);
	ipc_answer_1(rid, EOK, (ipcarg_t)bytes);
}
607 | 637 | ||
/** Handle the VFS_WRITE request.
 *
 * At most one block worth of data is written per call; the client must be
 * prepared to handle short writes.  Two cases are distinguished: a write
 * that fits into the clusters already allocated to the node (possibly
 * growing the size up to the last cluster boundary), and a write past the
 * last allocated cluster, which requires allocating and appending a new
 * cluster chain.
 *
 * Answers the request with ipc_answer_2(rid, EOK, bytes, new_size) on
 * success, so the client also learns the (possibly grown) node size.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_WRITE request call carrying
 *			(dev_handle, index, pos) in ARG1..ARG3.
 */
void fat_write(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	off_t pos = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	size_t bytes;		/* number of bytes actually written */
	block_t *b;
	uint16_t bps;		/* bytes per sector */
	unsigned spc;		/* sectors per cluster */
	unsigned bpc;		/* bytes per cluster */
	off_t boundary;		/* node size rounded up to a cluster boundary */
	int flags = BLOCK_FLAGS_NONE;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	/* Receive the client's write buffer description (size in len). */
	ipc_callid_t callid;
	size_t len;
	if (!ipc_data_write_receive(&callid, &len)) {
		fat_node_put(nodep);
		ipc_answer_0(callid, EINVAL);
		ipc_answer_0(rid, EINVAL);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	/*
	 * In all scenarios, we will attempt to write out only one block worth
	 * of data at maximum. There might be some more efficient approaches,
	 * but this one greatly simplifies fat_write(). Note that we can afford
	 * to do this because the client must be ready to handle the return
	 * value signalizing a smaller number of bytes written.
	 */
	bytes = min(len, bps - pos % bps);
	if (bytes == bps)
		/* Whole block is overwritten; skip reading the old contents. */
		flags |= BLOCK_FLAGS_NOREAD;

	boundary = ROUND_UP(nodep->size, bpc);
	if (pos < boundary) {
		/*
		 * This is the easier case - we are either overwriting already
		 * existing contents or writing behind the EOF, but still within
		 * the limits of the last cluster. The node size may grow to the
		 * next block size boundary.
		 */
		fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos);
		b = fat_block_get(bs, nodep, pos / bps, flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		if (pos + bytes > nodep->size) {
			nodep->size = pos + bytes;
			nodep->dirty = true;	/* need to sync node */
		}
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(nodep);
		return;
	} else {
		/*
		 * This is the more difficult case. We must allocate new
		 * clusters for the node and zero them out.
		 */
		int status;
		unsigned nclsts;	/* number of new clusters needed */
		fat_cluster_t mcl, lcl;	/* first and last cluster of new chain */

		nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
		/* create an independent chain of nclsts clusters in all FATs */
		status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
		if (status != EOK) {
			/* could not allocate a chain of nclsts clusters */
			fat_node_put(nodep);
			ipc_answer_0(callid, status);
			ipc_answer_0(rid, status);
			return;
		}
		/* zero fill any gaps */
		fat_fill_gap(bs, nodep, mcl, pos);
		/* The write lands in the last cluster of the new chain. */
		b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc,
		    flags);
		(void) ipc_data_write_finalize(callid, b->data + pos % bps,
		    bytes);
		b->dirty = true;		/* need to sync block */
		block_put(b);
		/*
		 * Append the cluster chain starting in mcl to the end of the
		 * node's cluster chain.
		 */
		fat_append_clusters(bs, nodep, mcl);
		nodep->size = pos + bytes;
		nodep->dirty = true;		/* need to sync node */
		ipc_answer_2(rid, EOK, bytes, nodep->size);
		fat_node_put(nodep);
		return;
	}
}
- | 743 | ||
/** Handle the VFS_TRUNCATE request.
 *
 * Only shrinking (or a no-op resize) is supported; attempts to grow the
 * node are answered with EINVAL.  When the new size crosses a cluster
 * boundary, the surplus clusters are chopped off the node's chain.
 *
 * @param rid		Request ID to answer.
 * @param request	The VFS_TRUNCATE request call carrying
 *			(dev_handle, index, size) in ARG1..ARG3.
 */
void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
{
	dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
	fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
	/*
	 * NOTE(review): the argument is cast to the signed off_t but stored
	 * in the unsigned size_t — presumably intentional to match pos
	 * handling elsewhere, but worth confirming.
	 */
	size_t size = (off_t)IPC_GET_ARG3(*request);
	fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index);
	fat_bs_t *bs;
	uint16_t bps;		/* bytes per sector */
	uint8_t spc;		/* sectors per cluster */
	unsigned bpc;		/* bytes per cluster */
	int rc;

	if (!nodep) {
		ipc_answer_0(rid, ENOENT);
		return;
	}

	bs = block_bb_get(dev_handle);
	bps = uint16_t_le2host(bs->bps);
	spc = bs->spc;
	bpc = bps * spc;

	if (nodep->size == size) {
		/* Size unchanged: nothing to do. */
		rc = EOK;
	} else if (nodep->size < size) {
		/*
		 * The standard says we have the freedom to grow the node.
		 * For now, we simply return an error.
		 */
		rc = EINVAL;
	} else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
		/*
		 * The node will be shrunk, but no clusters will be deallocated.
		 */
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	} else {
		/*
		 * The node will be shrunk, clusters will be deallocated.
		 */
		if (size == 0) {
			/* Drop the whole chain; node becomes cluster-less. */
			fat_chop_clusters(bs, nodep, FAT_CLST_RES0);
		} else {
			/* Find the new last cluster and cut the chain there. */
			fat_cluster_t lastc;
			(void) fat_cluster_walk(bs, dev_handle, nodep->firstc,
			    &lastc, (size - 1) / bpc);
			fat_chop_clusters(bs, nodep, lastc);
		}
		nodep->size = size;
		nodep->dirty = true;		/* need to sync node */
		rc = EOK;
	}
	fat_node_put(nodep);
	ipc_answer_0(rid, rc);
	return;
}
|
717 | 801 | ||
718 | /** |
802 | /** |
719 | * @} |
803 | * @} |
720 | */ |
804 | */ |
721 | 805 |