Rev 3561 | Rev 3598 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3561 | Rev 3588 | ||
---|---|---|---|
Line 100... | Line 100... | ||
100 | 100 | ||
101 | b->dirty = true; /* need to sync block */ |
101 | b->dirty = true; /* need to sync block */ |
102 | block_put(b); |
102 | block_put(b); |
103 | } |
103 | } |
104 | 104 | ||
/** Obtain an unused FAT in-core node structure.
 *
 * First tries to recycle a node from the free-node cache (ffn_head);
 * if none is available (or its locks cannot be taken without blocking),
 * falls back to allocating a fresh structure from the heap.
 *
 * @return	Initialized fat_node_t, or NULL on out-of-memory.
 */
static fat_node_t *fat_node_get_new(void)
{
	fat_node_t *nodep;

	futex_down(&ffn_futex);
	if (!list_empty(&ffn_head)) {
		/* Try to use a cached free node structure. */
		fat_idx_t *idxp_tmp;
		nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
		/*
		 * Use trydown to respect the lock ordering: blocking here
		 * while holding ffn_futex could deadlock, so on contention
		 * we give up on the cache and allocate instead.
		 */
		if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK)
			goto skip_cache;
		idxp_tmp = nodep->idx;
		if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) {
			futex_up(&nodep->lock);
			goto skip_cache;
		}
		list_remove(&nodep->ffn_link);
		futex_up(&ffn_futex);
		/* Flush the recycled node's state before reuse. */
		if (nodep->dirty)
			fat_node_sync(nodep);
		/* Detach the node from its old index structure. */
		idxp_tmp->nodep = NULL;
		futex_up(&nodep->lock);
		futex_up(&idxp_tmp->lock);
	} else {
skip_cache:
		/* Try to allocate a new node structure. */
		futex_up(&ffn_futex);
		nodep = (fat_node_t *)malloc(sizeof(fat_node_t));
		if (!nodep)
			return NULL;
	}
	/* Reset the node (cached or fresh) to a pristine state. */
	fat_node_initialize(nodep);

	return nodep;
}
- | 140 | ||
105 | /** Internal version of fat_node_get(). |
141 | /** Internal version of fat_node_get(). |
106 | * |
142 | * |
107 | * @param idxp Locked index structure. |
143 | * @param idxp Locked index structure. |
108 | */ |
144 | */ |
109 | static void *fat_node_get_core(fat_idx_t *idxp) |
145 | static void *fat_node_get_core(fat_idx_t *idxp) |
Line 132... | Line 168... | ||
132 | * We must instantiate the node from the file system. |
168 | * We must instantiate the node from the file system. |
133 | */ |
169 | */ |
134 | 170 | ||
135 | assert(idxp->pfc); |
171 | assert(idxp->pfc); |
136 | 172 | ||
137 | futex_down(&ffn_futex); |
- | |
138 | if (!list_empty(&ffn_head)) { |
- | |
139 | /* Try to use a cached free node structure. */ |
- | |
140 | fat_idx_t *idxp_tmp; |
- | |
141 | nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link); |
- | |
142 | if (futex_trydown(&nodep->lock) == ESYNCH_WOULD_BLOCK) |
- | |
143 | goto skip_cache; |
- | |
144 | idxp_tmp = nodep->idx; |
- | |
145 | if (futex_trydown(&idxp_tmp->lock) == ESYNCH_WOULD_BLOCK) { |
- | |
146 | futex_up(&nodep->lock); |
- | |
147 | goto skip_cache; |
- | |
148 | } |
- | |
149 | list_remove(&nodep->ffn_link); |
- | |
150 | futex_up(&ffn_futex); |
- | |
151 | if (nodep->dirty) |
- | |
152 | fat_node_sync(nodep); |
173 | nodep = fat_node_get_new(); |
153 | idxp_tmp->nodep = NULL; |
- | |
154 | futex_up(&nodep->lock); |
- | |
155 | futex_up(&idxp_tmp->lock); |
- | |
156 | } else { |
- | |
157 | skip_cache: |
- | |
158 | /* Try to allocate a new node structure. */ |
- | |
159 | futex_up(&ffn_futex); |
- | |
160 | nodep = (fat_node_t *)malloc(sizeof(fat_node_t)); |
- | |
161 | if (!nodep) |
174 | if (!nodep) |
162 | return NULL; |
175 | return NULL; |
163 | } |
- | |
164 | fat_node_initialize(nodep); |
- | |
165 | 176 | ||
166 | bs = block_bb_get(idxp->dev_handle); |
177 | bs = block_bb_get(idxp->dev_handle); |
167 | bps = uint16_t_le2host(bs->bps); |
178 | bps = uint16_t_le2host(bs->bps); |
168 | spc = bs->spc; |
179 | spc = bs->spc; |
169 | dps = bps / sizeof(fat_dentry_t); |
180 | dps = bps / sizeof(fat_dentry_t); |
Line 231... | Line 242... | ||
231 | futex_up(&ffn_futex); |
242 | futex_up(&ffn_futex); |
232 | } |
243 | } |
233 | futex_up(&nodep->lock); |
244 | futex_up(&nodep->lock); |
234 | } |
245 | } |
235 | 246 | ||
/** Create a new file system node (unsupported stub).
 *
 * @param dev_handle	Device handle of the file system.
 * @param flags		Creation flags (currently ignored).
 *
 * @return	Always NULL; node creation is not implemented yet.
 */
static void *fat_create(dev_handle_t dev_handle, int flags)
{
	return NULL;	/* not supported at the moment */
}
240 | 251 | ||
241 | static int fat_destroy(void *node) |
252 | static int fat_destroy(void *node) |
Line 632... | Line 643... | ||
632 | fat_bs_t *bs; |
643 | fat_bs_t *bs; |
633 | size_t bytes; |
644 | size_t bytes; |
634 | block_t *b; |
645 | block_t *b; |
635 | uint16_t bps; |
646 | uint16_t bps; |
636 | unsigned spc; |
647 | unsigned spc; |
- | 648 | unsigned bpc; /* bytes per cluster */ |
|
637 | off_t boundary; |
649 | off_t boundary; |
638 | 650 | ||
639 | if (!nodep) { |
651 | if (!nodep) { |
640 | ipc_answer_0(rid, ENOENT); |
652 | ipc_answer_0(rid, ENOENT); |
641 | return; |
653 | return; |
Line 648... | Line 660... | ||
648 | ipc_answer_0(callid, EINVAL); |
660 | ipc_answer_0(callid, EINVAL); |
649 | ipc_answer_0(rid, EINVAL); |
661 | ipc_answer_0(rid, EINVAL); |
650 | return; |
662 | return; |
651 | } |
663 | } |
652 | 664 | ||
- | 665 | bs = block_bb_get(dev_handle); |
|
- | 666 | bps = uint16_t_le2host(bs->bps); |
|
- | 667 | spc = bs->spc; |
|
- | 668 | bpc = bps * spc; |
|
- | 669 | ||
653 | /* |
670 | /* |
654 | * In all scenarios, we will attempt to write out only one block worth |
671 | * In all scenarios, we will attempt to write out only one block worth |
655 | * of data at maximum. There might be some more efficient approaches, |
672 | * of data at maximum. There might be some more efficient approaches, |
656 | * but this one greatly simplifies fat_write(). Note that we can afford |
673 | * but this one greatly simplifies fat_write(). Note that we can afford |
657 | * to do this because the client must be ready to handle the return |
674 | * to do this because the client must be ready to handle the return |
658 | * value signalizing a smaller number of bytes written. |
675 | * value signalizing a smaller number of bytes written. |
659 | */ |
676 | */ |
660 | bytes = min(len, bps - pos % bps); |
677 | bytes = min(len, bps - pos % bps); |
661 | - | ||
662 | bs = block_bb_get(dev_handle); |
- | |
663 | bps = uint16_t_le2host(bs->bps); |
- | |
664 | spc = bs->spc; |
- | |
665 | 678 | ||
666 | boundary = ROUND_UP(nodep->size, bps * spc); |
679 | boundary = ROUND_UP(nodep->size, bpc); |
667 | if (pos < boundary) { |
680 | if (pos < boundary) { |
668 | /* |
681 | /* |
669 | * This is the easier case - we are either overwriting already |
682 | * This is the easier case - we are either overwriting already |
670 | * existing contents or writing behind the EOF, but still within |
683 | * existing contents or writing behind the EOF, but still within |
671 | * the limits of the last cluster. The node size may grow to the |
684 | * the limits of the last cluster. The node size may grow to the |
Line 679... | Line 692... | ||
679 | block_put(b); |
692 | block_put(b); |
680 | if (pos + bytes > nodep->size) { |
693 | if (pos + bytes > nodep->size) { |
681 | nodep->size = pos + bytes; |
694 | nodep->size = pos + bytes; |
682 | nodep->dirty = true; /* need to sync node */ |
695 | nodep->dirty = true; /* need to sync node */ |
683 | } |
696 | } |
- | 697 | ipc_answer_2(rid, EOK, bytes, nodep->size); |
|
684 | fat_node_put(nodep); |
698 | fat_node_put(nodep); |
685 | ipc_answer_1(rid, EOK, bytes); |
- | |
686 | return; |
699 | return; |
687 | } else { |
700 | } else { |
688 | /* |
701 | /* |
689 | * This is the more difficult case. We must allocate new |
702 | * This is the more difficult case. We must allocate new |
690 | * clusters for the node and zero them out. |
703 | * clusters for the node and zero them out. |
691 | */ |
704 | */ |
692 | int status; |
705 | int status; |
693 | unsigned nclsts; |
706 | unsigned nclsts; |
694 | fat_cluster_t mcl, lcl; |
707 | fat_cluster_t mcl, lcl; |
695 | 708 | ||
696 | nclsts = (ROUND_UP(pos + bytes, bps * spc) - boundary) / |
709 | nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc; |
697 | bps * spc; |
- | |
698 | /* create an independent chain of nclsts clusters in all FATs */ |
710 | /* create an independent chain of nclsts clusters in all FATs */ |
699 | status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, |
711 | status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl); |
700 | &lcl); |
- | |
701 | if (status != EOK) { |
712 | if (status != EOK) { |
702 | /* could not allocate a chain of nclsts clusters */ |
713 | /* could not allocate a chain of nclsts clusters */ |
703 | fat_node_put(nodep); |
714 | fat_node_put(nodep); |
704 | ipc_answer_0(callid, status); |
715 | ipc_answer_0(callid, status); |
705 | ipc_answer_0(rid, status); |
716 | ipc_answer_0(rid, status); |
Line 717... | Line 728... | ||
717 | * node's cluster chain. |
728 | * node's cluster chain. |
718 | */ |
729 | */ |
719 | fat_append_clusters(bs, nodep, mcl); |
730 | fat_append_clusters(bs, nodep, mcl); |
720 | nodep->size = pos + bytes; |
731 | nodep->size = pos + bytes; |
721 | nodep->dirty = true; /* need to sync node */ |
732 | nodep->dirty = true; /* need to sync node */ |
- | 733 | ipc_answer_2(rid, EOK, bytes, nodep->size); |
|
722 | fat_node_put(nodep); |
734 | fat_node_put(nodep); |
723 | ipc_answer_1(rid, EOK, bytes); |
- | |
724 | return; |
735 | return; |
725 | } |
736 | } |
726 | } |
737 | } |
727 | 738 | ||
728 | void fat_truncate(ipc_callid_t rid, ipc_call_t *request) |
739 | void fat_truncate(ipc_callid_t rid, ipc_call_t *request) |
729 | { |
740 | { |
730 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
741 | dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); |
731 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
742 | fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); |
732 | size_t size = (off_t)IPC_GET_ARG3(*request); |
743 | size_t size = (off_t)IPC_GET_ARG3(*request); |
733 | fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index); |
744 | fat_node_t *nodep = (fat_node_t *)fat_node_get(dev_handle, index); |
- | 745 | fat_bs_t *bs; |
|
- | 746 | uint16_t bps; |
|
- | 747 | uint8_t spc; |
|
- | 748 | unsigned bpc; /* bytes per cluster */ |
|
734 | int rc; |
749 | int rc; |
735 | 750 | ||
736 | if (!nodep) { |
751 | if (!nodep) { |
737 | ipc_answer_0(rid, ENOENT); |
752 | ipc_answer_0(rid, ENOENT); |
738 | return; |
753 | return; |
739 | } |
754 | } |
740 | 755 | ||
- | 756 | bs = block_bb_get(dev_handle); |
|
- | 757 | bps = uint16_t_le2host(bs->bps); |
|
- | 758 | spc = bs->spc; |
|
- | 759 | bpc = bps * spc; |
|
- | 760 | ||
741 | if (nodep->size == size) { |
761 | if (nodep->size == size) { |
742 | rc = EOK; |
762 | rc = EOK; |
743 | } else if (nodep->size < size) { |
763 | } else if (nodep->size < size) { |
744 | /* |
764 | /* |
745 | * TODO: the standard says we have the freedom to grow the file. |
765 | * The standard says we have the freedom to grow the node. |
746 | * For now, we simply return an error. |
766 | * For now, we simply return an error. |
747 | */ |
767 | */ |
748 | rc = EINVAL; |
768 | rc = EINVAL; |
- | 769 | } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) { |
|
- | 770 | /* |
|
- | 771 | * The node will be shrunk, but no clusters will be deallocated. |
|
- | 772 | */ |
|
- | 773 | nodep->size = size; |
|
- | 774 | nodep->dirty = true; /* need to sync node */ |
|
- | 775 | rc = EOK; |
|
749 | } else { |
776 | } else { |
750 | /* |
777 | /* |
751 | * The file is to be shrunk. |
778 | * The node will be shrunk, clusters will be deallocated. |
752 | */ |
779 | */ |
- | 780 | if (size == 0) { |
|
- | 781 | fat_chop_clusters(bs, nodep, FAT_CLST_RES0); |
|
- | 782 | } else { |
|
- | 783 | fat_cluster_t lastc; |
|
- | 784 | (void) fat_cluster_walk(bs, dev_handle, nodep->firstc, |
|
- | 785 | &lastc, (size - 1) / bpc); |
|
- | 786 | fat_chop_clusters(bs, nodep, lastc); |
|
- | 787 | } |
|
- | 788 | nodep->size = size; |
|
- | 789 | nodep->dirty = true; /* need to sync node */ |
|
753 | rc = ENOTSUP; /* XXX */ |
790 | rc = EOK; |
754 | } |
791 | } |
755 | fat_node_put(nodep); |
792 | fat_node_put(nodep); |
756 | ipc_answer_0(rid, rc); |
793 | ipc_answer_0(rid, rc); |
757 | return; |
794 | return; |
758 | - | ||
759 | } |
795 | } |
760 | 796 | ||
761 | /** |
797 | /** |
762 | * @} |
798 | * @} |
763 | */ |
799 | */ |