Rev 68 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 68 | Rev 72 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | #include <mm/heap.h> |
29 | #include <mm/heap.h> |
30 | #include <synch/spinlock.h> |
30 | #include <synch/spinlock.h> |
31 | #include <func.h> |
31 | #include <func.h> |
32 | #include <memstr.h> |
32 | #include <memstr.h> |
33 | #include <panic.h> |
33 | #include <panic.h> |
34 | #include <arch/types.h> |
34 | #include <arch/types.h> |
35 | 35 | ||
36 | /* |
36 | /* |
37 | * First-fit algorithm. |
37 | * First-fit algorithm. |
38 | * Simple, but hopefully correct. |
38 | * Simple, but hopefully correct. |
39 | * Chunks being freed are tested for mergeability with their neighbours. |
39 | * Chunks being freed are tested for mergeability with their neighbours. |
40 | */ |
40 | */ |
41 | 41 | ||
42 | static chunk_t *chunk0; |
42 | static chunk_t *chunk0; |
43 | static spinlock_t heaplock; |
43 | static spinlock_t heaplock; |
44 | 44 | ||
45 | void heap_init(__address heap, int size) |
45 | void heap_init(__address heap, __u32 size) |
46 | { |
46 | { |
47 | spinlock_initialize(&heaplock); |
47 | spinlock_initialize(&heaplock); |
48 | memsetb(heap,size,0); |
48 | memsetb(heap, size, 0); |
49 | chunk0 = (chunk_t *) heap; |
49 | chunk0 = (chunk_t *) heap; |
50 | chunk0->used = 0; |
50 | chunk0->used = 0; |
51 | chunk0->size = size - sizeof(chunk_t); |
51 | chunk0->size = size - sizeof(chunk_t); |
52 | chunk0->next = NULL; |
52 | chunk0->next = NULL; |
53 | chunk0->prev = NULL; |
53 | chunk0->prev = NULL; |
54 | } |
54 | } |
55 | 55 | ||
56 | /* |
56 | /* |
57 | * Uses first-fit algorithm. |
57 | * Uses first-fit algorithm. |
58 | */ |
58 | */ |
59 | void *malloc(int size) |
59 | void *malloc(size_t size) |
60 | { |
60 | { |
61 | pri_t pri; |
61 | pri_t pri; |
62 | chunk_t *x, *y, *z; |
62 | chunk_t *x, *y, *z; |
63 | 63 | ||
64 | if (size == 0) |
64 | if (size == 0) |
65 | panic("malloc: zero-size allocation request"); |
65 | panic("malloc: zero-size allocation request"); |
66 | 66 | ||
67 | x = chunk0; |
67 | x = chunk0; |
68 | pri = cpu_priority_high(); |
68 | pri = cpu_priority_high(); |
69 | spinlock_lock(&heaplock); |
69 | spinlock_lock(&heaplock); |
70 | while (x) { |
70 | while (x) { |
71 | if (x->used || x->size < size) { |
71 | if (x->used || x->size < size) { |
72 | x = x->next; |
72 | x = x->next; |
73 | continue; |
73 | continue; |
74 | } |
74 | } |
75 | 75 | ||
76 | x->used = 1; |
76 | x->used = 1; |
77 | 77 | ||
78 | /* |
78 | /* |
79 | * If the chunk exactly matches required size or if truncating |
79 | * If the chunk exactly matches required size or if truncating |
80 | * it would not provide enough space for storing a new chunk |
80 | * it would not provide enough space for storing a new chunk |
81 | * header plus at least one byte of data, we are finished. |
81 | * header plus at least one byte of data, we are finished. |
82 | */ |
82 | */ |
83 | if (x->size < size + sizeof(chunk_t) + 1) { |
83 | if (x->size < size + sizeof(chunk_t) + 1) { |
84 | spinlock_unlock(&heaplock); |
84 | spinlock_unlock(&heaplock); |
85 | cpu_priority_restore(pri); |
85 | cpu_priority_restore(pri); |
86 | return &x->data[0]; |
86 | return &x->data[0]; |
87 | } |
87 | } |
88 | 88 | ||
89 | /* |
89 | /* |
90 | * Truncate x and create a new chunk. |
90 | * Truncate x and create a new chunk. |
91 | */ |
91 | */ |
92 | y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t)); |
92 | y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t)); |
93 | y->used = 0; |
93 | y->used = 0; |
94 | y->size = x->size - size - sizeof(chunk_t); |
94 | y->size = x->size - size - sizeof(chunk_t); |
95 | y->prev = x; |
95 | y->prev = x; |
96 | y->next = NULL; |
96 | y->next = NULL; |
97 | 97 | ||
98 | if (z = x->next) { |
98 | if (z = x->next) { |
99 | z->prev = y; |
99 | z->prev = y; |
100 | y->next = z; |
100 | y->next = z; |
101 | } |
101 | } |
102 | 102 | ||
103 | x->size = size; |
103 | x->size = size; |
104 | x->next = y; |
104 | x->next = y; |
105 | spinlock_unlock(&heaplock); |
105 | spinlock_unlock(&heaplock); |
106 | cpu_priority_restore(pri); |
106 | cpu_priority_restore(pri); |
107 | 107 | ||
108 | return &x->data[0]; |
108 | return &x->data[0]; |
109 | } |
109 | } |
110 | spinlock_unlock(&heaplock); |
110 | spinlock_unlock(&heaplock); |
111 | cpu_priority_restore(pri); |
111 | cpu_priority_restore(pri); |
112 | return NULL; |
112 | return NULL; |
113 | } |
113 | } |
114 | 114 | ||
115 | void free(void *ptr) |
115 | void free(void *ptr) |
116 | { |
116 | { |
117 | pri_t pri; |
117 | pri_t pri; |
118 | chunk_t *x, *y, *z; |
118 | chunk_t *x, *y, *z; |
119 | 119 | ||
120 | if (!ptr) |
120 | if (!ptr) |
121 | panic("free on NULL"); |
121 | panic("free on NULL"); |
122 | 122 | ||
123 | 123 | ||
124 | y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t)); |
124 | y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t)); |
125 | if (y->used != 1) |
125 | if (y->used != 1) |
126 | panic("freeing unused/damaged chunk"); |
126 | panic("freeing unused/damaged chunk"); |
127 | 127 | ||
128 | pri = cpu_priority_high(); |
128 | pri = cpu_priority_high(); |
129 | spinlock_lock(&heaplock); |
129 | spinlock_lock(&heaplock); |
130 | x = y->prev; |
130 | x = y->prev; |
131 | z = y->next; |
131 | z = y->next; |
132 | /* merge x and y */ |
132 | /* merge x and y */ |
133 | if (x && !x->used) { |
133 | if (x && !x->used) { |
134 | x->size += y->size + sizeof(chunk_t); |
134 | x->size += y->size + sizeof(chunk_t); |
135 | x->next = z; |
135 | x->next = z; |
136 | if (z) |
136 | if (z) |
137 | z->prev = x; |
137 | z->prev = x; |
138 | y = x; |
138 | y = x; |
139 | } |
139 | } |
140 | /* merge y and z or merge (x merged with y) and z */ |
140 | /* merge y and z or merge (x merged with y) and z */ |
141 | if (z && !z->used) { |
141 | if (z && !z->used) { |
142 | y->size += z->size + sizeof(chunk_t); |
142 | y->size += z->size + sizeof(chunk_t); |
143 | y->next = z->next; |
143 | y->next = z->next; |
144 | if (z->next) { |
144 | if (z->next) { |
145 | /* y is either y or x */ |
145 | /* y is either y or x */ |
146 | z->next->prev = y; |
146 | z->next->prev = y; |
147 | } |
147 | } |
148 | } |
148 | } |
149 | y->used = 0; |
149 | y->used = 0; |
150 | spinlock_unlock(&heaplock); |
150 | spinlock_unlock(&heaplock); |
151 | cpu_priority_restore(pri); |
151 | cpu_priority_restore(pri); |
152 | } |
152 | } |
153 | 153 |