Rev 102 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 102 | Rev 115 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | #include <mm/heap.h> |
29 | #include <mm/heap.h> |
30 | #include <synch/spinlock.h> |
30 | #include <synch/spinlock.h> |
31 | #include <func.h> |
31 | #include <func.h> |
32 | #include <memstr.h> |
32 | #include <memstr.h> |
33 | #include <panic.h> |
33 | #include <panic.h> |
34 | #include <arch/types.h> |
34 | #include <arch/types.h> |
- | 35 | #include <arch/asm.h> |
|
35 | 36 | ||
/*
 * First-fit algorithm.
 * Simple, but hopefully correct.
 * Chunks being freed are tested for mergeability with their neighbours.
 */
41 | 42 | ||
/* Head of the doubly-linked chunk list; set up by heap_init() to cover the whole heap. */
static chunk_t *chunk0;
/* Protects the chunk list against concurrent malloc()/free(). */
static spinlock_t heaplock;
44 | 45 | ||
45 | void heap_init(__address heap, size_t size) |
46 | void heap_init(__address heap, size_t size) |
46 | { |
47 | { |
47 | spinlock_initialize(&heaplock); |
48 | spinlock_initialize(&heaplock); |
48 | memsetb(heap, size, 0); |
49 | memsetb(heap, size, 0); |
49 | chunk0 = (chunk_t *) heap; |
50 | chunk0 = (chunk_t *) heap; |
50 | chunk0->used = 0; |
51 | chunk0->used = 0; |
51 | chunk0->size = size - sizeof(chunk_t); |
52 | chunk0->size = size - sizeof(chunk_t); |
52 | chunk0->next = NULL; |
53 | chunk0->next = NULL; |
53 | chunk0->prev = NULL; |
54 | chunk0->prev = NULL; |
54 | } |
55 | } |
55 | 56 | ||
56 | /* |
57 | /* |
57 | * Uses first-fit algorithm. |
58 | * Uses first-fit algorithm. |
58 | */ |
59 | */ |
59 | void *malloc(size_t size) |
60 | void *malloc(size_t size) |
60 | { |
61 | { |
61 | pri_t pri; |
62 | pri_t pri; |
62 | chunk_t *x, *y, *z; |
63 | chunk_t *x, *y, *z; |
63 | 64 | ||
64 | if (size == 0) |
65 | if (size == 0) |
65 | panic("zero-size allocation request"); |
66 | panic("zero-size allocation request"); |
66 | 67 | ||
67 | x = chunk0; |
68 | x = chunk0; |
68 | pri = cpu_priority_high(); |
69 | pri = cpu_priority_high(); |
69 | spinlock_lock(&heaplock); |
70 | spinlock_lock(&heaplock); |
70 | while (x) { |
71 | while (x) { |
71 | if (x->used || x->size < size) { |
72 | if (x->used || x->size < size) { |
72 | x = x->next; |
73 | x = x->next; |
73 | continue; |
74 | continue; |
74 | } |
75 | } |
75 | 76 | ||
76 | x->used = 1; |
77 | x->used = 1; |
77 | 78 | ||
78 | /* |
79 | /* |
79 | * If the chunk exactly matches required size or if truncating |
80 | * If the chunk exactly matches required size or if truncating |
80 | * it would not provide enough space for storing a new chunk |
81 | * it would not provide enough space for storing a new chunk |
81 | * header plus at least one byte of data, we are finished. |
82 | * header plus at least one byte of data, we are finished. |
82 | */ |
83 | */ |
83 | if (x->size < size + sizeof(chunk_t) + 1) { |
84 | if (x->size < size + sizeof(chunk_t) + 1) { |
84 | spinlock_unlock(&heaplock); |
85 | spinlock_unlock(&heaplock); |
85 | cpu_priority_restore(pri); |
86 | cpu_priority_restore(pri); |
86 | return &x->data[0]; |
87 | return &x->data[0]; |
87 | } |
88 | } |
88 | 89 | ||
89 | /* |
90 | /* |
90 | * Truncate x and create a new chunk. |
91 | * Truncate x and create a new chunk. |
91 | */ |
92 | */ |
92 | y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t)); |
93 | y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t)); |
93 | y->used = 0; |
94 | y->used = 0; |
94 | y->size = x->size - size - sizeof(chunk_t); |
95 | y->size = x->size - size - sizeof(chunk_t); |
95 | y->prev = x; |
96 | y->prev = x; |
96 | y->next = NULL; |
97 | y->next = NULL; |
97 | 98 | ||
98 | if (z = x->next) { |
99 | if (z = x->next) { |
99 | z->prev = y; |
100 | z->prev = y; |
100 | y->next = z; |
101 | y->next = z; |
101 | } |
102 | } |
102 | 103 | ||
103 | x->size = size; |
104 | x->size = size; |
104 | x->next = y; |
105 | x->next = y; |
105 | spinlock_unlock(&heaplock); |
106 | spinlock_unlock(&heaplock); |
106 | cpu_priority_restore(pri); |
107 | cpu_priority_restore(pri); |
107 | 108 | ||
108 | return &x->data[0]; |
109 | return &x->data[0]; |
109 | } |
110 | } |
110 | spinlock_unlock(&heaplock); |
111 | spinlock_unlock(&heaplock); |
111 | cpu_priority_restore(pri); |
112 | cpu_priority_restore(pri); |
112 | return NULL; |
113 | return NULL; |
113 | } |
114 | } |
114 | 115 | ||
/*
 * Return a chunk previously obtained from malloc() to the heap.
 *
 * The chunk header lives immediately before the data pointer handed
 * out by malloc(). After validation, the chunk is coalesced with its
 * previous and/or next neighbour when those are free, to limit
 * fragmentation.
 *
 * Panics when called with NULL or when the header's used flag does
 * not mark an allocated chunk.
 */
void free(void *ptr)
{
	pri_t pri;
	chunk_t *x, *y, *z;

	if (!ptr)
		panic("free on NULL");


	/* Recover the chunk header preceding the data area. */
	y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t));
	if (y->used != 1)
		panic("freeing unused/damaged chunk");

	pri = cpu_priority_high();
	spinlock_lock(&heaplock);
	x = y->prev;
	z = y->next;
	/* merge x and y */
	if (x && !x->used) {
		/* Absorb y, header included, into its predecessor x. */
		x->size += y->size + sizeof(chunk_t);
		x->next = z;
		if (z)
			z->prev = x;
		/* From here on, y denotes the merged chunk. */
		y = x;
	}
	/* merge y and z or merge (x merged with y) and z */
	if (z && !z->used) {
		/* Absorb the successor z, header included. */
		y->size += z->size + sizeof(chunk_t);
		y->next = z->next;
		if (z->next) {
			/* y is either y or x */
			z->next->prev = y;
		}
	}
	/* Mark the (possibly coalesced) chunk as free. */
	y->used = 0;
	spinlock_unlock(&heaplock);
	cpu_priority_restore(pri);
}
153 | 154 |