Rev 1 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
1 | jermar | 1 | /* |
2 | * Copyright (C) 2001-2004 Jakub Jermar |
||
3 | * All rights reserved. |
||
4 | * |
||
5 | * Redistribution and use in source and binary forms, with or without |
||
6 | * modification, are permitted provided that the following conditions |
||
7 | * are met: |
||
8 | * |
||
9 | * - Redistributions of source code must retain the above copyright |
||
10 | * notice, this list of conditions and the following disclaimer. |
||
11 | * - Redistributions in binary form must reproduce the above copyright |
||
12 | * notice, this list of conditions and the following disclaimer in the |
||
13 | * documentation and/or other materials provided with the distribution. |
||
14 | * - The name of the author may not be used to endorse or promote products |
||
15 | * derived from this software without specific prior written permission. |
||
16 | * |
||
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
||
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
||
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
||
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
||
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
||
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
||
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
||
27 | */ |
||
28 | |||
29 | #include <mm/heap.h> |
||
30 | #include <synch/spinlock.h> |
||
31 | #include <func.h> |
||
32 | #include <memstr.h> |
||
68 | decky | 33 | #include <panic.h> |
1 | jermar | 34 | #include <arch/types.h> |
35 | |||
36 | /* |
||
37 | * First-fit algorithm. |
||
38 | * Simple, but hopefully correct. |
||
39 | * Chunks being freed are tested for mergeability with their neighbours. |
||
40 | */ |
||
41 | |||
42 | static chunk_t *chunk0; |
||
43 | static spinlock_t heaplock; |
||
44 | |||
45 | void heap_init(__address heap, int size) |
||
46 | { |
||
47 | spinlock_initialize(&heaplock); |
||
48 | memsetb(heap,size,0); |
||
49 | chunk0 = (chunk_t *) heap; |
||
50 | chunk0->used = 0; |
||
51 | chunk0->size = size - sizeof(chunk_t); |
||
52 | chunk0->next = NULL; |
||
53 | chunk0->prev = NULL; |
||
54 | } |
||
55 | |||
56 | /* |
||
57 | * Uses first-fit algorithm. |
||
58 | */ |
||
59 | void *malloc(int size) |
||
60 | { |
||
61 | pri_t pri; |
||
62 | chunk_t *x, *y, *z; |
||
63 | |||
64 | if (size == 0) |
||
65 | panic("malloc: zero-size allocation request"); |
||
66 | |||
67 | x = chunk0; |
||
68 | pri = cpu_priority_high(); |
||
69 | spinlock_lock(&heaplock); |
||
70 | while (x) { |
||
71 | if (x->used || x->size < size) { |
||
72 | x = x->next; |
||
73 | continue; |
||
74 | } |
||
75 | |||
76 | x->used = 1; |
||
77 | |||
78 | /* |
||
79 | * If the chunk exactly matches required size or if truncating |
||
80 | * it would not provide enough space for storing a new chunk |
||
81 | * header plus at least one byte of data, we are finished. |
||
82 | */ |
||
83 | if (x->size < size + sizeof(chunk_t) + 1) { |
||
84 | spinlock_unlock(&heaplock); |
||
85 | cpu_priority_restore(pri); |
||
86 | return &x->data[0]; |
||
87 | } |
||
88 | |||
89 | /* |
||
90 | * Truncate x and create a new chunk. |
||
91 | */ |
||
92 | y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t)); |
||
93 | y->used = 0; |
||
94 | y->size = x->size - size - sizeof(chunk_t); |
||
95 | y->prev = x; |
||
96 | y->next = NULL; |
||
97 | |||
98 | if (z = x->next) { |
||
99 | z->prev = y; |
||
100 | y->next = z; |
||
101 | } |
||
102 | |||
103 | x->size = size; |
||
104 | x->next = y; |
||
105 | spinlock_unlock(&heaplock); |
||
106 | cpu_priority_restore(pri); |
||
107 | |||
108 | return &x->data[0]; |
||
109 | } |
||
110 | spinlock_unlock(&heaplock); |
||
111 | cpu_priority_restore(pri); |
||
112 | return NULL; |
||
113 | } |
||
114 | |||
115 | void free(void *ptr) |
||
116 | { |
||
117 | pri_t pri; |
||
118 | chunk_t *x, *y, *z; |
||
119 | |||
120 | if (!ptr) |
||
121 | panic("free on NULL"); |
||
122 | |||
123 | |||
124 | y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t)); |
||
125 | if (y->used != 1) |
||
126 | panic("freeing unused/damaged chunk"); |
||
127 | |||
128 | pri = cpu_priority_high(); |
||
129 | spinlock_lock(&heaplock); |
||
130 | x = y->prev; |
||
131 | z = y->next; |
||
132 | /* merge x and y */ |
||
133 | if (x && !x->used) { |
||
134 | x->size += y->size + sizeof(chunk_t); |
||
135 | x->next = z; |
||
136 | if (z) |
||
137 | z->prev = x; |
||
138 | y = x; |
||
139 | } |
||
140 | /* merge y and z or merge (x merged with y) and z */ |
||
141 | if (z && !z->used) { |
||
142 | y->size += z->size + sizeof(chunk_t); |
||
143 | y->next = z->next; |
||
144 | if (z->next) { |
||
145 | /* y is either y or x */ |
||
146 | z->next->prev = y; |
||
147 | } |
||
148 | } |
||
149 | y->used = 0; |
||
150 | spinlock_unlock(&heaplock); |
||
151 | cpu_priority_restore(pri); |
||
152 | } |