Rev 985 | Rev 1787 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 985 | Rev 1653 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | Default header file for malloc-2.8.x, written by Doug Lea |
2 | Default header file for malloc-2.8.x, written by Doug Lea |
3 | and released to the public domain, as explained at |
3 | and released to the public domain, as explained at |
4 | http://creativecommons.org/licenses/publicdomain. |
4 | http://creativecommons.org/licenses/publicdomain. |
5 | |
5 | |
6 | last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee) |
6 | last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee) |
7 | 7 | ||
8 | This header is for ANSI C/C++ only. You can set any of |
8 | This header is for ANSI C/C++ only. You can set any of |
9 | the following #defines before including: |
9 | the following #defines before including: |
10 | 10 | ||
11 | * If USE_DL_PREFIX is defined, it is assumed that malloc.c |
11 | * If USE_DL_PREFIX is defined, it is assumed that malloc.c |
12 | was also compiled with this option, so all routines |
12 | was also compiled with this option, so all routines |
13 | have names starting with "dl". |
13 | have names starting with "dl". |
14 | 14 | ||
15 | * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this |
15 | * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this |
16 | file will be #included AFTER <malloc.h>. This is needed only if |
16 | file will be #included AFTER <malloc.h>. This is needed only if |
17 | your system defines a struct mallinfo that is incompatible with the |
17 | your system defines a struct mallinfo that is incompatible with the |
18 | standard one declared here. Otherwise, you can include this file |
18 | standard one declared here. Otherwise, you can include this file |
19 | INSTEAD of your system <malloc.h>. At least on ANSI, all |
19 | INSTEAD of your system <malloc.h>. At least on ANSI, all |
20 | declarations should be compatible with system versions |
20 | declarations should be compatible with system versions |
21 | 21 | ||
22 | * If MSPACES is defined, declarations for mspace versions are included. |
22 | * If MSPACES is defined, declarations for mspace versions are included. |
23 | */ |
23 | */ |
24 | 24 | ||
25 | #ifndef MALLOC_280_H |
25 | #ifndef MALLOC_280_H |
26 | #define MALLOC_280_H |
26 | #define MALLOC_280_H |
27 | 27 | ||
28 | #ifdef __cplusplus |
28 | #ifdef __cplusplus |
29 | extern "C" { |
29 | extern "C" { |
30 | #endif |
30 | #endif |
31 | 31 | ||
32 | #include <stddef.h> /* for size_t */ |
32 | #include <stddef.h> /* for size_t */ |
33 | 33 | ||
34 | #if !ONLY_MSPACES |
34 | #if !ONLY_MSPACES |
35 | 35 | ||
36 | #ifndef USE_DL_PREFIX |
36 | #ifndef USE_DL_PREFIX |
37 | #define dlcalloc calloc |
37 | #define dlcalloc calloc |
38 | #define dlfree free |
38 | #define dlfree free |
39 | #define dlmalloc malloc |
39 | #define dlmalloc malloc |
40 | #define dlmemalign memalign |
40 | #define dlmemalign memalign |
41 | #define dlrealloc realloc |
41 | #define dlrealloc realloc |
42 | #define dlvalloc valloc |
42 | #define dlvalloc valloc |
43 | #define dlpvalloc pvalloc |
43 | #define dlpvalloc pvalloc |
44 | #define dlmallinfo mallinfo |
44 | #define dlmallinfo mallinfo |
45 | #define dlmallopt mallopt |
45 | #define dlmallopt mallopt |
46 | #define dlmalloc_trim malloc_trim |
46 | #define dlmalloc_trim malloc_trim |
47 | #define dlmalloc_stats malloc_stats |
47 | #define dlmalloc_stats malloc_stats |
48 | #define dlmalloc_usable_size malloc_usable_size |
48 | #define dlmalloc_usable_size malloc_usable_size |
49 | #define dlmalloc_footprint malloc_footprint |
49 | #define dlmalloc_footprint malloc_footprint |
50 | #define dlmalloc_max_footprint malloc_max_footprint |
50 | #define dlmalloc_max_footprint malloc_max_footprint |
51 | #define dlindependent_calloc independent_calloc |
51 | #define dlindependent_calloc independent_calloc |
52 | #define dlindependent_comalloc independent_comalloc |
52 | #define dlindependent_comalloc independent_comalloc |
53 | #endif /* USE_DL_PREFIX */ |
53 | #endif /* USE_DL_PREFIX */ |
54 | 54 | ||
55 | 55 | ||
56 | /* |
56 | /* |
57 | malloc(size_t n) |
57 | malloc(size_t n) |
58 | Returns a pointer to a newly allocated chunk of at least n bytes, or |
58 | Returns a pointer to a newly allocated chunk of at least n bytes, or |
59 | null if no space is available, in which case errno is set to ENOMEM |
59 | null if no space is available, in which case errno is set to ENOMEM |
60 | on ANSI C systems. |
60 | on ANSI C systems. |
61 | 61 | ||
62 | If n is zero, malloc returns a minimum-sized chunk. (The minimum |
62 | If n is zero, malloc returns a minimum-sized chunk. (The minimum |
63 | size is 16 bytes on most 32bit systems, and 32 bytes on 64bit |
63 | size is 16 bytes on most 32bit systems, and 32 bytes on 64bit |
64 | systems.) Note that size_t is an unsigned type, so calls with |
64 | systems.) Note that size_t is an unsigned type, so calls with |
65 | arguments that would be negative if signed are interpreted as |
65 | arguments that would be negative if signed are interpreted as |
66 | requests for huge amounts of space, which will often fail. The |
66 | requests for huge amounts of space, which will often fail. The |
67 | maximum supported value of n differs across systems, but is in all |
67 | maximum supported value of n differs across systems, but is in all |
68 | cases less than the maximum representable value of a size_t. |
68 | cases less than the maximum representable value of a size_t. |
69 | */ |
69 | */ |
70 | void* dlmalloc(size_t); |
70 | void* dlmalloc(size_t); |
71 | 71 | ||
72 | /* |
72 | /* |
73 | free(void* p) |
73 | free(void* p) |
74 | Releases the chunk of memory pointed to by p, that had been previously |
74 | Releases the chunk of memory pointed to by p, that had been previously |
75 | allocated using malloc or a related routine such as realloc. |
75 | allocated using malloc or a related routine such as realloc. |
76 | It has no effect if p is null. If p was not malloced or already |
76 | It has no effect if p is null. If p was not malloced or already |
77 | freed, free(p) will by default cause the current program to abort. |
77 | freed, free(p) will by default cause the current program to abort. |
78 | */ |
78 | */ |
79 | void dlfree(void*); |
79 | void dlfree(void*); |
80 | 80 | ||
81 | /* |
81 | /* |
82 | calloc(size_t n_elements, size_t element_size); |
82 | calloc(size_t n_elements, size_t element_size); |
83 | Returns a pointer to n_elements * element_size bytes, with all locations |
83 | Returns a pointer to n_elements * element_size bytes, with all locations |
84 | set to zero. |
84 | set to zero. |
85 | */ |
85 | */ |
86 | void* dlcalloc(size_t, size_t); |
86 | void* dlcalloc(size_t, size_t); |
87 | 87 | ||
88 | /* |
88 | /* |
89 | realloc(void* p, size_t n) |
89 | realloc(void* p, size_t n) |
90 | Returns a pointer to a chunk of size n that contains the same data |
90 | Returns a pointer to a chunk of size n that contains the same data |
91 | as does chunk p up to the minimum of (n, p's size) bytes, or null |
91 | as does chunk p up to the minimum of (n, p's size) bytes, or null |
92 | if no space is available. |
92 | if no space is available. |
93 | 93 | ||
94 | The returned pointer may or may not be the same as p. The algorithm |
94 | The returned pointer may or may not be the same as p. The algorithm |
95 | prefers extending p in most cases when possible, otherwise it |
95 | prefers extending p in most cases when possible, otherwise it |
96 | employs the equivalent of a malloc-copy-free sequence. |
96 | employs the equivalent of a malloc-copy-free sequence. |
97 | 97 | ||
98 | If p is null, realloc is equivalent to malloc. |
98 | If p is null, realloc is equivalent to malloc. |
99 | 99 | ||
100 | If space is not available, realloc returns null, errno is set (if on |
100 | If space is not available, realloc returns null, errno is set (if on |
101 | ANSI) and p is NOT freed. |
101 | ANSI) and p is NOT freed. |
102 | 102 | ||
103 | if n is for fewer bytes than already held by p, the newly unused |
103 | if n is for fewer bytes than already held by p, the newly unused |
104 | space is lopped off and freed if possible. realloc with a size |
104 | space is lopped off and freed if possible. realloc with a size |
105 | argument of zero (re)allocates a minimum-sized chunk. |
105 | argument of zero (re)allocates a minimum-sized chunk. |
106 | 106 | ||
107 | The old unix realloc convention of allowing the last-free'd chunk |
107 | The old unix realloc convention of allowing the last-free'd chunk |
108 | to be used as an argument to realloc is not supported. |
108 | to be used as an argument to realloc is not supported. |
109 | */ |
109 | */ |
110 | 110 | ||
111 | void* dlrealloc(void*, size_t); |
111 | void* dlrealloc(void*, size_t); |
112 | 112 | ||
113 | /* |
113 | /* |
114 | memalign(size_t alignment, size_t n); |
114 | memalign(size_t alignment, size_t n); |
115 | Returns a pointer to a newly allocated chunk of n bytes, aligned |
115 | Returns a pointer to a newly allocated chunk of n bytes, aligned |
116 | in accord with the alignment argument. |
116 | in accord with the alignment argument. |
117 | 117 | ||
118 | The alignment argument should be a power of two. If the argument is |
118 | The alignment argument should be a power of two. If the argument is |
119 | not a power of two, the nearest greater power is used. |
119 | not a power of two, the nearest greater power is used. |
120 | 8-byte alignment is guaranteed by normal malloc calls, so don't |
120 | 8-byte alignment is guaranteed by normal malloc calls, so don't |
121 | bother calling memalign with an argument of 8 or less. |
121 | bother calling memalign with an argument of 8 or less. |
122 | 122 | ||
123 | Overreliance on memalign is a sure way to fragment space. |
123 | Overreliance on memalign is a sure way to fragment space. |
124 | */ |
124 | */ |
125 | void* dlmemalign(size_t, size_t); |
125 | void* dlmemalign(size_t, size_t); |
126 | 126 | ||
127 | /* |
127 | /* |
128 | valloc(size_t n); |
128 | valloc(size_t n); |
129 | Equivalent to memalign(pagesize, n), where pagesize is the page |
129 | Equivalent to memalign(pagesize, n), where pagesize is the page |
130 | size of the system. If the pagesize is unknown, 4096 is used. |
130 | size of the system. If the pagesize is unknown, 4096 is used. |
131 | */ |
131 | */ |
132 | void* dlvalloc(size_t); |
132 | void* dlvalloc(size_t); |
133 | 133 | ||
134 | /* |
134 | /* |
135 | mallopt(int parameter_number, int parameter_value) |
135 | mallopt(int parameter_number, int parameter_value) |
136 | Sets tunable parameters. The format is to provide a |
136 | Sets tunable parameters. The format is to provide a |
137 | (parameter-number, parameter-value) pair. mallopt then sets the |
137 | (parameter-number, parameter-value) pair. mallopt then sets the |
138 | corresponding parameter to the argument value if it can (i.e., so |
138 | corresponding parameter to the argument value if it can (i.e., so |
139 | long as the value is meaningful), and returns 1 if successful else |
139 | long as the value is meaningful), and returns 1 if successful else |
140 | 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, |
140 | 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, |
141 | normally defined in malloc.h. None of these are used in this malloc, |
141 | normally defined in malloc.h. None of these are used in this malloc, |
142 | so setting them has no effect. But this malloc also supports other |
142 | so setting them has no effect. But this malloc also supports other |
143 | options in mallopt: |
143 | options in mallopt: |
144 | 144 | ||
145 | Symbol param # default allowed param values |
145 | Symbol param # default allowed param values |
146 | M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming) |
146 | M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming) |
147 | M_GRANULARITY -2 page size any power of 2 >= page size |
147 | M_GRANULARITY -2 page size any power of 2 >= page size |
148 | M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) |
148 | M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) |
149 | */ |
149 | */ |
150 | int dlmallopt(int, int); |
150 | int dlmallopt(int, int); |
151 | 151 | ||
152 | #define M_TRIM_THRESHOLD (-1) |
152 | #define M_TRIM_THRESHOLD (-1) |
153 | #define M_GRANULARITY (-2) |
153 | #define M_GRANULARITY (-2) |
154 | #define M_MMAP_THRESHOLD (-3) |
154 | #define M_MMAP_THRESHOLD (-3) |
155 | 155 | ||
156 | 156 | ||
157 | /* |
157 | /* |
158 | malloc_footprint(); |
158 | malloc_footprint(); |
159 | Returns the number of bytes obtained from the system. The total |
159 | Returns the number of bytes obtained from the system. The total |
160 | number of bytes allocated by malloc, realloc etc., is less than this |
160 | number of bytes allocated by malloc, realloc etc., is less than this |
161 | value. Unlike mallinfo, this function returns only a precomputed |
161 | value. Unlike mallinfo, this function returns only a precomputed |
162 | result, so can be called frequently to monitor memory consumption. |
162 | result, so can be called frequently to monitor memory consumption. |
163 | Even if locks are otherwise defined, this function does not use them, |
163 | Even if locks are otherwise defined, this function does not use them, |
164 | so results might not be up to date. |
164 | so results might not be up to date. |
165 | */ |
165 | */ |
166 | size_t dlmalloc_footprint(void); |
166 | size_t dlmalloc_footprint(void); |
167 | size_t dlmalloc_max_footprint(void); |
167 | size_t dlmalloc_max_footprint(void); |
168 | 168 | ||
169 | #if !NO_MALLINFO |
169 | #if !NO_MALLINFO |
170 | /* |
170 | /* |
171 | mallinfo() |
171 | mallinfo() |
172 | Returns (by copy) a struct containing various summary statistics: |
172 | Returns (by copy) a struct containing various summary statistics: |
173 | 173 | ||
174 | arena: current total non-mmapped bytes allocated from system |
174 | arena: current total non-mmapped bytes allocated from system |
175 | ordblks: the number of free chunks |
175 | ordblks: the number of free chunks |
176 | smblks: always zero. |
176 | smblks: always zero. |
177 | hblks: current number of mmapped regions |
177 | hblks: current number of mmapped regions |
178 | hblkhd: total bytes held in mmapped regions |
178 | hblkhd: total bytes held in mmapped regions |
179 | usmblks: the maximum total allocated space. This will be greater |
179 | usmblks: the maximum total allocated space. This will be greater |
180 | than current total if trimming has occurred. |
180 | than current total if trimming has occurred. |
181 | fsmblks: always zero |
181 | fsmblks: always zero |
182 | uordblks: current total allocated space (normal or mmapped) |
182 | uordblks: current total allocated space (normal or mmapped) |
183 | fordblks: total free space |
183 | fordblks: total free space |
184 | keepcost: the maximum number of bytes that could ideally be released |
184 | keepcost: the maximum number of bytes that could ideally be released |
185 | back to system via malloc_trim. ("ideally" means that |
185 | back to system via malloc_trim. ("ideally" means that |
186 | it ignores page restrictions etc.) |
186 | it ignores page restrictions etc.) |
187 | 187 | ||
188 | Because these fields are ints, but internal bookkeeping may |
188 | Because these fields are ints, but internal bookkeeping may |
189 | be kept as longs, the reported values may wrap around zero and |
189 | be kept as longs, the reported values may wrap around zero and |
190 | thus be inaccurate. |
190 | thus be inaccurate. |
191 | */ |
191 | */ |
192 | #ifndef HAVE_USR_INCLUDE_MALLOC_H |
192 | #ifndef HAVE_USR_INCLUDE_MALLOC_H |
193 | #ifndef _MALLOC_H |
193 | #ifndef _MALLOC_H |
194 | #ifndef MALLINFO_FIELD_TYPE |
194 | #ifndef MALLINFO_FIELD_TYPE |
195 | #define MALLINFO_FIELD_TYPE size_t |
195 | #define MALLINFO_FIELD_TYPE size_t |
196 | #endif /* MALLINFO_FIELD_TYPE */ |
196 | #endif /* MALLINFO_FIELD_TYPE */ |
197 | struct mallinfo { |
197 | struct mallinfo { |
198 | MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ |
198 | MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ |
199 | MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ |
199 | MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ |
200 | MALLINFO_FIELD_TYPE smblks; /* always 0 */ |
200 | MALLINFO_FIELD_TYPE smblks; /* always 0 */ |
201 | MALLINFO_FIELD_TYPE hblks; /* always 0 */ |
201 | MALLINFO_FIELD_TYPE hblks; /* always 0 */ |
202 | MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ |
202 | MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ |
203 | MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ |
203 | MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ |
204 | MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ |
204 | MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ |
205 | MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ |
205 | MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ |
206 | MALLINFO_FIELD_TYPE fordblks; /* total free space */ |
206 | MALLINFO_FIELD_TYPE fordblks; /* total free space */ |
207 | MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ |
207 | MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ |
208 | }; |
208 | }; |
209 | #endif /* _MALLOC_H */ |
209 | #endif /* _MALLOC_H */ |
210 | #endif /* HAVE_USR_INCLUDE_MALLOC_H */ |
210 | #endif /* HAVE_USR_INCLUDE_MALLOC_H */ |
211 | 211 | ||
212 | struct mallinfo dlmallinfo(void); |
212 | struct mallinfo dlmallinfo(void); |
213 | #endif /* NO_MALLINFO */ |
213 | #endif /* NO_MALLINFO */ |
214 | 214 | ||
215 | /* |
215 | /* |
216 | independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); |
216 | independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); |
217 | 217 | ||
218 | independent_calloc is similar to calloc, but instead of returning a |
218 | independent_calloc is similar to calloc, but instead of returning a |
219 | single cleared space, it returns an array of pointers to n_elements |
219 | single cleared space, it returns an array of pointers to n_elements |
220 | independent elements that can hold contents of size elem_size, each |
220 | independent elements that can hold contents of size elem_size, each |
221 | of which starts out cleared, and can be independently freed, |
221 | of which starts out cleared, and can be independently freed, |
222 | realloc'ed etc. The elements are guaranteed to be adjacently |
222 | realloc'ed etc. The elements are guaranteed to be adjacently |
223 | allocated (this is not guaranteed to occur with multiple callocs or |
223 | allocated (this is not guaranteed to occur with multiple callocs or |
224 | mallocs), which may also improve cache locality in some |
224 | mallocs), which may also improve cache locality in some |
225 | applications. |
225 | applications. |
226 | 226 | ||
227 | The "chunks" argument is optional (i.e., may be null, which is |
227 | The "chunks" argument is optional (i.e., may be null, which is |
228 | probably the most typical usage). If it is null, the returned array |
228 | probably the most typical usage). If it is null, the returned array |
229 | is itself dynamically allocated and should also be freed when it is |
229 | is itself dynamically allocated and should also be freed when it is |
230 | no longer needed. Otherwise, the chunks array must be of at least |
230 | no longer needed. Otherwise, the chunks array must be of at least |
231 | n_elements in length. It is filled in with the pointers to the |
231 | n_elements in length. It is filled in with the pointers to the |
232 | chunks. |
232 | chunks. |
233 | 233 | ||
234 | In either case, independent_calloc returns this pointer array, or |
234 | In either case, independent_calloc returns this pointer array, or |
235 | null if the allocation failed. If n_elements is zero and "chunks" |
235 | null if the allocation failed. If n_elements is zero and "chunks" |
236 | is null, it returns a chunk representing an array with zero elements |
236 | is null, it returns a chunk representing an array with zero elements |
237 | (which should be freed if not wanted). |
237 | (which should be freed if not wanted). |
238 | 238 | ||
239 | Each element must be individually freed when it is no longer |
239 | Each element must be individually freed when it is no longer |
240 | needed. If you'd like to instead be able to free all at once, you |
240 | needed. If you'd like to instead be able to free all at once, you |
241 | should instead use regular calloc and assign pointers into this |
241 | should instead use regular calloc and assign pointers into this |
242 | space to represent elements. (In this case though, you cannot |
242 | space to represent elements. (In this case though, you cannot |
243 | independently free elements.) |
243 | independently free elements.) |
244 | 244 | ||
245 | independent_calloc simplifies and speeds up implementations of many |
245 | independent_calloc simplifies and speeds up implementations of many |
246 | kinds of pools. It may also be useful when constructing large data |
246 | kinds of pools. It may also be useful when constructing large data |
247 | structures that initially have a fixed number of fixed-sized nodes, |
247 | structures that initially have a fixed number of fixed-sized nodes, |
248 | but the number is not known at compile time, and some of the nodes |
248 | but the number is not known at compile time, and some of the nodes |
249 | may later need to be freed. For example: |
249 | may later need to be freed. For example: |
250 | 250 | ||
251 | struct Node { int item; struct Node* next; }; |
251 | struct Node { int item; struct Node* next; }; |
252 | 252 | ||
253 | struct Node* build_list() { |
253 | struct Node* build_list() { |
254 | struct Node** pool; |
254 | struct Node** pool; |
255 | int n = read_number_of_nodes_needed(); |
255 | int n = read_number_of_nodes_needed(); |
256 | if (n <= 0) return 0; |
256 | if (n <= 0) return 0; |
257 | pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0); |
257 | pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0); |
258 | if (pool == 0) die(); |
258 | if (pool == 0) die(); |
259 | // organize into a linked list... |
259 | // organize into a linked list... |
260 | struct Node* first = pool[0]; |
260 | struct Node* first = pool[0]; |
261 | for (i = 0; i < n-1; ++i) |
261 | for (i = 0; i < n-1; ++i) |
262 | pool[i]->next = pool[i+1]; |
262 | pool[i]->next = pool[i+1]; |
263 | free(pool); // Can now free the array (or not, if it is needed later) |
263 | free(pool); // Can now free the array (or not, if it is needed later) |
264 | return first; |
264 | return first; |
265 | } |
265 | } |
266 | */ |
266 | */ |
267 | void** dlindependent_calloc(size_t, size_t, void**); |
267 | void** dlindependent_calloc(size_t, size_t, void**); |
268 | 268 | ||
269 | /* |
269 | /* |
270 | independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); |
270 | independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); |
271 | 271 | ||
272 | independent_comalloc allocates, all at once, a set of n_elements |
272 | independent_comalloc allocates, all at once, a set of n_elements |
273 | chunks with sizes indicated in the "sizes" array. It returns |
273 | chunks with sizes indicated in the "sizes" array. It returns |
274 | an array of pointers to these elements, each of which can be |
274 | an array of pointers to these elements, each of which can be |
275 | independently freed, realloc'ed etc. The elements are guaranteed to |
275 | independently freed, realloc'ed etc. The elements are guaranteed to |
276 | be adjacently allocated (this is not guaranteed to occur with |
276 | be adjacently allocated (this is not guaranteed to occur with |
277 | multiple callocs or mallocs), which may also improve cache locality |
277 | multiple callocs or mallocs), which may also improve cache locality |
278 | in some applications. |
278 | in some applications. |
279 | 279 | ||
280 | The "chunks" argument is optional (i.e., may be null). If it is null |
280 | The "chunks" argument is optional (i.e., may be null). If it is null |
281 | the returned array is itself dynamically allocated and should also |
281 | the returned array is itself dynamically allocated and should also |
282 | be freed when it is no longer needed. Otherwise, the chunks array |
282 | be freed when it is no longer needed. Otherwise, the chunks array |
283 | must be of at least n_elements in length. It is filled in with the |
283 | must be of at least n_elements in length. It is filled in with the |
284 | pointers to the chunks. |
284 | pointers to the chunks. |
285 | 285 | ||
286 | In either case, independent_comalloc returns this pointer array, or |
286 | In either case, independent_comalloc returns this pointer array, or |
287 | null if the allocation failed. If n_elements is zero and chunks is |
287 | null if the allocation failed. If n_elements is zero and chunks is |
288 | null, it returns a chunk representing an array with zero elements |
288 | null, it returns a chunk representing an array with zero elements |
289 | (which should be freed if not wanted). |
289 | (which should be freed if not wanted). |
290 | 290 | ||
291 | Each element must be individually freed when it is no longer |
291 | Each element must be individually freed when it is no longer |
292 | needed. If you'd like to instead be able to free all at once, you |
292 | needed. If you'd like to instead be able to free all at once, you |
293 | should instead use a single regular malloc, and assign pointers at |
293 | should instead use a single regular malloc, and assign pointers at |
294 | particular offsets in the aggregate space. (In this case though, you |
294 | particular offsets in the aggregate space. (In this case though, you |
295 | cannot independently free elements.) |
295 | cannot independently free elements.) |
296 | 296 | ||
297 | independent_comalloc differs from independent_calloc in that each |
297 | independent_comalloc differs from independent_calloc in that each |
298 | element may have a different size, and also that it does not |
298 | element may have a different size, and also that it does not |
299 | automatically clear elements. |
299 | automatically clear elements. |
300 | 300 | ||
301 | independent_comalloc can be used to speed up allocation in cases |
301 | independent_comalloc can be used to speed up allocation in cases |
302 | where several structs or objects must always be allocated at the |
302 | where several structs or objects must always be allocated at the |
303 | same time. For example: |
303 | same time. For example: |
304 | 304 | ||
305 | struct Head { ... } |
305 | struct Head { ... } |
306 | struct Foot { ... } |
306 | struct Foot { ... } |
307 | 307 | ||
308 | void send_message(char* msg) { |
308 | void send_message(char* msg) { |
309 | int msglen = strlen(msg); |
309 | int msglen = strlen(msg); |
310 | size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; |
310 | size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; |
311 | void* chunks[3]; |
311 | void* chunks[3]; |
312 | if (independent_comalloc(3, sizes, chunks) == 0) |
312 | if (independent_comalloc(3, sizes, chunks) == 0) |
313 | die(); |
313 | die(); |
314 | struct Head* head = (struct Head*)(chunks[0]); |
314 | struct Head* head = (struct Head*)(chunks[0]); |
315 | char* body = (char*)(chunks[1]); |
315 | char* body = (char*)(chunks[1]); |
316 | struct Foot* foot = (struct Foot*)(chunks[2]); |
316 | struct Foot* foot = (struct Foot*)(chunks[2]); |
317 | // ... |
317 | // ... |
318 | } |
318 | } |
319 | 319 | ||
320 | In general though, independent_comalloc is worth using only for |
320 | In general though, independent_comalloc is worth using only for |
321 | larger values of n_elements. For small values, you probably won't |
321 | larger values of n_elements. For small values, you probably won't |
322 | detect enough difference from series of malloc calls to bother. |
322 | detect enough difference from series of malloc calls to bother. |
323 | 323 | ||
324 | Overuse of independent_comalloc can increase overall memory usage, |
324 | Overuse of independent_comalloc can increase overall memory usage, |
325 | since it cannot reuse existing noncontiguous small chunks that |
325 | since it cannot reuse existing noncontiguous small chunks that |
326 | might be available for some of the elements. |
326 | might be available for some of the elements. |
327 | */ |
327 | */ |
328 | void** dlindependent_comalloc(size_t, size_t*, void**); |
328 | void** dlindependent_comalloc(size_t, size_t*, void**); |
329 | 329 | ||
330 | 330 | ||
331 | /* |
331 | /* |
332 | pvalloc(size_t n); |
332 | pvalloc(size_t n); |
333 | Equivalent to valloc(minimum-page-that-holds(n)), that is, |
333 | Equivalent to valloc(minimum-page-that-holds(n)), that is, |
334 | round up n to nearest pagesize. |
334 | round up n to nearest pagesize. |
335 | */ |
335 | */ |
336 | void* dlpvalloc(size_t); |
336 | void* dlpvalloc(size_t); |
337 | 337 | ||
338 | /* |
338 | /* |
339 | malloc_trim(size_t pad); |
339 | malloc_trim(size_t pad); |
340 | 340 | ||
341 | If possible, gives memory back to the system (via negative arguments |
341 | If possible, gives memory back to the system (via negative arguments |
342 | to sbrk) if there is unused memory at the `high' end of the malloc |
342 | to sbrk) if there is unused memory at the `high' end of the malloc |
343 | pool or in unused MMAP segments. You can call this after freeing |
343 | pool or in unused MMAP segments. You can call this after freeing |
344 | large blocks of memory to potentially reduce the system-level memory |
344 | large blocks of memory to potentially reduce the system-level memory |
345 | requirements of a program. However, it cannot guarantee to reduce |
345 | requirements of a program. However, it cannot guarantee to reduce |
346 | memory. Under some allocation patterns, some large free blocks of |
346 | memory. Under some allocation patterns, some large free blocks of |
347 | memory will be locked between two used chunks, so they cannot be |
347 | memory will be locked between two used chunks, so they cannot be |
348 | given back to the system. |
348 | given back to the system. |
349 | 349 | ||
350 | The `pad' argument to malloc_trim represents the amount of free |
350 | The `pad' argument to malloc_trim represents the amount of free |
351 | trailing space to leave untrimmed. If this argument is zero, only |
351 | trailing space to leave untrimmed. If this argument is zero, only |
352 | the minimum amount of memory to maintain internal data structures |
352 | the minimum amount of memory to maintain internal data structures |
353 | will be left. Non-zero arguments can be supplied to maintain enough |
353 | will be left. Non-zero arguments can be supplied to maintain enough |
354 | trailing space to service future expected allocations without having |
354 | trailing space to service future expected allocations without having |
355 | to re-obtain memory from the system. |
355 | to re-obtain memory from the system. |
356 | 356 | ||
357 | Malloc_trim returns 1 if it actually released any memory, else 0. |
357 | Malloc_trim returns 1 if it actually released any memory, else 0. |
358 | */ |
358 | */ |
359 | int dlmalloc_trim(size_t); |
359 | int dlmalloc_trim(size_t); |
360 | 360 | ||
361 | /* |
361 | /* |
362 | malloc_usable_size(void* p); |
362 | malloc_usable_size(void* p); |
363 | 363 | ||
364 | Returns the number of bytes you can actually use in |
364 | Returns the number of bytes you can actually use in |
365 | an allocated chunk, which may be more than you requested (although |
365 | an allocated chunk, which may be more than you requested (although |
366 | often not) due to alignment and minimum size constraints. |
366 | often not) due to alignment and minimum size constraints. |
367 | You can use this many bytes without worrying about |
367 | You can use this many bytes without worrying about |
368 | overwriting other allocated objects. This is not a particularly great |
368 | overwriting other allocated objects. This is not a particularly great |
369 | programming practice. malloc_usable_size can be more useful in |
369 | programming practice. malloc_usable_size can be more useful in |
370 | debugging and assertions, for example: |
370 | debugging and assertions, for example: |
371 | 371 | ||
372 | p = malloc(n); |
372 | p = malloc(n); |
373 | assert(malloc_usable_size(p) >= 256); |
373 | assert(malloc_usable_size(p) >= 256); |
374 | */ |
374 | */ |
375 | size_t dlmalloc_usable_size(void*); |
375 | size_t dlmalloc_usable_size(void*); |
376 | 376 | ||
377 | /* |
377 | /* |
378 | malloc_stats(); |
378 | malloc_stats(); |
379 | Prints on stderr the amount of space obtained from the system (both |
379 | Prints on stderr the amount of space obtained from the system (both |
380 | via sbrk and mmap), the maximum amount (which may be more than |
380 | via sbrk and mmap), the maximum amount (which may be more than |
381 | current if malloc_trim and/or munmap got called), and the current |
381 | current if malloc_trim and/or munmap got called), and the current |
382 | number of bytes allocated via malloc (or realloc, etc) but not yet |
382 | number of bytes allocated via malloc (or realloc, etc) but not yet |
383 | freed. Note that this is the number of bytes allocated, not the |
383 | freed. Note that this is the number of bytes allocated, not the |
384 | number requested. It will be larger than the number requested |
384 | number requested. It will be larger than the number requested |
385 | because of alignment and bookkeeping overhead. Because it includes |
385 | because of alignment and bookkeeping overhead. Because it includes |
386 | alignment wastage as being in use, this figure may be greater than |
386 | alignment wastage as being in use, this figure may be greater than |
387 | zero even when no user-level chunks are allocated. |
387 | zero even when no user-level chunks are allocated. |
388 | 388 | ||
389 | The reported current and maximum system memory can be inaccurate if |
389 | The reported current and maximum system memory can be inaccurate if |
390 | a program makes other calls to system memory allocation functions |
390 | a program makes other calls to system memory allocation functions |
391 | (normally sbrk) outside of malloc. |
391 | (normally sbrk) outside of malloc. |
392 | 392 | ||
393 | malloc_stats prints only the most commonly interesting statistics. |
393 | malloc_stats prints only the most commonly interesting statistics. |
394 | More information can be obtained by calling mallinfo. |
394 | More information can be obtained by calling mallinfo. |
395 | */ |
395 | */ |
396 | void dlmalloc_stats(void); |
396 | void dlmalloc_stats(void); |
397 | 397 | ||
398 | #endif /* !ONLY_MSPACES */ |
398 | #endif /* !ONLY_MSPACES */ |
399 | 399 | ||
400 | #if MSPACES |
400 | #if MSPACES |
401 | 401 | ||
402 | /* |
402 | /* |
403 | mspace is an opaque type representing an independent |
403 | mspace is an opaque type representing an independent |
404 | region of space that supports mspace_malloc, etc. |
404 | region of space that supports mspace_malloc, etc. |
405 | */ |
405 | */ |
406 | typedef void* mspace; |
406 | typedef void* mspace; |
407 | 407 | ||
408 | /* |
408 | /* |
409 | create_mspace creates and returns a new independent space with the |
409 | create_mspace creates and returns a new independent space with the |
410 | given initial capacity, or, if 0, the default granularity size. It |
410 | given initial capacity, or, if 0, the default granularity size. It |
411 | returns null if there is no system memory available to create the |
411 | returns null if there is no system memory available to create the |
412 | space. If argument locked is non-zero, the space uses a separate |
412 | space. If argument locked is non-zero, the space uses a separate |
413 | lock to control access. The capacity of the space will grow |
413 | lock to control access. The capacity of the space will grow |
414 | dynamically as needed to service mspace_malloc requests. You can |
414 | dynamically as needed to service mspace_malloc requests. You can |
415 | control the sizes of incremental increases of this space by |
415 | control the sizes of incremental increases of this space by |
416 | compiling with a different DEFAULT_GRANULARITY or dynamically |
416 | compiling with a different DEFAULT_GRANULARITY or dynamically |
417 | setting with mallopt(M_GRANULARITY, value). |
417 | setting with mallopt(M_GRANULARITY, value). |
418 | */ |
418 | */ |
419 | mspace create_mspace(size_t capacity, int locked); |
419 | mspace create_mspace(size_t capacity, int locked); |
420 | 420 | ||
421 | /* |
421 | /* |
422 | destroy_mspace destroys the given space, and attempts to return all |
422 | destroy_mspace destroys the given space, and attempts to return all |
423 | of its memory back to the system, returning the total number of |
423 | of its memory back to the system, returning the total number of |
424 | bytes freed. After destruction, the results of access to all memory |
424 | bytes freed. After destruction, the results of access to all memory |
425 | used by the space become undefined. |
425 | used by the space become undefined. |
426 | */ |
426 | */ |
427 | size_t destroy_mspace(mspace msp); |
427 | size_t destroy_mspace(mspace msp); |
428 | 428 | ||
429 | /* |
429 | /* |
430 | create_mspace_with_base uses the memory supplied as the initial base |
430 | create_mspace_with_base uses the memory supplied as the initial base |
431 | of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this |
431 | of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this |
432 | space is used for bookkeeping, so the capacity must be at least this |
432 | space is used for bookkeeping, so the capacity must be at least this |
433 | large. (Otherwise 0 is returned.) When this initial space is |
433 | large. (Otherwise 0 is returned.) When this initial space is |
434 | exhausted, additional memory will be obtained from the system. |
434 | exhausted, additional memory will be obtained from the system. |
435 | Destroying this space will deallocate all additionally allocated |
435 | Destroying this space will deallocate all additionally allocated |
436 | space (if possible) but not the initial base. |
436 | space (if possible) but not the initial base. |
437 | */ |
437 | */ |
438 | mspace create_mspace_with_base(void* base, size_t capacity, int locked); |
438 | mspace create_mspace_with_base(void* base, size_t capacity, int locked); |
439 | 439 | ||
440 | /* |
440 | /* |
441 | mspace_malloc behaves as malloc, but operates within |
441 | mspace_malloc behaves as malloc, but operates within |
442 | the given space. |
442 | the given space. |
443 | */ |
443 | */ |
444 | void* mspace_malloc(mspace msp, size_t bytes); |
444 | void* mspace_malloc(mspace msp, size_t bytes); |
445 | 445 | ||
446 | /* |
446 | /* |
447 | mspace_free behaves as free, but operates within |
447 | mspace_free behaves as free, but operates within |
448 | the given space. |
448 | the given space. |
449 | 449 | ||
450 | If compiled with FOOTERS==1, mspace_free is not actually needed. |
450 | If compiled with FOOTERS==1, mspace_free is not actually needed. |
451 | free may be called instead of mspace_free because freed chunks from |
451 | free may be called instead of mspace_free because freed chunks from |
452 | any space are handled by their originating spaces. |
452 | any space are handled by their originating spaces. |
453 | */ |
453 | */ |
454 | void mspace_free(mspace msp, void* mem); |
454 | void mspace_free(mspace msp, void* mem); |
455 | 455 | ||
456 | /* |
456 | /* |
457 | mspace_realloc behaves as realloc, but operates within |
457 | mspace_realloc behaves as realloc, but operates within |
458 | the given space. |
458 | the given space. |
459 | 459 | ||
460 | If compiled with FOOTERS==1, mspace_realloc is not actually |
460 | If compiled with FOOTERS==1, mspace_realloc is not actually |
461 | needed. realloc may be called instead of mspace_realloc because |
461 | needed. realloc may be called instead of mspace_realloc because |
462 | realloced chunks from any space are handled by their originating |
462 | realloced chunks from any space are handled by their originating |
463 | spaces. |
463 | spaces. |
464 | */ |
464 | */ |
465 | void* mspace_realloc(mspace msp, void* mem, size_t newsize); |
465 | void* mspace_realloc(mspace msp, void* mem, size_t newsize); |
466 | 466 | ||
467 | /* |
467 | /* |
468 | mspace_calloc behaves as calloc, but operates within |
468 | mspace_calloc behaves as calloc, but operates within |
469 | the given space. |
469 | the given space. |
470 | */ |
470 | */ |
471 | void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); |
471 | void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); |
472 | 472 | ||
473 | /* |
473 | /* |
474 | mspace_memalign behaves as memalign, but operates within |
474 | mspace_memalign behaves as memalign, but operates within |
475 | the given space. |
475 | the given space. |
476 | */ |
476 | */ |
477 | void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); |
477 | void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); |
478 | 478 | ||
479 | /* |
479 | /* |
480 | mspace_independent_calloc behaves as independent_calloc, but |
480 | mspace_independent_calloc behaves as independent_calloc, but |
481 | operates within the given space. |
481 | operates within the given space. |
482 | */ |
482 | */ |
483 | void** mspace_independent_calloc(mspace msp, size_t n_elements, |
483 | void** mspace_independent_calloc(mspace msp, size_t n_elements, |
484 | size_t elem_size, void* chunks[]); |
484 | size_t elem_size, void* chunks[]); |
485 | 485 | ||
486 | /* |
486 | /* |
487 | mspace_independent_comalloc behaves as independent_comalloc, but |
487 | mspace_independent_comalloc behaves as independent_comalloc, but |
488 | operates within the given space. |
488 | operates within the given space. |
489 | */ |
489 | */ |
490 | void** mspace_independent_comalloc(mspace msp, size_t n_elements, |
490 | void** mspace_independent_comalloc(mspace msp, size_t n_elements, |
491 | size_t sizes[], void* chunks[]); |
491 | size_t sizes[], void* chunks[]); |
492 | 492 | ||
493 | /* |
493 | /* |
494 | mspace_footprint() returns the number of bytes obtained from the |
494 | mspace_footprint() returns the number of bytes obtained from the |
495 | system for this space. |
495 | system for this space. |
496 | */ |
496 | */ |
497 | size_t mspace_footprint(mspace msp); |
497 | size_t mspace_footprint(mspace msp); |
498 | 498 | ||
499 | 499 | ||
500 | #if !NO_MALLINFO |
500 | #if !NO_MALLINFO |
501 | /* |
501 | /* |
502 | mspace_mallinfo behaves as mallinfo, but reports properties of |
502 | mspace_mallinfo behaves as mallinfo, but reports properties of |
503 | the given space. |
503 | the given space. |
504 | */ |
504 | */ |
505 | struct mallinfo mspace_mallinfo(mspace msp); |
505 | struct mallinfo mspace_mallinfo(mspace msp); |
506 | #endif /* NO_MALLINFO */ |
506 | #endif /* NO_MALLINFO */ |
507 | 507 | ||
508 | /* |
508 | /* |
509 | mspace_malloc_stats behaves as malloc_stats, but reports |
509 | mspace_malloc_stats behaves as malloc_stats, but reports |
510 | properties of the given space. |
510 | properties of the given space. |
511 | */ |
511 | */ |
512 | void mspace_malloc_stats(mspace msp); |
512 | void mspace_malloc_stats(mspace msp); |
513 | 513 | ||
514 | /* |
514 | /* |
515 | mspace_trim behaves as malloc_trim, but |
515 | mspace_trim behaves as malloc_trim, but |
516 | operates within the given space. |
516 | operates within the given space. |
517 | */ |
517 | */ |
518 | int mspace_trim(mspace msp, size_t pad); |
518 | int mspace_trim(mspace msp, size_t pad); |
519 | 519 | ||
520 | /* |
520 | /* |
521 | An alias for mallopt. |
521 | An alias for mallopt. |
522 | */ |
522 | */ |
523 | int mspace_mallopt(int, int); |
523 | int mspace_mallopt(int, int); |
524 | 524 | ||
525 | #endif /* MSPACES */ |
525 | #endif /* MSPACES */ |
526 | 526 | ||
527 | #ifdef __cplusplus |
527 | #ifdef __cplusplus |
528 | }; /* end of extern "C" */ |
528 | }; /* end of extern "C" */ |
529 | #endif |
529 | #endif |
530 | 530 | ||
531 | #endif /* MALLOC_280_H */ |
531 | #endif /* MALLOC_280_H */ |
- | 532 | ||
- | 533 | ||
- | 534 | /** @} |
|
- | 535 | */ |
|
- | 536 | ||
- | 537 | ||
532 | 538 |