/*
 * Copyright (C) 2001-2005 Jakub Jermar
 * Copyright (C) 2005 Sergey Bondari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Locking order
 *
 * In order to access a particular zone, the process must first lock
 * zones.lock, then lock the zone and then unlock zones.lock.
 * This ensures that the set of zones can be modified at run time
 * without affecting processes that are already working inside a zone.
 *
 */

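/*
 * Illustrative sketch: the locking order above, written out as code.
 * find_zone_and_lock() and find_free_zone_lock() below follow exactly
 * this sequence:
 *
 *	spinlock_lock(&zones.lock);	// 1. global zone-list lock
 *	z = zones.info[i];
 *	spinlock_lock(&z->lock);	// 2. per-zone lock
 *	spinlock_unlock(&zones.lock);	// 3. drop the global lock,
 *					//    keep the zone locked
 */
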
#include <typedefs.h>
#include <arch/types.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <panic.h>
#include <debug.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <arch/asm.h>
#include <arch.h>
#include <print.h>
#include <align.h>
#include <mm/slab.h>
#include <bitops.h>

typedef struct {
	count_t refcount;		/**< tracking of shared frames */
	__u8 buddy_order;		/**< buddy system block order */
	link_t buddy_link;		/**< link to the next free block inside one order */
	void *parent;			/**< If allocated by slab, this points there */
} frame_t;

typedef struct {
	SPINLOCK_DECLARE(lock);		/**< this lock protects everything below */
	pfn_t base;			/**< frame_no of the first frame in the frames array */
	count_t count;			/**< Size of zone */

	frame_t *frames;		/**< array of frame_t structures in this zone */
	count_t free_count;		/**< number of free frame_t structures */
	count_t busy_count;		/**< number of busy frame_t structures */

	buddy_system_t *buddy_system;	/**< buddy system for the zone */
	int flags;
} zone_t;

/*
 * The zoneinfo.lock must be locked when accessing zoneinfo structure.
 * Some of the attributes in zone_t structures are 'read-only'
 */

struct {
	SPINLOCK_DECLARE(lock);
	int count;
	zone_t *info[ZONES_MAX];
} zones;

/*********************************/
/* Helper functions */

static inline index_t frame_index(zone_t *zone, frame_t *frame)
{
	return (index_t)(frame - zone->frames);
}

static inline index_t frame_index_abs(zone_t *zone, frame_t *frame)
{
	return (index_t)(frame - zone->frames) + zone->base;
}

static inline int frame_index_valid(zone_t *zone, index_t index)
{
	return index >= 0 && index < zone->count;
}

/** Compute zone-relative frame index from frame_t pointer & zone pointer */
static index_t make_frame_index(zone_t *zone, frame_t *frame)
{
	return frame - zone->frames;
}

/** Initialize frame structure
 *
 * Initialize frame structure.
 *
 * @param frame Frame structure to be initialized.
 */
static void frame_initialize(frame_t *frame)
{
	frame->refcount = 1;
	frame->buddy_order = 0;
}

/*************************************/
/* Zoneinfo functions */

/**
 * Insert-sort zone into zones list
 *
 * @return zone number on success, -1 on error
 */
static int zones_add_zone(zone_t *newzone)
{
	int i, j;
	ipl_t ipl;
	zone_t *z;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);
	/* Refuse to add more zones than the zoneinfo array can hold */
	if (zones.count + 1 == ZONES_MAX)
		panic("Maximum zone(%d) count exceeded.", ZONES_MAX);
	for (i = 0; i < zones.count; i++) {
		/* Check for overlap */
		z = zones.info[i];
		if (overlaps(newzone->base, newzone->count,
			     z->base, z->count)) {
			printf("Zones overlap!\n");
			spinlock_unlock(&zones.lock);
			interrupts_restore(ipl);
			return -1;
		}
		if (newzone->base < z->base)
			break;
	}
	/* Move other zones up to make room at index i */
	for (j = zones.count; j > i; j--)
		zones.info[j] = zones.info[j - 1];
	zones.info[i] = newzone;
	zones.count++;
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);

	return i;
}

/**
 * Find the zone containing the given frame and lock it
 *
 * @param frame Frame number (absolute pfn) to look up
 * @param pzone Preferred zone to start searching in, or NULL;
 *              on success it is updated with the zone number found
 *
 * Assume interrupts are disabled.
 */
static zone_t * find_zone_and_lock(pfn_t frame, int *pzone)
{
	int i;
	int hint = pzone ? *pzone : 0;
	zone_t *z;

	spinlock_lock(&zones.lock);

	if (hint >= zones.count || hint < 0)
		hint = 0;

	i = hint;
	do {
		z = zones.info[i];
		spinlock_lock(&z->lock);
		if (z->base <= frame && z->base + z->count > frame) {
			spinlock_unlock(&zones.lock); /* Unlock the global lock */
			if (pzone)
				*pzone = i;
			return z;
		}
		spinlock_unlock(&z->lock);

		i++;
		if (i >= zones.count)
			i = 0;
	} while (i != hint);

	spinlock_unlock(&zones.lock);
	return NULL;
}

/** @return True if zone can allocate specified order */
static int zone_can_alloc(zone_t *z, __u8 order)
{
	return buddy_system_can_alloc(z->buddy_system, order);
}

/**
 * Find and lock a zone that can allocate a 2^order block of frames
 *
 * Assume interrupts are disabled!!
 *
 * @param order Order of the requested block
 * @param pzone Pointer to preferred zone or NULL, on return contains zone number
 */
static zone_t * find_free_zone_lock(__u8 order, int *pzone)
{
	int i;
	zone_t *z;
	int hint = pzone ? *pzone : 0;

	spinlock_lock(&zones.lock);
	if (hint >= zones.count)
		hint = 0;
	i = hint;
	do {
		z = zones.info[i];

		spinlock_lock(&z->lock);

		/* Check if the zone has a 2^order frame area available */
		if (zone_can_alloc(z, order)) {
			spinlock_unlock(&zones.lock);
			if (pzone)
				*pzone = i;
			return z;
		}
		spinlock_unlock(&z->lock);
		if (++i >= zones.count)
			i = 0;
	} while (i != hint);
	spinlock_unlock(&zones.lock);
	return NULL;
}

/********************************************/
/* Buddy system functions */

/** Buddy system find_block implementation
 *
 * Find the block that is the parent of the current block.
 * That means walk towards lower addresses until such a block is found.
 *
 * @param order The parent's order must be different from this parameter!!
 */
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
				     __u8 order)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;

	frame = list_get_instance(child, frame_t, buddy_link);
	zone = (zone_t *) b->data;

	index = frame_index(zone, frame);
	do {
		if (zone->frames[index].buddy_order != order) {
			return &zone->frames[index].buddy_link;
		}
	} while (index-- > 0);
	return NULL;
}

static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;

	frame = list_get_instance(block, frame_t, buddy_link);
	zone = (zone_t *) b->data;
	index = frame_index(zone, frame);
	printf("%d", index);
}

/** Buddy system find_buddy implementation
 *
 * @param b Buddy system.
 * @param block Block for which buddy should be found
 *
 * @return Buddy for given block if found
 */
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;
	bool is_left, is_right;

	frame = list_get_instance(block, frame_t, buddy_link);
	zone = (zone_t *) b->data;
	ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));

	is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
	is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);

	ASSERT(is_left ^ is_right);
	if (is_left) {
		index = (frame_index(zone, frame)) + (1 << frame->buddy_order);
	} else {	// if (is_right)
		index = (frame_index(zone, frame)) - (1 << frame->buddy_order);
	}

	if (frame_index_valid(zone, index)) {
		if (zone->frames[index].buddy_order == frame->buddy_order &&
		    zone->frames[index].refcount == 0) {
			return &zone->frames[index].buddy_link;
		}
	}

	return NULL;
}

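/*
 * Worked example for the index arithmetic above (illustrative only):
 * take a block of order 3 (8 frames) whose first frame has zone-relative
 * index 16. If the block is a "left" buddy, its buddy starts at
 * 16 + (1 << 3) = 24; if it is a "right" buddy, the buddy starts at
 * 16 - (1 << 3) = 8. The candidate is returned only when it records the
 * same order and has refcount == 0, i.e. it is a free block of equal size
 * that can be coalesced. The actual left/right test is hidden in the
 * IS_BUDDY_*_BLOCK_ABS() macros defined in the frame allocator headers.
 */
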
/** Buddy system bisect implementation
 *
 * @param b Buddy system.
 * @param block Block to bisect
 *
 * @return right block
 */
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) {
	frame_t *frame_l, *frame_r;

	frame_l = list_get_instance(block, frame_t, buddy_link);
	frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));

	return &frame_r->buddy_link;
}

/** Buddy system coalesce implementation
 *
 * @param b Buddy system.
 * @param block_1 First block
 * @param block_2 First block's buddy
 *
 * @return Coalesced block (actually block that represents lower address)
 */
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1,
				    link_t * block_2)
{
	frame_t *frame1, *frame2;

	frame1 = list_get_instance(block_1, frame_t, buddy_link);
	frame2 = list_get_instance(block_2, frame_t, buddy_link);

	return frame1 < frame2 ? block_1 : block_2;
}

/** Buddy system set_order implementation
 *
 * @param b Buddy system.
 * @param block Buddy system block
 * @param order Order to set
 */
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, __u8 order) {
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	frame->buddy_order = order;
}

/** Buddy system get_order implementation
 *
 * @param b Buddy system.
 * @param block Buddy system block
 *
 * @return Order of block
 */
static __u8 zone_buddy_get_order(buddy_system_t *b, link_t * block) {
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	return frame->buddy_order;
}

/** Buddy system mark_busy implementation
 *
 * @param b Buddy system
 * @param block Buddy system block
 */
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) {
	frame_t *frame;

	frame = list_get_instance(block, frame_t, buddy_link);
	frame->refcount = 1;
}

/** Buddy system mark_available implementation
 *
 * @param b Buddy system
 * @param block Buddy system block
 */
static void zone_buddy_mark_available(buddy_system_t *b, link_t * block) {
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	frame->refcount = 0;
}

static struct buddy_system_operations zone_buddy_system_operations = {
	.find_buddy = zone_buddy_find_buddy,
	.bisect = zone_buddy_bisect,
	.coalesce = zone_buddy_coalesce,
	.set_order = zone_buddy_set_order,
	.get_order = zone_buddy_get_order,
	.mark_busy = zone_buddy_mark_busy,
	.mark_available = zone_buddy_mark_available,
	.find_block = zone_buddy_find_block,
	.print_id = zone_buddy_print_id
};

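/*
 * Note on the callback table above (illustrative): the buddy system itself
 * only tracks link_t nodes. Every callback recovers the enclosing frame_t
 * with list_get_instance(block, frame_t, buddy_link) -- the intrusive-list
 * idiom -- so no extra per-block allocation is ever needed; all bookkeeping
 * lives inside the frames array of the zone that owns the buddy system,
 * which is passed back through b->data.
 */
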
/*************************************/
/* Zone functions */

/** Allocate frame in particular zone
 *
 * Assume zone is locked.
 * Panics if allocation is impossible.
 *
 * @return Frame index in zone
 */
static pfn_t zone_frame_alloc(zone_t *zone, __u8 order)
{
	pfn_t v;
	link_t *tmp;
	frame_t *frame;

	/* Allocate frames from zone buddy system */
	tmp = buddy_system_alloc(zone->buddy_system, order);

	ASSERT(tmp);

	/* Update zone information. */
	zone->free_count -= (1 << order);
	zone->busy_count += (1 << order);

	/* Frame will actually be the first frame of the block. */
	frame = list_get_instance(tmp, frame_t, buddy_link);

	/* get frame address */
	v = make_frame_index(zone, frame);
	return v;
}

/** Free frame from zone
 *
 * Assume zone is locked
 */
static void zone_frame_free(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	__u8 order;

	frame = &zone->frames[frame_idx];

	/* remember frame order */
	order = frame->buddy_order;

	ASSERT(frame->refcount);

	if (!--frame->refcount) {
		buddy_system_free(zone->buddy_system, &frame->buddy_link);
	}

	/* Update zone information. */
	zone->free_count += (1 << order);
	zone->busy_count -= (1 << order);
}

/** Return frame from zone */
static frame_t * zone_get_frame(zone_t *zone, index_t frame_idx)
{
	ASSERT(frame_idx < zone->count);
	return &zone->frames[frame_idx];
}

/** Mark frame in zone unavailable to allocation */
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	link_t *link;

	frame = zone_get_frame(zone, frame_idx);
	if (frame->refcount)
		return;
	link = buddy_system_alloc_block(zone->buddy_system,
					&frame->buddy_link);
	ASSERT(link);
	zone->free_count--;
}

/**
 * Join 2 zones
 *
 * Expect zone_t *z to point to space at least zone_conf_size() large.
 *
 * Assume z1 & z2 are locked.
 */
static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
{
	__u8 max_order;
	int i, z2idx;
	pfn_t frame_idx;
	frame_t *frame;

	ASSERT(!overlaps(z1->base, z1->count, z2->base, z2->count));
	ASSERT(z1->base < z2->base);

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = z1->base;
	z->count = z2->base + z2->count - z1->base;
	z->flags = z1->flags & z2->flags;

	z->free_count = z1->free_count + z2->free_count;
	z->busy_count = z1->busy_count + z2->busy_count;

	max_order = fnzb(z->count);

	z->buddy_system = (buddy_system_t *)&z[1];
	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);

	z->frames = (frame_t *)((void *)z->buddy_system + buddy_conf_size(max_order));
	for (i = 0; i < z->count; i++) {
		/* This marks all frames busy */
		frame_initialize(&z->frames[i]);
	}
	/* Copy frames from both zones to preserve full frame orders,
	 * parents etc. Set refcount of all free frames (refcount == 0)
	 * to 1 and clear their order, because all free frames are added
	 * back to the buddy allocator below.
	 */
	for (i = 0; i < z1->count; i++)
		z->frames[i] = z1->frames[i];
	for (i = 0; i < z2->count; i++) {
		z2idx = i + (z2->base - z1->base);
		z->frames[z2idx] = z2->frames[i];
	}
	for (i = 0; i < z->count; i++) {
		if (!z->frames[i].refcount) {
			z->frames[i].refcount = 1;
			z->frames[i].buddy_order = 0;
		}
	}
	/* Add free blocks from the 2 original zones */
	while (zone_can_alloc(z1, 0)) {
		frame_idx = zone_frame_alloc(z1, 0);
		frame = &z->frames[frame_idx];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
	while (zone_can_alloc(z2, 0)) {
		frame_idx = zone_frame_alloc(z2, 0);
		frame = &z->frames[frame_idx + (z2->base - z1->base)];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
}

/** Return old configuration frames into the zone
 *
 * We have several cases:
 * - the conf. data is outside of the zone -> exit (shall we call frame_free??)
 * - the conf. data was created by zone_create or
 *   updated with reduce_region -> free every frame
 *
 * @param newzone The actual zone where freeing should occur
 * @param oldzone Pointer to old zone configuration data that should
 *                be freed from new zone
 */
static void return_config_frames(zone_t *newzone, zone_t *oldzone)
{
	pfn_t pfn;
	frame_t *frame;
	count_t cframes;
	int i;

	pfn = ADDR2PFN((__address)KA2PA(oldzone));
	cframes = SIZE2FRAMES(zone_conf_size(oldzone->count));

	if (pfn < newzone->base || pfn >= newzone->base + newzone->count)
		return;

	frame = &newzone->frames[pfn - newzone->base];
	ASSERT(!frame->buddy_order);

	for (i = 0; i < cframes; i++) {
		newzone->busy_count++;
		zone_frame_free(newzone, pfn + i - newzone->base);
	}
}

/** Reduce allocated block to count of order 0 frames
 *
 * The allocated block needs 2^order frames of space. Reduce all frames
 * in the block to order 0 and free the unneeded frames. This means that
 * when the block is freed later, every frame of the block has to be
 * freed individually.
 *
 * @param zone
 * @param frame_idx Index to block
 * @param count Allocated space in block
 */
static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count)
{
	count_t i;
	__u8 order;
	frame_t *frame;

	ASSERT(frame_idx + count < zone->count);

	order = zone->frames[frame_idx].buddy_order;
	ASSERT((1 << order) >= count);

	/* Reduce all blocks to order 0 */
	for (i = 0; i < (1 << order); i++) {
		frame = &zone->frames[i + frame_idx];
		frame->buddy_order = 0;
		if (!frame->refcount)
			frame->refcount = 1;
		ASSERT(frame->refcount == 1);
	}
	/* Free unneeded frames */
	for (i = count; i < (1 << order); i++) {
		zone_frame_free(zone, i + frame_idx);
	}
}

/** Merge zones z1 and z2
 *
 * - the zones must be 2 zones with no zone existing in between,
 *   which means that z2 == z1 + 1
 *
 * - When you create a new zone, the frame allocator configuration does
 *   not have to be 2^order in size. Once the allocator is running this is
 *   no longer possible, as the merged configuration data occupies more
 *   space :-/
 */
void zone_merge(int z1, int z2)
{
	ipl_t ipl;
	zone_t *zone1, *zone2, *newzone;
	int cframes;
	__u8 order;
	int i;
	pfn_t pfn;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	if (z1 < 0 || z1 >= zones.count || z2 < 0 || z2 >= zones.count)
		goto errout;
	/* We can join only 2 zones with none existing in between */
	if (z2 - z1 != 1)
		goto errout;

	zone1 = zones.info[z1];
	zone2 = zones.info[z2];
	spinlock_lock(&zone1->lock);
	spinlock_lock(&zone2->lock);

	cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count - zone1->base));
	order = fnzb(cframes) + 1;

	/* Allocate zonedata inside one of the zones */
	if (zone_can_alloc(zone1, order))
		pfn = zone1->base + zone_frame_alloc(zone1, order);
	else if (zone_can_alloc(zone2, order))
		pfn = zone2->base + zone_frame_alloc(zone2, order);
	else
		goto errout2;

	newzone = (zone_t *)PA2KA(PFN2ADDR(pfn));

	_zone_merge(newzone, zone1, zone2);

	/* Free unneeded config frames */
	zone_reduce_region(newzone, pfn - newzone->base, cframes);
	/* Subtract zone information from busy frames */
	newzone->busy_count -= cframes;

	/* Replace existing zones in zoneinfo list */
	zones.info[z1] = newzone;
	for (i = z2 + 1; i < zones.count; i++)
		zones.info[i - 1] = zones.info[i];
	zones.count--;

	/* Free old zone information */
	return_config_frames(newzone, zone1);
	return_config_frames(newzone, zone2);
errout2:
	/* Nobody is allowed to enter the zone, so we are safe
	 * to touch the spinlocks one last time */
	spinlock_unlock(&zone1->lock);
	spinlock_unlock(&zone2->lock);
errout:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}

/**
 * Merge all zones into one big zone
 *
 * It is reasonable to do this on systems where the BIOS reports memory
 * in several chunks, so that we end up with a single zone (it's faster).
 */
void zone_merge_all(void)
{
	int count = zones.count;

	while (zones.count > 1 && --count) {
		zone_merge(0, 1);
	}
}

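/*
 * Illustrative sketch: how the merge routines above might be driven once
 * all zones have been registered. The call site (somewhere late in kernel
 * startup) is an assumption, not taken from this file.
 */
#if 0
static void example_collapse_zones(void)
{
	/* Merge the two lowest zones explicitly... */
	zone_merge(0, 1);
	/* ...or simply collapse everything into a single zone. */
	zone_merge_all();
}
#endif
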
/** Create frame zone
 *
 * Create new frame zone.
 *
 * @param start Frame number (pfn) of the first frame within the zone.
 * @param count Size of the zone in frames.
 * @param z     Address where the zone configuration data is constructed.
 * @param flags Zone flags.
 */
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
{
	int i;
	__u8 max_order;

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = start;
	z->count = count;
	z->flags = flags;
	z->free_count = count;
	z->busy_count = 0;

	/*
	 * Compute order for buddy system, initialize
	 */
	max_order = fnzb(count);
	z->buddy_system = (buddy_system_t *)&z[1];

	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);

	/* Allocate frames _after_ the confframe */
	/* Check sizes */
	z->frames = (frame_t *)((void *)z->buddy_system + buddy_conf_size(max_order));
	for (i = 0; i < count; i++) {
		frame_initialize(&z->frames[i]);
	}

	/* Stuffing frames */
	for (i = 0; i < count; i++) {
		z->frames[i].refcount = 0;
		buddy_system_free(z->buddy_system, &z->frames[i].buddy_link);
	}
}

/** Compute configuration data size for zone */
__address zone_conf_size(count_t count)
{
	int size = sizeof(zone_t) + count * sizeof(frame_t);
	int max_order;

	max_order = fnzb(count);
	size += buddy_conf_size(max_order);
	return size;
}

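/*
 * Worked example (illustrative): the configuration block whose size
 * zone_conf_size() reports is laid out by zone_construct()/_zone_merge() as
 *
 *	[ zone_t | buddy data (buddy_conf_size(fnzb(count))) | frame_t[count] ]
 *
 * For a zone of 4096 frames with, say, sizeof(zone_t) == 64 and
 * sizeof(frame_t) == 32 (hypothetical sizes), the frame array alone takes
 * 4096 * 32 = 128 KiB, so the configuration spans many frames -- which is
 * why zone_create() below searches for a confframe range that does not
 * collide with the kernel and init images.
 */
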
/** Create and add zone to system
 *
 * @param start     First frame number (pfn) of the zone.
 * @param count     Size of the zone in frames.
 * @param confframe Where the configuration frames are supposed to be.
 *        The function checks that the configuration will not disturb the
 *        kernel and possibly the init binary.
 *        If confframe is given _outside_ this zone, it is expected
 *        that the area is already marked BUSY and is big enough
 *        to contain zone_conf_size() amount of data.
 * @param flags     Zone flags.
 *
 * @return Zone number or -1 on error
 */
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
{
	zone_t *z;
	__address addr;
	count_t confcount;
	int i;
	int znum;

	/* Theoretically we could have here 0, practically make sure
	 * nobody tries to do that. If some platform requires it, remove
	 * the assert.
	 */
	ASSERT(confframe);
	/* If confframe is supposed to be inside our zone, then make sure
	 * it does not span the kernel & init images.
	 */
	confcount = SIZE2FRAMES(zone_conf_size(count));
	if (confframe >= start && confframe < start + count) {
		for (; confframe < start + count; confframe++) {
			addr = PFN2ADDR(confframe);
			if (overlaps(addr, PFN2ADDR(confcount),
				     KA2PA(config.base), config.kernel_size))
				continue;
			if (config.init_addr)
				if (overlaps(addr, PFN2ADDR(confcount),
					     KA2PA(config.init_addr),
					     config.init_size))
					continue;
			break;
		}
		if (confframe >= start + count)
			panic("Cannot find configuration data for zone.");
	}

	z = (zone_t *)PA2KA(PFN2ADDR(confframe));
	zone_construct(start, count, z, flags);
	znum = zones_add_zone(z);
	if (znum == -1)
		return -1;

	/* If confdata is inside the zone, mark it as unavailable */
	if (confframe >= start && confframe < start + count)
		for (i = confframe; i < confframe + confcount; i++) {
			zone_mark_unavailable(z, i - z->base);
		}
	return znum;
}

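/*
 * Illustrative sketch: roughly how an architecture's frame_arch_init()
 * is expected to register its memory with zone_create(). The base address
 * and the 16 MB size below are hypothetical values.
 */
#if 0
static void example_register_memory(void)
{
	pfn_t base = ADDR2PFN(0x100000);		/* hypothetical 1 MB base */
	count_t frames = SIZE2FRAMES(16 * 1024 * 1024);	/* hypothetical 16 MB */
	int znum;

	/* Keep the configuration data in the first frames of the zone itself;
	 * zone_create() skips the kernel/init images when placing it. */
	znum = zone_create(base, frames, base, 0);
	if (znum < 0)
		panic("Could not register example memory zone.");
}
#endif
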
/***************************************/
/* Frame functions */

/** Set parent of frame */
void frame_set_parent(pfn_t pfn, void *data, int hint)
{
	zone_t *zone = find_zone_and_lock(pfn, &hint);

	ASSERT(zone);

	zone_get_frame(zone, pfn - zone->base)->parent = data;
	spinlock_unlock(&zone->lock);
}

void * frame_get_parent(pfn_t pfn, int hint)
{
	zone_t *zone = find_zone_and_lock(pfn, &hint);
	void *res;

	ASSERT(zone);
	res = zone_get_frame(zone, pfn - zone->base)->parent;

	spinlock_unlock(&zone->lock);
	return res;
}

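/*
 * Illustrative sketch: the parent pointer lets a higher-level allocator
 * (the slab cache in this kernel) attach its own bookkeeping to a frame
 * and find it again from just the pfn. The my_slab_t type and the zone
 * hint value are made up for the example.
 */
#if 0
typedef struct { int dummy; } my_slab_t;

static void example_track_frame(pfn_t pfn, my_slab_t *slab)
{
	int hint = 0;

	frame_set_parent(pfn, slab, hint);
	ASSERT(frame_get_parent(pfn, hint) == slab);
}
#endif
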
/** Allocate power-of-two frames of physical memory.
 *
 * @param order  Allocate exactly 2^order frames.
 * @param flags  Flags for host zone selection and address processing.
 * @param status Status output; may be NULL unless FRAME_ATOMIC is used.
 * @param pzone  Preferred zone.
 *
 * @return Frame number (pfn) of the first allocated frame.
 */
pfn_t frame_alloc_generic(__u8 order, int flags, int *status, int *pzone)
{
	ipl_t ipl;
	int freed;
	pfn_t v;
	zone_t *zone;

loop:
	ipl = interrupts_disable();
	/*
	 * First, find suitable frame zone.
	 */
	zone = find_free_zone_lock(order, pzone);
	/* If no memory, reclaim some slab memory;
	   if it does not help, reclaim all. */
	if (!zone && !(flags & FRAME_NO_RECLAIM)) {
		freed = slab_reclaim(0);
		if (freed)
			zone = find_free_zone_lock(order, pzone);
		if (!zone) {
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			if (freed)
				zone = find_free_zone_lock(order, pzone);
		}
	}
	if (!zone) {
		if (flags & FRAME_PANIC)
			panic("Can't allocate frame.\n");

		/*
		 * TODO: Sleep until frames are available again.
		 */
		interrupts_restore(ipl);

		if (flags & FRAME_ATOMIC) {
			ASSERT(status != NULL);
			if (status)
				*status = FRAME_NO_MEMORY;
			return NULL;
		}

		panic("Sleep not implemented.\n");
		goto loop;
	}
	v = zone_frame_alloc(zone, order);
	v += zone->base;

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);

	if (status)
		*status = FRAME_OK;
	return v;
}

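/*
 * Illustrative sketch: a single-frame allocate/use/free cycle using
 * frame_alloc_generic() directly. The order value 0 and the FRAME_ATOMIC
 * policy are just example choices.
 */
#if 0
static void example_alloc_and_free(void)
{
	int status;
	int pzone = 0;
	pfn_t pfn;

	pfn = frame_alloc_generic(0, FRAME_ATOMIC, &status, &pzone);
	if (status != FRAME_OK)
		return;		/* out of memory and we refused to sleep */

	/* ... access the frame through PA2KA(PFN2ADDR(pfn)) ... */

	frame_free(pfn);
}
#endif
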
/** Free a frame.
 *
 * Find respective frame structure for supplied pfn.
 * Decrement frame reference count.
 * If it drops to zero, move the frame structure to the free list.
 *
 * @param pfn Frame number of the frame to be freed.
 */
void frame_free(pfn_t pfn)
{
	ipl_t ipl;
	zone_t *zone;

	ipl = interrupts_disable();

	/*
	 * First, find host frame zone for the pfn.
	 */
	zone = find_zone_and_lock(pfn, NULL);
	ASSERT(zone);

	zone_frame_free(zone, pfn - zone->base);

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);
}


/** Mark given range unavailable in frame zones */
void frame_mark_unavailable(pfn_t start, count_t count)
{
	int i;
	zone_t *zone;
	int prefzone = 0;

	for (i = 0; i < count; i++) {
		zone = find_zone_and_lock(start + i, &prefzone);
		if (!zone) /* PFN not found */
			continue;
		zone_mark_unavailable(zone, start + i - zone->base);

		spinlock_unlock(&zone->lock);
	}
}

/** Initialize physical memory management
 *
 * Initialize physical memory management.
 */
void frame_init(void)
{
	if (config.cpu_active == 1) {
		zones.count = 0;
		spinlock_initialize(&zones.lock, "zones_glob_lock");
	}
	/* Tell the architecture to create some memory */
	frame_arch_init();
	if (config.cpu_active == 1) {
		pfn_t firstframe = ADDR2PFN(KA2PA(config.base));
		pfn_t lastframe = ADDR2PFN(KA2PA(config.base + config.kernel_size));
		frame_mark_unavailable(firstframe, lastframe - firstframe + 1);
		if (config.init_size > 0)
			frame_mark_unavailable(ADDR2PFN(KA2PA(config.init_addr)),
					       SIZE2FRAMES(config.init_size));
	}
}


/** Prints list of zones
 *
 */
void zone_print_list(void) {
	zone_t *zone = NULL;
	int i;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);
	printf("#  Base address\tFree Frames\tBusy Frames\n");
	printf("   ------------\t-----------\t-----------\n");
	for (i = 0; i < zones.count; i++) {
		zone = zones.info[i];
		spinlock_lock(&zone->lock);
		printf("%d: %L\t%d\t\t%d\n", i, PFN2ADDR(zone->base),
		       zone->free_count, zone->busy_count);
		spinlock_unlock(&zone->lock);
	}
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}

/** Prints zone details
 *
 * @param num Zone base address or zone number
 */
void zone_print_one(int num) {
	zone_t *zone = NULL;
	ipl_t ipl;
	int i;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	for (i = 0; i < zones.count; i++) {
		if (i == num || PFN2ADDR(zones.info[i]->base) == num) {
			zone = zones.info[i];
			break;
		}
	}
	if (!zone) {
		printf("Zone not found.\n");
		goto out;
	}

	spinlock_lock(&zone->lock);
	printf("Memory zone information\n");
	printf("Zone base address: %P\n", PFN2ADDR(zone->base));
	printf("Zone size: %d frames (%dK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
	printf("Allocated space: %d frames (%dK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
	printf("Available space: %d (%dK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
	buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);

	spinlock_unlock(&zone->lock);
out:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}