/*
 * Copyright (C) 2001-2005 Jakub Jermar
 * Copyright (C) 2005 Sergey Bondari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Locking order
 *
 * In order to access a particular zone, the process must first lock
 * the zones.lock, then lock the zone and then unlock the zones.lock.
 * This ensures that zones can be manipulated at runtime without
 * affecting running processes.
 *
 */

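/*
 * Illustrative sketch of the locking order above (assumes interrupts are
 * already disabled by the caller); this is the pattern followed by the
 * helper functions below, e.g. find_zone_and_lock():
 *
 *	spinlock_lock(&zones.lock);
 *	z = zones.info[i];		(pick the zone of interest)
 *	spinlock_lock(&z->lock);	(lock the zone while zones.lock is held)
 *	spinlock_unlock(&zones.lock);	(drop the global lock, keep the zone)
 *	... work with the zone ...
 *	spinlock_unlock(&z->lock);
 */
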
#include <typedefs.h>
#include <arch/types.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <panic.h>
#include <debug.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <arch/asm.h>
#include <arch.h>
#include <print.h>
#include <align.h>
#include <mm/slab.h>
#include <bitops.h>
#include <macros.h>

typedef struct {
	count_t refcount;	/**< tracking of shared frames */
	__u8 buddy_order;	/**< buddy system block order */
	link_t buddy_link;	/**< link to the next free block inside one order */
	void *parent;		/**< If allocated by slab, this points there */
} frame_t;

typedef struct {
	SPINLOCK_DECLARE(lock);	/**< this lock protects everything below */
	pfn_t base;		/**< frame_no of the first frame in the frames array */
	count_t count;		/**< Size of zone */

	frame_t *frames;	/**< array of frame_t structures in this zone */
	count_t free_count;	/**< number of free frame_t structures */
	count_t busy_count;	/**< number of busy frame_t structures */

	buddy_system_t *buddy_system;	/**< buddy system for the zone */
	int flags;
} zone_t;

/*
 * The zones.lock must be locked when accessing the zoneinfo structure below.
 * Some of the attributes in zone_t structures are 'read-only'.
 */

struct {
	SPINLOCK_DECLARE(lock);
	int count;
	zone_t *info[ZONES_MAX];
} zones;


/*********************************/
/* Helper functions */
static inline index_t frame_index(zone_t *zone, frame_t *frame)
{
	return (index_t)(frame - zone->frames);
}
static inline index_t frame_index_abs(zone_t *zone, frame_t *frame)
{
	return (index_t)(frame - zone->frames) + zone->base;
}
static inline int frame_index_valid(zone_t *zone, index_t index)
{
	return index >= 0 && index < zone->count;
}

/** Compute frame index from frame_t pointer & zone pointer */
static index_t make_frame_index(zone_t *zone, frame_t *frame)
{
	return frame - zone->frames;
}

/** Initialize frame structure
 *
 * Initialize frame structure.
 *
 * @param frame Frame structure to be initialized.
 */
static void frame_initialize(frame_t *frame)
{
	frame->refcount = 1;
	frame->buddy_order = 0;
}

/*************************************/
/* Zoneinfo functions */

/**
 * Insert-sort zone into zones list
 *
 * @return zone number on success, -1 on error
 */
static int zones_add_zone(zone_t *newzone)
{
	int i, j;
	ipl_t ipl;
	zone_t *z;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);
	/* Try to merge */
	if (zones.count + 1 == ZONES_MAX)
		panic("Maximum zone(%d) count exceeded.", ZONES_MAX);
	for (i = 0; i < zones.count; i++) {
		/* Check for overlap */
		z = zones.info[i];
		if (overlaps(newzone->base, newzone->count,
			     z->base, z->count)) {
			printf("Zones overlap!\n");
			spinlock_unlock(&zones.lock);
			interrupts_restore(ipl);
			return -1;
		}
		if (newzone->base < z->base)
			break;
	}
	/* Move other zones up, from the top down so nothing is overwritten */
	for (j = zones.count; j > i; j--)
		zones.info[j] = zones.info[j - 1];
	zones.info[i] = newzone;
	zones.count++;
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);

	return i;
}

/**
 * Try to find a zone containing the given frame
 *
 * @param frame Frame number to look for
 * @param pzone Preferred zone to start searching in; on success it is
 *              updated with the number of the zone that was found
 *
 * Assume interrupts are disabled.
 */
static zone_t * find_zone_and_lock(pfn_t frame, int *pzone)
{
	int i;
	int hint = pzone ? *pzone : 0;
	zone_t *z;

	spinlock_lock(&zones.lock);

	if (hint >= zones.count || hint < 0)
		hint = 0;

	i = hint;
	do {
		z = zones.info[i];
		spinlock_lock(&z->lock);
		if (z->base <= frame && z->base + z->count > frame) {
			spinlock_unlock(&zones.lock); /* Unlock the global lock */
			if (pzone)
				*pzone = i;
			return z;
		}
		spinlock_unlock(&z->lock);

		i++;
		if (i >= zones.count)
			i = 0;
	} while (i != hint);

	spinlock_unlock(&zones.lock);
	return NULL;
}

/** @return True if zone can allocate specified order */
static int zone_can_alloc(zone_t *z, __u8 order)
{
	return buddy_system_can_alloc(z->buddy_system, order);
}

/**
 * Find AND LOCK a zone that can allocate a block of 2^order frames
 *
 * Assume interrupts are disabled!!
 *
 * @param pzone Pointer to preferred zone or NULL, on return contains zone number
 */
static zone_t * find_free_zone_lock(__u8 order, int *pzone)
{
	int i;
	zone_t *z;
	int hint = pzone ? *pzone : 0;

	spinlock_lock(&zones.lock);
	if (hint >= zones.count)
		hint = 0;
	i = hint;
	do {
		z = zones.info[i];

		spinlock_lock(&z->lock);

		/* Check if the zone has a 2^order frames area available */
		if (zone_can_alloc(z, order)) {
			spinlock_unlock(&zones.lock);
			if (pzone)
				*pzone = i;
			return z;
		}
		spinlock_unlock(&z->lock);
		if (++i >= zones.count)
			i = 0;
	} while (i != hint);
	spinlock_unlock(&zones.lock);
	return NULL;
}

/********************************************/
/* Buddy system functions */

/** Buddy system find_block implementation
 *
 * Find the block that is the parent of the given block.
 * That means walking towards lower addresses until such a block is found.
 *
 * @param order - Order of the parent must be different than this parameter!!
 */
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
				     __u8 order)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;

	frame = list_get_instance(child, frame_t, buddy_link);
	zone = (zone_t *) b->data;

	index = frame_index(zone, frame);
	do {
		if (zone->frames[index].buddy_order != order) {
			return &zone->frames[index].buddy_link;
		}
	} while (index-- > 0);
	return NULL;
}

static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;

	frame = list_get_instance(block, frame_t, buddy_link);
	zone = (zone_t *) b->data;
	index = frame_index(zone, frame);
	printf("%zd", index);
}

/** Buddy system find_buddy implementation
 *
 * @param b Buddy system.
 * @param block Block for which buddy should be found
 *
 * @return Buddy for given block if found
 */
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t *block)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;
	bool is_left, is_right;

	frame = list_get_instance(block, frame_t, buddy_link);
	zone = (zone_t *) b->data;
	ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));

	is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
	is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);

	ASSERT(is_left ^ is_right);
	if (is_left) {
		index = (frame_index(zone, frame)) + (1 << frame->buddy_order);
	} else {	/* if (is_right) */
		index = (frame_index(zone, frame)) - (1 << frame->buddy_order);
	}

	if (frame_index_valid(zone, index)) {
		if (zone->frames[index].buddy_order == frame->buddy_order &&
		    zone->frames[index].refcount == 0) {
			return &zone->frames[index].buddy_link;
		}
	}

	return NULL;
}

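/*
 * Worked example (illustration only, not used by the code): an order-2
 * block starting at frame index 8 is a "left" buddy, so its buddy is the
 * order-2 block at index 8 + (1 << 2) = 12.  Conversely, the block at
 * index 12 is a "right" buddy whose buddy starts at 12 - (1 << 2) = 8.
 * The candidate is only returned if it has the same order and refcount 0.
 */
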
/** Buddy system bisect implementation
 *
 * @param b Buddy system.
 * @param block Block to bisect
 *
 * @return right block
 */
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t *block) {
	frame_t *frame_l, *frame_r;

	frame_l = list_get_instance(block, frame_t, buddy_link);
	frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));

	return &frame_r->buddy_link;
}

/** Buddy system coalesce implementation
 *
 * @param b Buddy system.
 * @param block_1 First block
 * @param block_2 First block's buddy
 *
 * @return Coalesced block (actually block that represents lower address)
 */
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t *block_1,
				    link_t *block_2)
{
	frame_t *frame1, *frame2;

	frame1 = list_get_instance(block_1, frame_t, buddy_link);
	frame2 = list_get_instance(block_2, frame_t, buddy_link);

	return frame1 < frame2 ? block_1 : block_2;
}

/** Buddy system set_order implementation
 *
 * @param b Buddy system.
 * @param block Buddy system block
 * @param order Order to set
 */
static void zone_buddy_set_order(buddy_system_t *b, link_t *block, __u8 order) {
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	frame->buddy_order = order;
}

/** Buddy system get_order implementation
 *
 * @param b Buddy system.
 * @param block Buddy system block
 *
 * @return Order of block
 */
static __u8 zone_buddy_get_order(buddy_system_t *b, link_t *block) {
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	return frame->buddy_order;
}

/** Buddy system mark_busy implementation
 *
 * @param b Buddy system
 * @param block Buddy system block
 *
 */
static void zone_buddy_mark_busy(buddy_system_t *b, link_t *block) {
	frame_t *frame;

	frame = list_get_instance(block, frame_t, buddy_link);
	frame->refcount = 1;
}

/** Buddy system mark_available implementation
 *
 * @param b Buddy system
 * @param block Buddy system block
 *
 */
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) {
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	frame->refcount = 0;
}

static struct buddy_system_operations zone_buddy_system_operations = {
	.find_buddy = zone_buddy_find_buddy,
	.bisect = zone_buddy_bisect,
	.coalesce = zone_buddy_coalesce,
	.set_order = zone_buddy_set_order,
	.get_order = zone_buddy_get_order,
	.mark_busy = zone_buddy_mark_busy,
	.mark_available = zone_buddy_mark_available,
	.find_block = zone_buddy_find_block,
	.print_id = zone_buddy_print_id
};

/*************************************/
/* Zone functions */

/** Allocate frame in particular zone
 *
 * Assume zone is locked
 * Panics if allocation is impossible.
 *
 * @return Frame index in zone
 */
static pfn_t zone_frame_alloc(zone_t *zone, __u8 order)
{
	pfn_t v;
	link_t *tmp;
	frame_t *frame;

	/* Allocate frames from zone buddy system */
	tmp = buddy_system_alloc(zone->buddy_system, order);

	ASSERT(tmp);

	/* Update zone information. */
	zone->free_count -= (1 << order);
	zone->busy_count += (1 << order);

	/* Frame will actually be the first frame of the block. */
	frame = list_get_instance(tmp, frame_t, buddy_link);

	/* get frame index within the zone */
	v = make_frame_index(zone, frame);
	return v;
}

/** Free frame from zone
 *
 * Assume zone is locked
 */
static void zone_frame_free(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	__u8 order;

	frame = &zone->frames[frame_idx];

	/* remember frame order */
	order = frame->buddy_order;

	ASSERT(frame->refcount);

	if (!--frame->refcount) {
		buddy_system_free(zone->buddy_system, &frame->buddy_link);

		/* Update zone information. */
		zone->free_count += (1 << order);
		zone->busy_count -= (1 << order);
	}
}

/** Return frame from zone */
static frame_t * zone_get_frame(zone_t *zone, index_t frame_idx)
{
	ASSERT(frame_idx < zone->count);
	return &zone->frames[frame_idx];
}

/** Mark frame in zone unavailable to allocation */
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	link_t *link;

	frame = zone_get_frame(zone, frame_idx);
	if (frame->refcount)
		return;
	link = buddy_system_alloc_block(zone->buddy_system,
					&frame->buddy_link);
	ASSERT(link);
	zone->free_count--;
}

/**
 * Join 2 zones
 *
 * Expect zone_t *z to point to space at least zone_conf_size large
 *
 * Assume z1 & z2 are locked
 */

static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
{
	__u8 max_order;
	int i, z2idx;
	pfn_t frame_idx;
	frame_t *frame;

	ASSERT(!overlaps(z1->base, z1->count, z2->base, z2->count));
	ASSERT(z1->base < z2->base);

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = z1->base;
	z->count = z2->base + z2->count - z1->base;
	z->flags = z1->flags & z2->flags;

	z->free_count = z1->free_count + z2->free_count;
	z->busy_count = z1->busy_count + z2->busy_count;

	max_order = fnzb(z->count);

	z->buddy_system = (buddy_system_t *)&z[1];
	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);

	z->frames = (frame_t *)((void *)z->buddy_system + buddy_conf_size(max_order));
	for (i = 0; i < z->count; i++) {
		/* This marks all frames busy */
		frame_initialize(&z->frames[i]);
	}
	/* Copy frames from both zones to preserve full frame orders,
	 * parents etc. Set all free frames with refcount=0 to 1, because
	 * we add all free frames back to the buddy allocator later and
	 * clear their order to 0. Do not reset busy frames to refcount=0,
	 * as they will not be reallocated during the merge and doing so
	 * would cause problems with later allocation/free.
	 */
	for (i = 0; i < z1->count; i++)
		z->frames[i] = z1->frames[i];
	for (i = 0; i < z2->count; i++) {
		z2idx = i + (z2->base - z1->base);
		z->frames[z2idx] = z2->frames[i];
	}
	i = 0;
	while (i < z->count) {
		if (z->frames[i].refcount) {
			/* skip busy frames */
			i += 1 << z->frames[i].buddy_order;
		} else { /* Free frames, set refcount=1 */
			/* All free frames have refcount=0, we need not
			 * check the order */
			z->frames[i].refcount = 1;
			z->frames[i].buddy_order = 0;
			i++;
		}
	}
	/* Add free blocks from the 2 original zones */
	while (zone_can_alloc(z1, 0)) {
		frame_idx = zone_frame_alloc(z1, 0);
		frame = &z->frames[frame_idx];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
	while (zone_can_alloc(z2, 0)) {
		frame_idx = zone_frame_alloc(z2, 0);
		frame = &z->frames[frame_idx + (z2->base - z1->base)];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
}

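/*
 * Worked example (illustration only): merging a zone with base 0x100 and
 * count 0x100 with a zone with base 0x300 and count 0x100 yields a zone
 * with base 0x100 and count 0x300 + 0x100 - 0x100 = 0x300.  Frames in the
 * hole between the two original zones stay initialized as busy
 * (refcount=1) and are never returned to the new buddy system, so they
 * can never be allocated from the merged zone.
 */
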
/** Return old configuration frames into the zone
 *
 * We have several cases
 * - the conf. data is outside of the zone -> exit, shall we call frame_free??
 * - the conf. data was created by zone_create or
 *   updated with reduce_region -> free every frame
 *
 * @param newzone The actual zone where freeing should occur
 * @param oldzone Pointer to old zone configuration data that should
 *                be freed from the new zone
 */
static void return_config_frames(zone_t *newzone, zone_t *oldzone)
{
	pfn_t pfn;
	frame_t *frame;
	count_t cframes;
	int i;

	pfn = ADDR2PFN((__address)KA2PA(oldzone));
	cframes = SIZE2FRAMES(zone_conf_size(oldzone->count));

	if (pfn < newzone->base || pfn >= newzone->base + newzone->count)
		return;

	frame = &newzone->frames[pfn - newzone->base];
	ASSERT(!frame->buddy_order);

	for (i = 0; i < cframes; i++) {
		newzone->busy_count++;
		zone_frame_free(newzone, pfn + i - newzone->base);
	}
}

/** Reduce allocated block to count of order 0 frames
 *
 * The allocated block needs 2^order frames of space. Reduce all frames
 * in the block to order 0 and free the unneeded frames. This means that,
 * when freeing the block, you have to free every frame from the block.
 *
 * @param zone
 * @param frame_idx Index to block
 * @param count Allocated space in block
 */
static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count)
{
	count_t i;
	__u8 order;
	frame_t *frame;

	ASSERT(frame_idx + count < zone->count);

	order = zone->frames[frame_idx].buddy_order;
	ASSERT((1 << order) >= count);

	/* Reduce all blocks to order 0 */
	for (i = 0; i < (1 << order); i++) {
		frame = &zone->frames[i + frame_idx];
		frame->buddy_order = 0;
		if (!frame->refcount)
			frame->refcount = 1;
		ASSERT(frame->refcount == 1);
	}
	/* Free unneeded frames */
	for (i = count; i < (1 << order); i++) {
		zone_frame_free(zone, i + frame_idx);
	}
}

/** Merge zones z1 and z2
 *
 * - the zones must be 2 adjacent zones with no zone existing in between,
 *   which means that z2 = z1 + 1
 *
 * - When you create a new zone, the frame allocator configuration does
 *   not have to be 2^order in size. Once the allocator is running this is
 *   no longer possible, as the merged configuration data occupies more
 *   space :-/
 */
void zone_merge(int z1, int z2)
{
	ipl_t ipl;
	zone_t *zone1, *zone2, *newzone;
	int cframes;
	__u8 order;
	int i;
	pfn_t pfn;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	if (z1 < 0 || z1 >= zones.count || z2 < 0 || z2 >= zones.count)
		goto errout;
	/* We can join only 2 zones with none existing in between */
	if (z2 - z1 != 1)
		goto errout;

	zone1 = zones.info[z1];
	zone2 = zones.info[z2];
	spinlock_lock(&zone1->lock);
	spinlock_lock(&zone2->lock);

	cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count - zone1->base));
	order = fnzb(cframes) + 1;

	/* Allocate zonedata inside one of the zones */
	if (zone_can_alloc(zone1, order))
		pfn = zone1->base + zone_frame_alloc(zone1, order);
	else if (zone_can_alloc(zone2, order))
		pfn = zone2->base + zone_frame_alloc(zone2, order);
	else
		goto errout2;

	newzone = (zone_t *)PA2KA(PFN2ADDR(pfn));

	_zone_merge(newzone, zone1, zone2);

	/* Free unneeded config frames */
	zone_reduce_region(newzone, pfn - newzone->base, cframes);
	/* Subtract zone information from busy frames */
	newzone->busy_count -= cframes;

	/* Replace existing zones in zoneinfo list */
	zones.info[z1] = newzone;
	for (i = z2 + 1; i < zones.count; i++)
		zones.info[i - 1] = zones.info[i];
	zones.count--;

	/* Free old zone information */
	return_config_frames(newzone, zone1);
	return_config_frames(newzone, zone2);
errout2:
	/* Nobody is allowed to enter the zone, so we are safe
	 * to touch the spinlocks one last time */
	spinlock_unlock(&zone1->lock);
	spinlock_unlock(&zone2->lock);
errout:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}

/**
 * Merge all zones into one big zone
 *
 * It is reasonable to do this on systems whose BIOS reports memory
 * in several chunks, so that we end up with a single zone (it is faster).
 */
void zone_merge_all(void)
{
	int count = zones.count;

	while (zones.count > 1 && --count) {
		zone_merge(0, 1);
		break;
	}
}

/** Create frame zone
 *
 * Create new frame zone.
 *
 * @param start Frame number of the first frame within the zone.
 * @param count Count of frames in the zone.
 * @param z Pointer to the zone configuration data (at least zone_conf_size() bytes).
 * @param flags Zone flags.
 *
 * The zone configuration pointed to by z is initialized in place.
 */
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
{
	int i;
	__u8 max_order;

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = start;
	z->count = count;
	z->flags = flags;
	z->free_count = count;
	z->busy_count = 0;

	/*
	 * Compute order for buddy system, initialize
	 */
	max_order = fnzb(count);
	z->buddy_system = (buddy_system_t *)&z[1];

	buddy_system_create(z->buddy_system, max_order,
			    &zone_buddy_system_operations,
			    (void *) z);

	/* Allocate frames _after_ the confframe */
	/* Check sizes */
	z->frames = (frame_t *)((void *)z->buddy_system + buddy_conf_size(max_order));
	for (i = 0; i < count; i++) {
		frame_initialize(&z->frames[i]);
	}

	/* Stuffing frames */
	for (i = 0; i < count; i++) {
		z->frames[i].refcount = 0;
		buddy_system_free(z->buddy_system, &z->frames[i].buddy_link);
	}
}

/** Compute configuration data size for zone */
__address zone_conf_size(count_t count)
{
	int size = sizeof(zone_t) + count * sizeof(frame_t);
	int max_order;

	max_order = fnzb(count);
	size += buddy_conf_size(max_order);
	return size;
}

/** Create and add zone to system
 *
 * @param confframe Where the configuration frame is supposed to be.
 *                  Always check that we will not disturb the kernel and possibly init.
 *                  If confframe is given _outside_ this zone, it is expected
 *                  that the area is already marked BUSY and big enough
 *                  to contain zone_conf_size() amount of data.
 *
 * @return Zone number or -1 on error
 */
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
{
	zone_t *z;
	__address addr;
	count_t confcount;
	int i;
	int znum;

	/* Theoretically we could have 0 here, practically make sure
	 * nobody tries to do that. If some platform requires it, remove
	 * the assert.
	 */
	ASSERT(confframe);
	/* If confframe is supposed to be inside our zone, then make sure
	 * it does not span the kernel & init
	 */
	confcount = SIZE2FRAMES(zone_conf_size(count));
	if (confframe >= start && confframe < start + count) {
		for (; confframe < start + count; confframe++) {
			addr = PFN2ADDR(confframe);
			if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size))
				continue;

			bool overlap = false;
			count_t i;
			for (i = 0; i < init.cnt; i++)
				if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
					overlap = true;
					break;
				}
			if (overlap)
				continue;

			break;
		}
		if (confframe >= start + count)
			panic("Cannot find configuration data for zone.");
	}

	z = (zone_t *)PA2KA(PFN2ADDR(confframe));
	zone_construct(start, count, z, flags);
	znum = zones_add_zone(z);
	if (znum == -1)
		return -1;

	/* If confdata is in the zone, mark it as unavailable */
	if (confframe >= start && confframe < start + count)
		for (i = confframe; i < confframe + confcount; i++) {
			zone_mark_unavailable(z, i - z->base);
		}
	return znum;
}

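/*
 * Usage sketch (hypothetical arch code, not part of this file): create a
 * zone covering `size` bytes of physical memory starting at address `base`,
 * keeping the configuration data in the zone's own first frames:
 *
 *	zone_create(ADDR2PFN(base), SIZE2FRAMES(size), ADDR2PFN(base), 0);
 *
 * The returned zone number can later be passed to zone_merge().
 */
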
/***************************************/
/* Frame functions */

/** Set parent of frame */
void frame_set_parent(pfn_t pfn, void *data, int hint)
{
	zone_t *zone = find_zone_and_lock(pfn, &hint);

	ASSERT(zone);

	zone_get_frame(zone, pfn - zone->base)->parent = data;
	spinlock_unlock(&zone->lock);
}

void * frame_get_parent(pfn_t pfn, int hint)
{
	zone_t *zone = find_zone_and_lock(pfn, &hint);
	void *res;

	ASSERT(zone);
	res = zone_get_frame(zone, pfn - zone->base)->parent;

	spinlock_unlock(&zone->lock);
	return res;
}

/** Allocate power-of-two frames of physical memory.
 *
 * @param order Allocate exactly 2^order frames.
 * @param flags Flags for host zone selection and address processing.
 * @param status Allocation status output; set to FRAME_OK or FRAME_NO_MEMORY (must not be NULL with FRAME_ATOMIC).
 * @param pzone Preferred zone
 *
 * @return Frame number (pfn) of the first allocated frame.
 */
pfn_t frame_alloc_generic(__u8 order, int flags, int *status, int *pzone)
{
	ipl_t ipl;
	int freed;
	pfn_t v;
	zone_t *zone;

loop:
	ipl = interrupts_disable();
	/*
	 * First, find suitable frame zone.
	 */
	zone = find_free_zone_lock(order, pzone);
	/* If no memory, reclaim some slab memory,
	   if it does not help, reclaim all */
	if (!zone && !(flags & FRAME_NO_RECLAIM)) {
		freed = slab_reclaim(0);
		if (freed)
			zone = find_free_zone_lock(order, pzone);
		if (!zone) {
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			if (freed)
				zone = find_free_zone_lock(order, pzone);
		}
	}
	if (!zone) {
		if (flags & FRAME_PANIC)
			panic("Can't allocate frame.\n");

		/*
		 * TODO: Sleep until frames are available again.
		 */
		interrupts_restore(ipl);

		if (flags & FRAME_ATOMIC) {
			ASSERT(status != NULL);
			if (status)
				*status = FRAME_NO_MEMORY;
			return NULL;
		}

		panic("Sleep not implemented.\n");
		goto loop;
	}
	v = zone_frame_alloc(zone, order);
	v += zone->base;

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);

	if (status)
		*status = FRAME_OK;
	return v;
}

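/*
 * Usage sketch (hypothetical caller, not part of this file): atomically
 * allocate a single frame (order 0) from any zone and release it again.
 *
 *	int status;
 *	pfn_t pfn = frame_alloc_generic(0, FRAME_ATOMIC, &status, NULL);
 *	if (status == FRAME_OK)
 *		frame_free(pfn);
 */
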
/** Free a frame.
 *
 * Find respective frame structure for supplied PFN.
 * Decrement frame reference count.
 * If it drops to zero, move the frame structure to free list.
 *
 * @param pfn Frame number to be freed.
 */
void frame_free(pfn_t pfn)
{
	ipl_t ipl;
	zone_t *zone;

	ipl = interrupts_disable();

	/*
	 * First, find host frame zone for addr.
	 */
	zone = find_zone_and_lock(pfn, NULL);
	ASSERT(zone);

	zone_frame_free(zone, pfn - zone->base);

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);
}

/** Add reference to frame.
 *
 * Find respective frame structure for supplied PFN and
 * increment frame reference count.
 *
 * @param pfn Frame number whose reference count is to be incremented.
 */
void frame_reference_add(pfn_t pfn)
{
	ipl_t ipl;
	zone_t *zone;
	frame_t *frame;

	ipl = interrupts_disable();

	/*
	 * First, find host frame zone for addr.
	 */
	zone = find_zone_and_lock(pfn, NULL);
	ASSERT(zone);

	frame = &zone->frames[pfn - zone->base];
	frame->refcount++;

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);
}

/** Mark given range unavailable in frame zones */
void frame_mark_unavailable(pfn_t start, count_t count)
{
	int i;
	zone_t *zone;
	int prefzone = 0;

	for (i = 0; i < count; i++) {
		zone = find_zone_and_lock(start + i, &prefzone);
		if (!zone) /* PFN not found */
			continue;
		zone_mark_unavailable(zone, start + i - zone->base);

		spinlock_unlock(&zone->lock);
	}
}

/** Initialize physical memory management
 *
 * Initialize physical memory management.
 */
void frame_init(void)
{
	if (config.cpu_active == 1) {
		zones.count = 0;
		spinlock_initialize(&zones.lock, "zones_glob_lock");
	}
	/* Tell the architecture to create some memory */
	frame_arch_init();
	if (config.cpu_active == 1) {
		pfn_t firstframe = ADDR2PFN(KA2PA(config.base));
		pfn_t lastframe = ADDR2PFN(KA2PA(config.base + config.kernel_size));
		frame_mark_unavailable(firstframe, lastframe - firstframe + 1);

		count_t i;
		for (i = 0; i < init.cnt; i++)
			frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size));
	}
}

/** Prints list of zones
 *
 */
void zone_print_list(void) {
	zone_t *zone = NULL;
	int i;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);
	printf("#  Base address\tFree Frames\tBusy Frames\n");
	printf("   ------------\t-----------\t-----------\n");
	for (i = 0; i < zones.count; i++) {
		zone = zones.info[i];
		spinlock_lock(&zone->lock);
		printf("%d: %.*p \t%10zd\t%10zd\n", i, sizeof(__address) * 2, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
		spinlock_unlock(&zone->lock);
	}
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}

/** Prints zone details
 *
 * @param num Zone base address OR zone number
 */
void zone_print_one(int num) {
	zone_t *zone = NULL;
	ipl_t ipl;
	int i;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	for (i = 0; i < zones.count; i++) {
		if (i == num || PFN2ADDR(zones.info[i]->base) == num) {
			zone = zones.info[i];
			break;
		}
	}
	if (!zone) {
		printf("Zone not found.\n");
		goto out;
	}

	spinlock_lock(&zone->lock);
	printf("Memory zone information\n");
	printf("Zone base address: %#.*p\n", sizeof(__address) * 2, PFN2ADDR(zone->base));
	printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
	printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
	printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
	buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);

	spinlock_unlock(&zone->lock);
out:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}