/*
 * Copyright (c) 2001-2005 Jakub Jermar
 * Copyright (c) 2005 Sergey Bondari
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Physical frame allocator.
 *
 * This file contains the physical frame allocator and memory zone management.
 * The frame allocator is built on top of the buddy allocator.
 *
 * @see buddy.c
 */

/*
 * Locking order
 *
 * In order to access a particular zone, the process must first lock
 * zones.lock, then lock the zone itself and then unlock zones.lock.
 * This ensures that we can fiddle with the zones at run time without
 * affecting the processes.
 *
 */

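/*
 * As an illustration of the protocol above, a lookup of zone i typically
 * proceeds as follows (cf. find_zone_and_lock() below):
 *
 *	spinlock_lock(&zones.lock);
 *	z = zones.info[i];
 *	spinlock_lock(&z->lock);
 *	spinlock_unlock(&zones.lock);
 *	... work with *z ...
 *	spinlock_unlock(&z->lock);
 */
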
#include <arch/types.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <panic.h>
#include <debug.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/condvar.h>
#include <arch/asm.h>
#include <arch.h>
#include <print.h>
#include <align.h>
#include <mm/slab.h>
#include <bitops.h>
#include <macros.h>
#include <config.h>

typedef struct {
	count_t refcount;	/**< tracking of shared frames */
	uint8_t buddy_order;	/**< buddy system block order */
	link_t buddy_link;	/**< link to the next free block inside one
				     order */
	void *parent;		/**< If allocated by slab, this points there */
} frame_t;
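
/*
 * A note on refcount: frames that sit in the buddy allocator's free lists
 * have refcount == 0, while allocated frames have refcount >= 1 (see
 * zone_buddy_mark_busy() and zone_frame_free() below); shared frames simply
 * carry a higher count via frame_reference_add().
 */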

typedef struct {
	SPINLOCK_DECLARE(lock);	/**< this lock protects everything below */
	pfn_t base;		/**< frame_no of the first frame in the frames
				     array */
	count_t count;		/**< Size of zone */

	frame_t *frames;	/**< array of frame_t structures in this
				     zone */
	count_t free_count;	/**< number of free frame_t structures */
	count_t busy_count;	/**< number of busy frame_t structures */

	buddy_system_t *buddy_system;	/**< buddy system for the zone */
	int flags;
} zone_t;

/*
 * The zoneinfo.lock must be held when accessing the zoneinfo structure.
 * Some of the attributes in zone_t structures are 'read-only'.
 */

typedef struct {
	SPINLOCK_DECLARE(lock);
	unsigned int count;
	zone_t *info[ZONES_MAX];
} zones_t;

static zones_t zones;

/*
 * Synchronization primitives used to sleep when there is no memory
 * available.
 */
mutex_t mem_avail_mtx;
condvar_t mem_avail_cv;
unsigned long mem_avail_frames = 0;	/**< Number of available frames. */
unsigned long mem_avail_gen = 0;	/**< Generation counter. */

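/*
 * The generation counter works as follows: a thread that cannot allocate
 * records the current mem_avail_gen and sleeps on mem_avail_cv until enough
 * frames are available and the generation has changed (see
 * frame_alloc_generic()); frame_free() increments the counter and broadcasts
 * the condition variable whenever memory is returned.
 */
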
/********************/
/* Helper functions */
/********************/

static inline index_t frame_index(zone_t *zone, frame_t *frame)
{
	return (index_t) (frame - zone->frames);
}

static inline index_t frame_index_abs(zone_t *zone, frame_t *frame)
{
	return (index_t) (frame - zone->frames) + zone->base;
}

static inline int frame_index_valid(zone_t *zone, index_t index)
{
	return (index < zone->count);
}

/** Compute pfn_t from frame_t pointer & zone pointer */
static index_t make_frame_index(zone_t *zone, frame_t *frame)
{
	return (frame - zone->frames);
}

/** Initialize frame structure.
 *
 * @param frame		Frame structure to be initialized.
 */
static void frame_initialize(frame_t *frame)
{
	frame->refcount = 1;
	frame->buddy_order = 0;
}

/**********************/
/* Zoneinfo functions */
/**********************/

/** Insert-sort zone into zones list.
 *
 * @param newzone	New zone to be inserted into zone list.
 * @return		Zone number on success, -1 on error.
 */
static int zones_add_zone(zone_t *newzone)
{
	unsigned int i, j;
	ipl_t ipl;
	zone_t *z;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	/* Try to merge */
	if (zones.count + 1 == ZONES_MAX) {
		printf("Maximum zone count %u exceeded!\n", ZONES_MAX);
		spinlock_unlock(&zones.lock);
		interrupts_restore(ipl);
		return -1;
	}

	for (i = 0; i < zones.count; i++) {
		/* Check for overlap */
		z = zones.info[i];
		if (overlaps(newzone->base, newzone->count, z->base,
		    z->count)) {
			printf("Zones overlap!\n");
			spinlock_unlock(&zones.lock);
			interrupts_restore(ipl);
			return -1;
		}
		if (newzone->base < z->base)
			break;
	}

	/* Move other zones up */
	for (j = zones.count; j > i; j--)
		zones.info[j] = zones.info[j - 1];

	zones.info[i] = newzone;
	zones.count++;

	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);

	return i;
}

/** Try to find a zone in which the frame can be found.
 *
 * Assume interrupts are disabled.
 *
 * @param frame		Frame number contained in zone.
 * @param pzone		If not null, it is used as zone hint. Zone index is
 *			filled into the variable on success.
 * @return		Pointer to locked zone containing frame.
 */
static zone_t *find_zone_and_lock(pfn_t frame, unsigned int *pzone)
{
	unsigned int i;
	unsigned int hint = pzone ? *pzone : 0;
	zone_t *z;

	spinlock_lock(&zones.lock);

	if (hint >= zones.count)
		hint = 0;

	i = hint;
	do {
		z = zones.info[i];
		spinlock_lock(&z->lock);
		if (z->base <= frame && z->base + z->count > frame) {
			/* Unlock the global lock */
			spinlock_unlock(&zones.lock);
			if (pzone)
				*pzone = i;
			return z;
		}
		spinlock_unlock(&z->lock);

		i++;
		if (i >= zones.count)
			i = 0;
	} while (i != hint);

	spinlock_unlock(&zones.lock);
	return NULL;
}

/** @return True if zone can allocate specified order */
static int zone_can_alloc(zone_t *z, uint8_t order)
{
	return buddy_system_can_alloc(z->buddy_system, order);
}

/** Find and lock zone that can allocate order frames.
 *
 * Assume interrupts are disabled.
 *
 * @param order		Size (2^order) of free space we are trying to find.
 * @param flags		Required flags of the target zone.
 * @param pzone		Pointer to preferred zone or NULL, on return contains
 *			zone number.
 */
static zone_t *
find_free_zone_and_lock(uint8_t order, int flags, unsigned int *pzone)
{
	unsigned int i;
	zone_t *z;
	unsigned int hint = pzone ? *pzone : 0;

	/* Mask off flags that are not applicable. */
	flags &= FRAME_LOW_16_GiB;

	spinlock_lock(&zones.lock);
	if (hint >= zones.count)
		hint = 0;
	i = hint;
	do {
		z = zones.info[i];

		spinlock_lock(&z->lock);

		/*
		 * Check whether the zone meets the search criteria.
		 */
		if ((z->flags & flags) == flags) {
			/*
			 * Check if the zone has 2^order frames area available.
			 */
			if (zone_can_alloc(z, order)) {
				spinlock_unlock(&zones.lock);
				if (pzone)
					*pzone = i;
				return z;
			}
		}
		spinlock_unlock(&z->lock);
		if (++i >= zones.count)
			i = 0;
	} while (i != hint);
	spinlock_unlock(&zones.lock);
	return NULL;
}

/**************************/
/* Buddy system functions */
/**************************/

/** Buddy system find_block implementation.
 *
 * Find the block that is parent of the current list.
 * That means walk towards lower addresses until such a block is found.
 *
 * @param order		Order of the parent must be different than this
 *			parameter!!
 */
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
    uint8_t order)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;

	frame = list_get_instance(child, frame_t, buddy_link);
	zone = (zone_t *) b->data;

	index = frame_index(zone, frame);
	do {
		if (zone->frames[index].buddy_order != order) {
			return &zone->frames[index].buddy_link;
		}
	} while (index-- > 0);
	return NULL;
}

/** Buddy system find_buddy implementation.
 *
 * @param b		Buddy system.
 * @param block		Block for which buddy should be found.
 *
 * @return		Buddy for given block if found.
 */
static link_t *zone_buddy_find_buddy(buddy_system_t *b, link_t *block)
{
	frame_t *frame;
	zone_t *zone;
	index_t index;
	bool is_left, is_right;

	frame = list_get_instance(block, frame_t, buddy_link);
	zone = (zone_t *) b->data;
	ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),
	    frame->buddy_order));

	is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
	is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);

	ASSERT(is_left ^ is_right);
	if (is_left) {
		index = (frame_index(zone, frame)) +
		    (1 << frame->buddy_order);
	} else {	/* if (is_right) */
		index = (frame_index(zone, frame)) -
		    (1 << frame->buddy_order);
	}

	if (frame_index_valid(zone, index)) {
		if (zone->frames[index].buddy_order == frame->buddy_order &&
		    zone->frames[index].refcount == 0) {
			return &zone->frames[index].buddy_link;
		}
	}

	return NULL;
}

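/*
 * A quick worked example of the buddy computation above: for a block of
 * order 2 (four frames) that is the left buddy in its pair, the buddy starts
 * 1 << 2 = 4 frames higher, so a block at zone-relative index 12 has its
 * buddy at index 16; a right buddy at index 16 conversely looks back to
 * index 12.
 */
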
/** Buddy system bisect implementation.
 *
 * @param b		Buddy system.
 * @param block		Block to bisect.
 *
 * @return		Right block.
 */
static link_t *zone_buddy_bisect(buddy_system_t *b, link_t *block)
{
	frame_t *frame_l, *frame_r;

	frame_l = list_get_instance(block, frame_t, buddy_link);
	frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));

	return &frame_r->buddy_link;
}

/** Buddy system coalesce implementation.
 *
 * @param b		Buddy system.
 * @param block_1	First block.
 * @param block_2	First block's buddy.
 *
 * @return		Coalesced block (actually block that represents lower
 *			address).
 */
static link_t *zone_buddy_coalesce(buddy_system_t *b, link_t *block_1,
    link_t *block_2)
{
	frame_t *frame1, *frame2;

	frame1 = list_get_instance(block_1, frame_t, buddy_link);
	frame2 = list_get_instance(block_2, frame_t, buddy_link);

	return frame1 < frame2 ? block_1 : block_2;
}

/** Buddy system set_order implementation.
 *
 * @param b		Buddy system.
 * @param block		Buddy system block.
 * @param order		Order to set.
 */
static void zone_buddy_set_order(buddy_system_t *b, link_t *block,
    uint8_t order)
{
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	frame->buddy_order = order;
}

/** Buddy system get_order implementation.
 *
 * @param b		Buddy system.
 * @param block		Buddy system block.
 *
 * @return		Order of block.
 */
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block)
{
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	return frame->buddy_order;
}

/** Buddy system mark_busy implementation.
 *
 * @param b		Buddy system.
 * @param block		Buddy system block.
 */
static void zone_buddy_mark_busy(buddy_system_t *b, link_t *block)
{
	frame_t *frame;

	frame = list_get_instance(block, frame_t, buddy_link);
	frame->refcount = 1;
}

/** Buddy system mark_available implementation.
 *
 * @param b		Buddy system.
 * @param block		Buddy system block.
 */
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block)
{
	frame_t *frame;
	frame = list_get_instance(block, frame_t, buddy_link);
	frame->refcount = 0;
}

static buddy_system_operations_t zone_buddy_system_operations = {
	.find_buddy = zone_buddy_find_buddy,
	.bisect = zone_buddy_bisect,
	.coalesce = zone_buddy_coalesce,
	.set_order = zone_buddy_set_order,
	.get_order = zone_buddy_get_order,
	.mark_busy = zone_buddy_mark_busy,
	.mark_available = zone_buddy_mark_available,
	.find_block = zone_buddy_find_block
};

/******************/
/* Zone functions */
/******************/

/** Allocate frame in particular zone.
 *
 * Assume zone is locked.
 * Panics if allocation is impossible.
 *
 * @param zone		Zone to allocate from.
 * @param order		Allocate exactly 2^order frames.
 *
 * @return		Frame index in zone.
 *
 */
static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
{
	pfn_t v;
	link_t *tmp;
	frame_t *frame;

	/* Allocate frames from zone buddy system */
	tmp = buddy_system_alloc(zone->buddy_system, order);

	ASSERT(tmp);

	/* Update zone information. */
	zone->free_count -= (1 << order);
	zone->busy_count += (1 << order);

	/* Frame will actually be the first frame of the block. */
	frame = list_get_instance(tmp, frame_t, buddy_link);

	/* get frame address */
	v = make_frame_index(zone, frame);
	return v;
}

/** Free frame from zone.
 *
 * Assume zone is locked.
 *
 * @param zone		Pointer to zone from which the frame is to be freed.
 * @param frame_idx	Frame index relative to zone.
 */
static void zone_frame_free(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	uint8_t order;

	frame = &zone->frames[frame_idx];

	/* remember frame order */
	order = frame->buddy_order;

	ASSERT(frame->refcount);

	if (!--frame->refcount) {
		buddy_system_free(zone->buddy_system, &frame->buddy_link);

		/* Update zone information. */
		zone->free_count += (1 << order);
		zone->busy_count -= (1 << order);
	}
}

/** Return frame from zone. */
static frame_t *zone_get_frame(zone_t *zone, index_t frame_idx)
{
	ASSERT(frame_idx < zone->count);
	return &zone->frames[frame_idx];
}

/** Mark frame in zone unavailable to allocation. */
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx)
{
	frame_t *frame;
	link_t *link;

	frame = zone_get_frame(zone, frame_idx);
	if (frame->refcount)
		return;
	link = buddy_system_alloc_block(zone->buddy_system,
	    &frame->buddy_link);
	ASSERT(link);
	zone->free_count--;

	mutex_lock(&mem_avail_mtx);
	mem_avail_frames--;
	mutex_unlock(&mem_avail_mtx);
}

/** Join two zones.
 *
 * Expect zone_t *z to point to space at least zone_conf_size large.
 *
 * Assume z1 & z2 are locked.
 *
 * @param z		Target zone structure pointer.
 * @param z1		Zone to merge.
 * @param z2		Zone to merge.
 */
static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
{
	uint8_t max_order;
	unsigned int i;
	int z2idx;
	pfn_t frame_idx;
	frame_t *frame;

	ASSERT(!overlaps(z1->base, z1->count, z2->base, z2->count));
	ASSERT(z1->base < z2->base);

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = z1->base;
	z->count = z2->base + z2->count - z1->base;
	z->flags = z1->flags & z2->flags;

	z->free_count = z1->free_count + z2->free_count;
	z->busy_count = z1->busy_count + z2->busy_count;

	max_order = fnzb(z->count);

	z->buddy_system = (buddy_system_t *) &z[1];
	buddy_system_create(z->buddy_system, max_order,
	    &zone_buddy_system_operations, (void *) z);

	z->frames = (frame_t *)((uint8_t *) z->buddy_system +
	    buddy_conf_size(max_order));
	for (i = 0; i < z->count; i++) {
		/* This marks all frames busy */
		frame_initialize(&z->frames[i]);
	}
	/*
	 * Copy frames from both zones to preserve full frame orders,
	 * parents etc. Set all free frames (refcount == 0) to refcount = 1
	 * and clear their order to 0, because we add all free frames to the
	 * buddy allocator again later on. Don't touch the busy frames, as
	 * they will not be reallocated during the merge and doing so would
	 * cause problems with later allocation/free.
	 */
	for (i = 0; i < z1->count; i++)
		z->frames[i] = z1->frames[i];
	for (i = 0; i < z2->count; i++) {
		z2idx = i + (z2->base - z1->base);
		z->frames[z2idx] = z2->frames[i];
	}
	i = 0;
	while (i < z->count) {
		if (z->frames[i].refcount) {
			/* skip busy frames */
			i += 1 << z->frames[i].buddy_order;
		} else {	/* Free frames, set refcount = 1 */
			/*
			 * All free frames have refcount = 0, we need not
			 * check the order.
			 */
			z->frames[i].refcount = 1;
			z->frames[i].buddy_order = 0;
			i++;
		}
	}
	/* Add free blocks from the 2 original zones */
	while (zone_can_alloc(z1, 0)) {
		frame_idx = zone_frame_alloc(z1, 0);
		frame = &z->frames[frame_idx];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
	while (zone_can_alloc(z2, 0)) {
		frame_idx = zone_frame_alloc(z2, 0);
		frame = &z->frames[frame_idx + (z2->base - z1->base)];
		frame->refcount = 0;
		buddy_system_free(z->buddy_system, &frame->buddy_link);
	}
}

/** Return old configuration frames into the zone.
 *
 * We have several cases:
 * - the conf. data is outside of the zone -> exit, shall we call frame_free??
 * - the conf. data was created by zone_create or
 *   updated with reduce_region -> free every frame
 *
 * @param newzone	The actual zone where freeing should occur.
 * @param oldzone	Pointer to old zone configuration data that should
 *			be freed from new zone.
 */
static void return_config_frames(zone_t *newzone, zone_t *oldzone)
{
	pfn_t pfn;
	frame_t *frame;
	count_t cframes;
	unsigned int i;

	pfn = ADDR2PFN((uintptr_t) KA2PA(oldzone));
	cframes = SIZE2FRAMES(zone_conf_size(oldzone->count));

	if (pfn < newzone->base || pfn >= newzone->base + newzone->count)
		return;

	frame = &newzone->frames[pfn - newzone->base];
	ASSERT(!frame->buddy_order);

	for (i = 0; i < cframes; i++) {
		newzone->busy_count++;
		zone_frame_free(newzone, pfn + i - newzone->base);
	}
}

/** Reduce allocated block to count of order 0 frames.
 *
 * The allocated block needs 2^order frames of space. Reduce all frames
 * in the block to order 0 and free the unneeded frames. This means that
 * when freeing the previously allocated block starting with frame_idx,
 * you have to free every frame.
 *
 * @param zone
 * @param frame_idx	Index to block.
 * @param count		Allocated space in block.
 */
static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count)
{
	count_t i;
	uint8_t order;
	frame_t *frame;

	ASSERT(frame_idx + count < zone->count);

	order = zone->frames[frame_idx].buddy_order;
	ASSERT((count_t) (1 << order) >= count);

	/* Reduce all blocks to order 0 */
	for (i = 0; i < (count_t) (1 << order); i++) {
		frame = &zone->frames[i + frame_idx];
		frame->buddy_order = 0;
		if (!frame->refcount)
			frame->refcount = 1;
		ASSERT(frame->refcount == 1);
	}
	/* Free unneeded frames */
	for (i = count; i < (count_t) (1 << order); i++) {
		zone_frame_free(zone, i + frame_idx);
	}
}

/** Merge zones z1 and z2.
 *
 * - the zones must be 2 zones with no zone existing in between,
 *   which means that z2 = z1 + 1
 *
 * - When you create a new zone, the frame allocator configuration does
 *   not have to be 2^order in size. Once the allocator is running it is
 *   no longer possible, merged configuration data occupies more space :-/
 */
void zone_merge(unsigned int z1, unsigned int z2)
{
	ipl_t ipl;
	zone_t *zone1, *zone2, *newzone;
	unsigned int cframes;
	uint8_t order;
	unsigned int i;
	pfn_t pfn;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	if ((z1 >= zones.count) || (z2 >= zones.count))
		goto errout;
	/* We can join only 2 zones with none existing in between */
	if (z2 - z1 != 1)
		goto errout;

	zone1 = zones.info[z1];
	zone2 = zones.info[z2];
	spinlock_lock(&zone1->lock);
	spinlock_lock(&zone2->lock);

	cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count -
	    zone1->base));
	if (cframes == 1)
		order = 0;
	else
		order = fnzb(cframes - 1) + 1;

	/* Allocate zonedata inside one of the zones */
	if (zone_can_alloc(zone1, order))
		pfn = zone1->base + zone_frame_alloc(zone1, order);
	else if (zone_can_alloc(zone2, order))
		pfn = zone2->base + zone_frame_alloc(zone2, order);
	else
		goto errout2;

	newzone = (zone_t *) PA2KA(PFN2ADDR(pfn));

	_zone_merge(newzone, zone1, zone2);

	/* Free unneeded config frames */
	zone_reduce_region(newzone, pfn - newzone->base, cframes);
	/* Subtract zone information from busy frames */
	newzone->busy_count -= cframes;

	/* Replace existing zones in zoneinfo list */
	zones.info[z1] = newzone;
	for (i = z2 + 1; i < zones.count; i++)
		zones.info[i - 1] = zones.info[i];
	zones.count--;

	/* Free old zone information */
	return_config_frames(newzone, zone1);
	return_config_frames(newzone, zone2);
errout2:
	/*
	 * Nobody is allowed to enter the zone, so we are safe
	 * to touch the spinlocks for the last time.
	 */
	spinlock_unlock(&zone1->lock);
	spinlock_unlock(&zone2->lock);
errout:
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);
}

/** Merge all zones into one big zone.
 *
 * It is reasonable to do this on systems whose BIOS reports memory
 * in several chunks, so that we could have 1 zone (it's faster).
 */
void zone_merge_all(void)
{
	int count = zones.count;

	while (zones.count > 1 && --count) {
		zone_merge(0, 1);
		break;
	}
}

/** Create new frame zone.
 *
 * @param start		Frame number of the first frame within the zone.
 * @param count		Count of frames in zone.
 * @param z		Address of configuration information of zone.
 * @param flags		Zone flags.
 *
 * @return		Initialized zone.
 */
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
{
	unsigned int i;
	uint8_t max_order;

	spinlock_initialize(&z->lock, "zone_lock");
	z->base = start;
	z->count = count;

	/* Mask off flags that are calculated automatically. */
	flags &= ~FRAME_LOW_16_GiB;
	/* Determine calculated flags. */
	if (z->base + count < (1ULL << (34 - FRAME_WIDTH)))	/* 16 GiB */
		flags |= FRAME_LOW_16_GiB;

	z->flags = flags;

	z->free_count = count;
	z->busy_count = 0;

	/*
	 * Compute order for buddy system, initialize
	 */
	max_order = fnzb(count);
	z->buddy_system = (buddy_system_t *) &z[1];

	buddy_system_create(z->buddy_system, max_order,
	    &zone_buddy_system_operations, (void *) z);

	/* Allocate frames _after_ the conframe */
	/* Check sizes */
	z->frames = (frame_t *)((uint8_t *) z->buddy_system +
	    buddy_conf_size(max_order));
	for (i = 0; i < count; i++) {
		frame_initialize(&z->frames[i]);
	}

	/* Stuffing frames */
	for (i = 0; i < count; i++) {
		z->frames[i].refcount = 0;
		buddy_system_free(z->buddy_system, &z->frames[i].buddy_link);
	}
}

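/*
 * The zone configuration area built above is therefore one contiguous block:
 *
 *	+--------+---------------------------------+------------------+
 *	| zone_t | buddy system (buddy_conf_size)  | frame_t[count]   |
 *	+--------+---------------------------------+------------------+
 */
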
/** Compute configuration data size for zone.
 *
 * @param count		Size of zone in frames.
 * @return		Size of zone configuration info (in bytes).
 */
uintptr_t zone_conf_size(count_t count)
{
	int size = sizeof(zone_t) + count * sizeof(frame_t);
	int max_order;

	max_order = fnzb(count);
	size += buddy_conf_size(max_order);
	return size;
}

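/*
 * For example, a zone of 2^16 frames needs sizeof(zone_t) +
 * 65536 * sizeof(frame_t) bytes for the zone header and the frame array,
 * plus buddy_conf_size(16) bytes of buddy allocator bookkeeping;
 * zone_create() reserves SIZE2FRAMES() of this amount at confframe.
 */
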
/** Create and add zone to system.
 *
 * @param start		First frame number (absolute).
 * @param count		Size of zone in frames.
 * @param confframe	Where configuration frames are supposed to be.
 *			Automatically checks, that we will not disturb the
 *			kernel and possibly init. If confframe is given
 *			_outside_ this zone, it is expected, that the area is
 *			already marked BUSY and big enough to contain
 *			zone_conf_size() amount of data. If the confframe is
 *			inside the area, the zone free frame information is
 *			modified not to include it.
 *
 * @return		Zone number or -1 on error.
 */
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
{
	zone_t *z;
	uintptr_t addr;
	count_t confcount;
	unsigned int i;
	int znum;

	/*
	 * Theoretically we could have confframe == 0 here; practically make
	 * sure nobody tries to do that. If some platform requires it, remove
	 * the assert.
	 */
	ASSERT(confframe);
	/*
	 * If confframe is supposed to be inside our zone, then make sure
	 * it does not span kernel & init.
	 */
	confcount = SIZE2FRAMES(zone_conf_size(count));
	if (confframe >= start && confframe < start + count) {
		for (; confframe < start + count; confframe++) {
			addr = PFN2ADDR(confframe);
			if (overlaps(addr, PFN2ADDR(confcount),
			    KA2PA(config.base), config.kernel_size))
				continue;

			if (overlaps(addr, PFN2ADDR(confcount),
			    KA2PA(config.stack_base), config.stack_size))
				continue;

			bool overlap = false;
			count_t i;
			for (i = 0; i < init.cnt; i++)
				if (overlaps(addr, PFN2ADDR(confcount),
				    KA2PA(init.tasks[i].addr),
				    init.tasks[i].size)) {
					overlap = true;
					break;
				}
			if (overlap)
				continue;

			break;
		}
		if (confframe >= start + count)
			panic("Cannot find configuration data for zone.");
	}

	z = (zone_t *) PA2KA(PFN2ADDR(confframe));
	zone_construct(start, count, z, flags);
	znum = zones_add_zone(z);
	if (znum == -1)
		return -1;

	mutex_lock(&mem_avail_mtx);
	mem_avail_frames += count;
	mutex_unlock(&mem_avail_mtx);

	/* If confdata is in the zone, mark it as unavailable */
	if (confframe >= start && confframe < start + count)
		for (i = confframe; i < confframe + confcount; i++) {
			zone_mark_unavailable(z, i - z->base);
		}

	return znum;
}

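/*
 * A typical caller is the architecture's frame_arch_init(), which, for a
 * detected region of physical memory, would do something along the lines of
 * (region_base and region_size being placeholders):
 *
 *	pfn_t start = ADDR2PFN(region_base);
 *	count_t count = SIZE2FRAMES(region_size);
 *	zone_create(start, count, start, 0);
 *
 * i.e. the configuration frames are placed at the beginning of the zone
 * unless they would collide with the kernel or init images.
 */
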
/*******************/
/* Frame functions */
/*******************/

/** Set parent of frame. */
void frame_set_parent(pfn_t pfn, void *data, unsigned int hint)
{
	zone_t *zone = find_zone_and_lock(pfn, &hint);

	ASSERT(zone);

	zone_get_frame(zone, pfn - zone->base)->parent = data;
	spinlock_unlock(&zone->lock);
}

void *frame_get_parent(pfn_t pfn, unsigned int hint)
{
	zone_t *zone = find_zone_and_lock(pfn, &hint);
	void *res;

	ASSERT(zone);
	res = zone_get_frame(zone, pfn - zone->base)->parent;

	spinlock_unlock(&zone->lock);
	return res;
}

/** Allocate power-of-two frames of physical memory.
 *
 * @param order		Allocate exactly 2^order frames.
 * @param flags		Flags for host zone selection and address processing.
 * @param pzone		Preferred zone.
 *
 * @return		Physical address of the allocated frame.
 *
 */
void *frame_alloc_generic(uint8_t order, int flags, unsigned int *pzone)
{
	ipl_t ipl;
	int freed;
	pfn_t v;
	zone_t *zone;
	unsigned long gen = 0;

loop:
	ipl = interrupts_disable();

	/*
	 * First, find suitable frame zone.
	 */
	zone = find_free_zone_and_lock(order, flags, pzone);

	/*
	 * If no memory, reclaim some slab memory;
	 * if it does not help, reclaim all.
	 */
	if (!zone && !(flags & FRAME_NO_RECLAIM)) {
		freed = slab_reclaim(0);
		if (freed)
			zone = find_free_zone_and_lock(order, flags, pzone);
		if (!zone) {
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			if (freed)
				zone = find_free_zone_and_lock(order, flags,
				    pzone);
		}
	}
	if (!zone) {
		/*
		 * Sleep until some frames are available again.
		 */
		if (flags & FRAME_ATOMIC) {
			interrupts_restore(ipl);
			return 0;
		}

#ifdef CONFIG_DEBUG
		unsigned long avail;

		mutex_lock(&mem_avail_mtx);
		avail = mem_avail_frames;
		mutex_unlock(&mem_avail_mtx);

		printf("Thread %" PRIu64 " waiting for %u frames, "
		    "%u available.\n", THREAD->tid, 1ULL << order, avail);
#endif

		mutex_lock(&mem_avail_mtx);
		while ((mem_avail_frames < (1ULL << order)) ||
		    gen == mem_avail_gen)
			condvar_wait(&mem_avail_cv, &mem_avail_mtx);
		gen = mem_avail_gen;
		mutex_unlock(&mem_avail_mtx);

#ifdef CONFIG_DEBUG
		mutex_lock(&mem_avail_mtx);
		avail = mem_avail_frames;
		mutex_unlock(&mem_avail_mtx);

		printf("Thread %" PRIu64 " woken up, %u frames available.\n",
		    THREAD->tid, avail);
#endif

		interrupts_restore(ipl);
		goto loop;
	}

	v = zone_frame_alloc(zone, order);
	v += zone->base;

	spinlock_unlock(&zone->lock);

	mutex_lock(&mem_avail_mtx);
	mem_avail_frames -= (1ULL << order);
	mutex_unlock(&mem_avail_mtx);

	interrupts_restore(ipl);

	if (flags & FRAME_KA)
		return (void *) PA2KA(PFN2ADDR(v));
	return (void *) PFN2ADDR(v);
}

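/*
 * So, for instance, a caller that needs a single frame mapped into the
 * kernel address space would use frame_alloc_generic(0, FRAME_KA, NULL),
 * and would later release it with frame_free(KA2PA(addr)), since frame_free()
 * below takes the physical address; a caller that must not block would add
 * FRAME_ATOMIC and check for a 0 return value.
 */
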
/** Free a frame.
 *
 * Find respective frame structure for supplied physical frame address.
 * Decrement frame reference count.
 * If it drops to zero, move the frame structure to free list.
 *
 * @param frame		Physical address of the frame to be freed.
 */
void frame_free(uintptr_t frame)
{
	ipl_t ipl;
	zone_t *zone;
	pfn_t pfn = ADDR2PFN(frame);

	ipl = interrupts_disable();

	/*
	 * First, find host frame zone for addr.
	 */
	zone = find_zone_and_lock(pfn, NULL);
	ASSERT(zone);

	zone_frame_free(zone, pfn - zone->base);

	spinlock_unlock(&zone->lock);

	/*
	 * Signal that some memory has been freed.
	 */
	mutex_lock(&mem_avail_mtx);
	mem_avail_frames++;
	mem_avail_gen++;
	condvar_broadcast(&mem_avail_cv);
	mutex_unlock(&mem_avail_mtx);

	interrupts_restore(ipl);
}

/** Add reference to frame.
 *
 * Find respective frame structure for supplied PFN and
 * increment frame reference count.
 *
 * @param pfn		Frame number of the frame to be referenced.
 */
void frame_reference_add(pfn_t pfn)
{
	ipl_t ipl;
	zone_t *zone;
	frame_t *frame;

	ipl = interrupts_disable();

	/*
	 * First, find host frame zone for addr.
	 */
	zone = find_zone_and_lock(pfn, NULL);
	ASSERT(zone);

	frame = &zone->frames[pfn - zone->base];
	frame->refcount++;

	spinlock_unlock(&zone->lock);
	interrupts_restore(ipl);
}

/** Mark given range unavailable in frame zones. */
void frame_mark_unavailable(pfn_t start, count_t count)
{
	unsigned int i;
	zone_t *zone;
	unsigned int prefzone = 0;

	for (i = 0; i < count; i++) {
		zone = find_zone_and_lock(start + i, &prefzone);
		if (!zone)	/* PFN not found */
			continue;
		zone_mark_unavailable(zone, start + i - zone->base);

		spinlock_unlock(&zone->lock);
	}
}

/** Initialize physical memory management. */
void frame_init(void)
{
	if (config.cpu_active == 1) {
		zones.count = 0;
		spinlock_initialize(&zones.lock, "zones.lock");
		mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
		condvar_initialize(&mem_avail_cv);
	}
	/* Tell the architecture to create some memory */
	frame_arch_init();
	if (config.cpu_active == 1) {
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
		    SIZE2FRAMES(config.kernel_size));
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
		    SIZE2FRAMES(config.stack_size));

		count_t i;
		for (i = 0; i < init.cnt; i++) {
			pfn_t pfn = ADDR2PFN(KA2PA(init.tasks[i].addr));
			frame_mark_unavailable(pfn,
			    SIZE2FRAMES(init.tasks[i].size));
		}

		if (ballocs.size)
			frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
			    SIZE2FRAMES(ballocs.size));

		/*
		 * Blacklist the first frame, as allocating NULL would
		 * fail in some places.
		 */
		frame_mark_unavailable(0, 1);
	}
}

/** Return total size of all zones. */
uint64_t zone_total_size(void)
{
	zone_t *zone = NULL;
	unsigned int i;
	ipl_t ipl;
	uint64_t total = 0;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	for (i = 0; i < zones.count; i++) {
		zone = zones.info[i];
		spinlock_lock(&zone->lock);
		total += (uint64_t) FRAMES2SIZE(zone->count);
		spinlock_unlock(&zone->lock);
	}

	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);

	return total;
}

/** Prints list of zones. */
void zone_print_list(void)
{
	zone_t *zone = NULL;
	unsigned int i;
	ipl_t ipl;

#ifdef __32_BITS__
	printf("#  base address free frames  busy frames\n");
	printf("-- ------------ ------------ ------------\n");
#endif

#ifdef __64_BITS__
	printf("#  base address         free frames  busy frames\n");
	printf("-- -------------------- ------------ ------------\n");
#endif

	/*
	 * Because printing may require allocation of memory, we may not hold
	 * the frame allocator locks when printing zone statistics. Therefore,
	 * we simply gather the statistics under the protection of the locks
	 * and print the statistics when the locks have been released.
	 *
	 * When someone adds/removes zones while we are printing the
	 * statistics, we may end up with inaccurate output (e.g. a zone being
	 * skipped from the listing).
	 */

	for (i = 0; ; i++) {
		uintptr_t base;
		count_t free_count;
		count_t busy_count;

		ipl = interrupts_disable();
		spinlock_lock(&zones.lock);

		if (i >= zones.count) {
			spinlock_unlock(&zones.lock);
			interrupts_restore(ipl);
			break;
		}

		zone = zones.info[i];
		spinlock_lock(&zone->lock);

		base = PFN2ADDR(zone->base);
		free_count = zone->free_count;
		busy_count = zone->busy_count;

		spinlock_unlock(&zone->lock);

		spinlock_unlock(&zones.lock);
		interrupts_restore(ipl);

#ifdef __32_BITS__
		printf("%-2u %10p %12" PRIc " %12" PRIc "\n", i, base,
		    free_count, busy_count);
#endif

#ifdef __64_BITS__
		printf("%-2u %18p %12" PRIc " %12" PRIc "\n", i, base,
		    free_count, busy_count);
#endif

	}
}

/** Prints zone details.
 *
 * @param num		Zone base address or zone number.
 */
void zone_print_one(unsigned int num)
{
	zone_t *zone = NULL;
	ipl_t ipl;
	unsigned int i;
	uintptr_t base;
	count_t count;
	count_t busy_count;
	count_t free_count;

	ipl = interrupts_disable();
	spinlock_lock(&zones.lock);

	for (i = 0; i < zones.count; i++) {
		if ((i == num) || (PFN2ADDR(zones.info[i]->base) == num)) {
			zone = zones.info[i];
			break;
		}
	}
	if (!zone) {
		spinlock_unlock(&zones.lock);
		interrupts_restore(ipl);
		printf("Zone not found.\n");
		return;
	}

	spinlock_lock(&zone->lock);
	base = PFN2ADDR(zone->base);
	count = zone->count;
	busy_count = zone->busy_count;
	free_count = zone->free_count;
	spinlock_unlock(&zone->lock);
	spinlock_unlock(&zones.lock);
	interrupts_restore(ipl);

	printf("Zone base address: %p\n", base);
	printf("Zone size: %" PRIc " frames (%" PRIs " KiB)\n", count,
	    SIZE2KB(FRAMES2SIZE(count)));
	printf("Allocated space: %" PRIc " frames (%" PRIs " KiB)\n",
	    busy_count, SIZE2KB(FRAMES2SIZE(busy_count)));
	printf("Available space: %" PRIc " frames (%" PRIs " KiB)\n",
	    free_count, SIZE2KB(FRAMES2SIZE(free_count)));
}

/** @}
 */