Diff of the SLAB allocator between Rev 767 and Rev 768.
```diff
@@ Line 73 ... Line 73 @@
 
     data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
     if (status != FRAME_OK) {
         return NULL;
     }
-    if (! cache->flags & SLAB_CACHE_SLINSIDE) {
+    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
         slab = malloc(sizeof(*slab)); // , flags);
         if (!slab) {
             frame_free((__address)data);
             return NULL;
         }
```
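This hunk (and its twins in `slab_space_free`, the cache-init code, and `_slab_alloc` further down) fixes an operator-precedence bug: in C, logical `!` binds tighter than bitwise `&`, so `! cache->flags & SLAB_CACHE_SLINSIDE` parses as `(!cache->flags) & SLAB_CACHE_SLINSIDE`, which is 0 whenever *any* flag is set, regardless of `SLAB_CACHE_SLINSIDE` itself. A minimal standalone sketch of the divergence (the flag values here are made up for illustration):

```c
#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define SLAB_CACHE_SLINSIDE   0x2
#define SLAB_CACHE_NOMAGAZINE 0x4

int main(void)
{
    /* Some other flag is set, but SLINSIDE is not. */
    int flags = SLAB_CACHE_NOMAGAZINE;

    /* Buggy: !flags is evaluated first (yielding 0), so the whole
     * expression is 0 and the branch is wrongly skipped. */
    printf("buggy: %d\n", ! flags & SLAB_CACHE_SLINSIDE);    /* prints 0 */

    /* Fixed: mask the bit first, then negate the result. */
    printf("fixed: %d\n", ! (flags & SLAB_CACHE_SLINSIDE));  /* prints 1 */
    return 0;
}
```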
```diff
@@ Line 100 ... Line 100 @@
 
     for (i=0; i<cache->objects;i++)
         *((int *) (slab->start + i*cache->size)) = i+1;
 
     atomic_inc(&cache->allocated_slabs);
-
     return slab;
 }
 
 /**
  * Deallocate space associated with SLAB
```
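The `i+1` loop above threads an implicit free list through the fresh slab: the first word of each free object holds the index of the next free object, so no separate bookkeeping array is needed. A sketch of how an allocation would pop from that chain, assuming a hypothetical `nextavail` index field on `slab_t` (the real field name and types may differ):

```c
/* Sketch only: consuming the index chain built by the i+1 loop.
 * 'nextavail' is an assumed field holding the first free index. */
static void *slab_obj_pop(slab_cache_t *cache, slab_t *slab)
{
    void *obj = (void *) (slab->start + slab->nextavail * cache->size);

    /* A free object's first word is the index of its successor. */
    slab->nextavail = *((int *) obj);
    return obj;
}
```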
```diff
@@ Line 112 ... Line 111 @@
  * @return number of freed frames
  */
 static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
 {
     frame_free((__address)slab->start);
-    if (! cache->flags & SLAB_CACHE_SLINSIDE)
+    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
         free(slab);
 
     atomic_dec(&cache->allocated_slabs);
 
     return 1 << cache->order;
```
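Note the symmetry with the allocation path: slabs are sized in frame-allocator orders, so one slab spans 2^order physically contiguous frames, and the free path accordingly reports `1 << cache->order` frames returned (order 0 → 1 frame, order 2 → 4 frames).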
```diff
@@ Line 275 ... Line 274 @@
         spinlock_unlock(&cache->lock);
         goto out;
     }
     /* Free current magazine and take one from list */
     slab_free(&mag_cache, mag);
+
     mag = list_get_instance(cache->magazines.next,
                             slab_magazine_t,
                             link);
     list_remove(&mag->link);
 
```
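`list_get_instance(link, type, member)` is a `container_of`-style accessor: given a pointer to an embedded `link_t`, it recovers the enclosing structure, here the `slab_magazine_t` that owns the list link. A sketch of what such a macro conventionally expands to (the actual HelenOS definition may differ):

```c
/* container_of-style recovery of the enclosing structure from a
 * pointer to one of its members; a sketch, not the real macro. */
#define list_get_instance(link, type, member) \
    ((type *) (((char *) (link)) - ((char *) &((type *) NULL)->member)))
```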
```diff
@@ Line 294 ... Line 294 @@
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
     return NULL;
 }
 
 /**
- * Put object into CPU-cache magazine
+ * Assure that the current magazine is empty, return pointer to it, or NULL if
+ * no empty magazine available and cannot be allocated
  *
  * We have 2 magazines bound to processor.
  * First try the current.
  * If full, try the last.
  * If full, put to magazines list.
  * allocate new, exchange last & current
  *
+ */
+static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
+{
+    slab_magazine_t *cmag,*lastmag,*newmag;
+
+    cmag = cache->mag_cache[CPU->id].current;
+    lastmag = cache->mag_cache[CPU->id].last;
+
+    if (cmag) {
+        if (cmag->busy < cmag->size)
+            return cmag;
+        if (lastmag && lastmag->busy < lastmag->size) {
+            cache->mag_cache[CPU->id].last = cmag;
+            cache->mag_cache[CPU->id].current = lastmag;
+            return lastmag;
+        }
+    }
+    /* current | last are full | nonexistent, allocate new */
+    /* We do not want to sleep just because of caching */
+    /* Especially we do not want reclaiming to start, as
+     * this would deadlock */
+    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
+    if (!newmag)
+        return NULL;
+    newmag->size = SLAB_MAG_SIZE;
+    newmag->busy = 0;
+
+    /* Flush last to magazine list */
+    if (lastmag)
+        list_prepend(&lastmag->link, &cache->magazines);
+    /* Move current as last, save new as current */
+    cache->mag_cache[CPU->id].last = cmag;
+    cache->mag_cache[CPU->id].current = newmag;
+
+    return newmag;
+}
+
+/**
+ * Put object into CPU-cache magazine
+ *
  * @return 0 - success, -1 - could not get memory
  */
 static int magazine_obj_put(slab_cache_t *cache, void *obj)
 {
     slab_magazine_t *mag;
 
     spinlock_lock(&cache->mag_cache[CPU->id].lock);
+
+    mag = make_empty_current_mag(cache);
+    if (!mag)
+        goto errout;
 
-    mag = cache->mag_cache[CPU->id].current;
-    if (!mag) {
-        /* We do not want to sleep just because of caching */
-        /* Especially we do not want reclaiming to start, as
-         * this would deadlock */
-        mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
-        if (!mag) /* Allocation failed, give up on caching */
-            goto errout;
-
-        cache->mag_cache[CPU->id].current = mag;
-        mag->size = SLAB_MAG_SIZE;
-        mag->busy = 0;
-    } else if (mag->busy == mag->size) {
-        /* If the last is full | empty, allocate new */
-        mag = cache->mag_cache[CPU->id].last;
-        if (!mag || mag->size == mag->busy) {
-            if (mag)
-                list_prepend(&mag->link, &cache->magazines);
-
-            mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
-            if (!mag)
-                goto errout;
-
-            mag->size = SLAB_MAG_SIZE;
-            mag->busy = 0;
-            cache->mag_cache[CPU->id].last = mag;
-        }
-        /* Exchange the 2 */
-        cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
-        cache->mag_cache[CPU->id].current = mag;
-    }
     mag->objs[mag->busy++] = obj;
 
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
     atomic_inc(&cache->cached_objs);
     return 0;
```
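The bulk of this hunk is a refactor: the "find or create a non-full current magazine" logic moves out of `magazine_obj_put()` into the new helper `make_empty_current_mag()`, and the put path shrinks to "get a magazine with room, then store the object", removing the allocate-and-initialize code the old body duplicated across two branches. This is the classic per-CPU magazine layer: each processor keeps a `current` and a `last` magazine so most frees are a simple array store under a per-CPU lock. The state the two functions manipulate can be reconstructed from the fields the diff touches; the declarations below are an inferred sketch, not the real headers:

```c
/* Inferred from usage in the diff (mag->size, mag->busy, mag->objs,
 * mag->link, and cache->mag_cache[CPU->id].{current,last,lock});
 * the real HelenOS declarations may differ. */
typedef struct {
    link_t link;      /* chains full magazines into cache->magazines */
    count_t size;     /* capacity, set to SLAB_MAG_SIZE on creation  */
    count_t busy;     /* number of object pointers currently cached  */
    void *objs[0];    /* the cached objects themselves               */
} slab_magazine_t;

typedef struct {
    slab_magazine_t *current; /* frees go here first                 */
    slab_magazine_t *last;    /* swapped with current when one fills */
    spinlock_t lock;          /* per-CPU lock taken by put/get paths */
} slab_mag_cache_t;
```

With that state, the rewritten `magazine_obj_put()` is just `make_empty_current_mag()` followed by `mag->objs[mag->busy++] = obj;`.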
```diff
@@ Line 406 ... Line 420 @@
 
     list_initialize(&cache->full_slabs);
     list_initialize(&cache->partial_slabs);
     list_initialize(&cache->magazines);
     spinlock_initialize(&cache->lock, "cachelock");
-    if (! cache->flags & SLAB_CACHE_NOMAGAZINE) {
+    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
         for (i=0; i< config.cpu_count; i++)
             spinlock_initialize(&cache->mag_cache[i].lock,
                                 "cpucachelock");
     }
 
```
```diff
@@ Line 455 ... Line 469 @@
 /**
  * Reclaim space occupied by objects that are already free
  *
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
  * @return Number of freed pages
- *
- * TODO: Add light reclaim
  */
 static count_t _slab_reclaim(slab_cache_t *cache, int flags)
 {
     int i;
     slab_magazine_t *mag;
```
```diff
@@ Line 491 ... Line 503 @@
         }
     }
     /* Destroy full magazines */
     cur=cache->magazines.prev;
 
-    while (cur!=&cache->magazines) {
+    while (cur != &cache->magazines) {
         mag = list_get_instance(cur, slab_magazine_t, link);
 
         cur = cur->prev;
-        list_remove(cur->next);
-        // list_remove(&mag->link);
+        list_remove(&mag->link);
         frames += magazine_destroy(cache,mag);
         /* If we do not do full reclaim, break
          * as soon as something is freed */
         if (!(flags & SLAB_RECLAIM_ALL) && frames)
             break;
```
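The loop change here is a cleanup of the remove-while-iterating pattern: the old code stepped `cur` backwards and then unlinked `cur->next` (which at that point is the magazine's own link), while the direct form sat commented out; Rev 768 switches to the equivalent but clearer `list_remove(&mag->link)`. Either way, the invariant is that the cursor must advance *before* the node is unlinked and destroyed. A minimal sketch of the pattern (`item_t` and `destroy()` are placeholders, not real HelenOS identifiers):

```c
/* Safe backward iteration with removal. */
cur = list_head->prev;
while (cur != list_head) {
    item_t *it = list_get_instance(cur, item_t, link);

    cur = cur->prev;          /* advance BEFORE unlinking       */
    list_remove(&it->link);   /* now unlinking cannot strand us */
    destroy(it);
}
```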
```diff
@@ Line 542 ... Line 553 @@
     void *result = NULL;
 
     /* Disable interrupts to avoid deadlocks with interrupt handlers */
     ipl = interrupts_disable();
 
-    if (!cache->flags & SLAB_CACHE_NOMAGAZINE)
+    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         result = magazine_obj_get(cache);
 
     if (!result) {
         spinlock_lock(&cache->lock);
         result = slab_obj_create(cache, flags);
```