/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief	Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first looked up in a CPU-bound
 * magazine. If it is not found there, it is allocated from a CPU-shared
 * slab - if a partially full one is found, it is used, otherwise a new one
 * is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if this fails,
 * the object is deallocated into its slab). If the magazine is full, it is
 * put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked that it should not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU-scaling the magazine allocation strategy should be
 * extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the one per-system
 * magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
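
/**
 * Usage sketch (illustrative only - frob_t and frob_init() below are
 * hypothetical and do not exist in the kernel):
 *
 * @code
 *	slab_cache_t *frob_cache;
 *	frob_t *frob;
 *
 *	// Create a cache of frob_t objects with default alignment,
 *	// no constructor/destructor and magazines enabled.
 *	frob_cache = slab_cache_create("frob_cache", sizeof(frob_t), 0,
 *				       NULL, NULL, 0);
 *
 *	// Allocate an object; it is taken from a CPU-bound magazine when
 *	// possible, otherwise from a (possibly newly allocated) slab.
 *	frob = slab_alloc(frob_cache, 0);
 *	frob_init(frob);
 *
 *	// Return the object; it is cached in a CPU-bound magazine for reuse.
 *	slab_free(frob_cache, frob);
 *
 *	// Destroy the cache once all its objects have been returned.
 *	slab_cache_destroy(frob_cache);
 * @endcode
 */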
 
#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want per-cpu cache, so do not make it static
 * - using slab for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-16",
	"malloc-32",
	"malloc-64",
	"malloc-128",
	"malloc-256",
	"malloc-512",
	"malloc-1K",
	"malloc-2K",
	"malloc-4K",
	"malloc-8K",
	"malloc-16K",
	"malloc-32K",
	"malloc-64K",
	"malloc-128K",
	"malloc-256K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache. */
	link_t link;		/**< List of full/partial slabs. */
	void *start;		/**< Start address of first available item. */
	count_t available;	/**< Count of available items in this slab. */
	index_t nextavail;	/**< The index of next available item. */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions          */

/**
 * Allocate frames for slab space and initialize the slab
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	unsigned int i;
	unsigned int zone = 0;

	data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
	if (!data) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(KA2PA(data));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

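	/*
	 * Build an implicit free list: while an object is free, its first
	 * int stores the index of the next free object in this slab.
	 */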
	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab If the caller knows the slab of the object, pass it; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

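	/* Push the object onto the slab's implicit free list. */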
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take new object from slab or create new if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1 level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
		list_remove(&slab->link);
	}
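	/* Take the first free object from the slab's implicit free list. */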
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Finds a full magazine in cache, takes it from list
 * and returns it
 *
 * @param first If true, return first, else last mag
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	unsigned int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Assure that the current magazine is empty, return pointer to it, or NULL if
 * no empty magazine is available and one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to each processor.
 * First try the current.
 *  If it is full, try the last.
 *   If that is full too, put it onto the magazine list,
 *   allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag,*lastmag,*newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* Slab cache functions */

/** Return number of objects that fit in certain cache size */
static unsigned int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
	unsigned int objects;
	unsigned int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	unsigned int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb((uintptr_t)&cache->mag_cache[i],
			sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((uintptr_t)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(unative_t))
		align = sizeof(unative_t);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = SIZE2FRAMES(cache->size);
	/* We need the 2^order >= pages */
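	/* E.g. pages == 3: fnzb(2) + 1 == 2, i.e. a four-frame slab. */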
	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages-1)+1;

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	unsigned int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Free cpu-bound magazines */
		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove cache from link, so that we don't need
	 * to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) \
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
	    || magazine_obj_put(cache, obj)) {

		slab_obj_destroy(cache, obj, slab);

	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("slab name        size     pages  obj/pg slabs  cached allocated ctl\n");
	printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);

		printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
			cache->name, cache->size, (1 << cache->order), cache->objects,
			atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs),
			atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
			   sizeof(uintptr_t),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(uintptr_t),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. Allocate per-CPU magazine caches and enable them on all
 * existing caches that are SLAB_CACHE_MAGDEFERRED
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* malloc/free functions              */
void * malloc(unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

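	/*
	 * Pick the smallest power-of-two size class that fits the request;
	 * e.g. a 100-byte request maps to the "malloc-128" cache
	 * (fnzb(99) == 6, so the selected class holds 2^7-byte objects).
	 */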
	int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}

void * realloc(void *ptr, unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));

	void *new_ptr;

	if (size > 0) {
		if (size < (1 << SLAB_MIN_MALLOC_W))
			size = (1 << SLAB_MIN_MALLOC_W);
		int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

		new_ptr = slab_alloc(malloc_caches[idx], flags);
	} else
		new_ptr = NULL;

	if ((new_ptr != NULL) && (ptr != NULL)) {
		slab_t *slab = obj2slab(ptr);
		memcpy(new_ptr, ptr, min(size, slab->cache->size));
	}

	if (ptr != NULL)
		free(ptr);

	return new_ptr;
}

void free(void *ptr)
{
	if (!ptr)
		return;

	slab_t *slab = obj2slab(ptr);
	_slab_free(slab->cache, ptr, slab);
}

/** @}
 */