/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief	Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if this fails,
 * the object is deallocated into its slab). If the magazine is full, it is
 * put into the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
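 * A minimal usage sketch of the public interface (foo_t and foo_cache are
 * hypothetical; a real caller may also pass a constructor, destructor and
 * flags):
 * @code
 *	slab_cache_t *foo_cache = slab_cache_create("foo_cache",
 *	    sizeof(foo_t), 0, NULL, NULL, 0);
 *	foo_t *foo = slab_alloc(foo_cache, 0);
 *	...
 *	slab_free(foo_cache, foo);
 *	slab_cache_destroy(foo_cache);
 * @endcode
 *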
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are immediately freed (thrashing will be avoided
 * because of magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list, until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
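 *
 * For example, a sketch of the two reclaim modes as seen by a caller
 * (any flags value without SLAB_RECLAIM_ALL requests the light variant):
 * @code
 *	count_t light_frames = slab_reclaim(0);
 *	count_t all_frames = slab_reclaim(SLAB_RECLAIM_ALL);
 * @endcode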
 *
 * @todo
 * For better CPU-scaling the magazine allocation strategy should be
 * extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which would decrease contention for the single
 * per-system magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using the slab allocator for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-16",
	"malloc-32",
	"malloc-64",
	"malloc-128",
	"malloc-256",
	"malloc-512",
	"malloc-1K",
	"malloc-2K",
	"malloc-4K",
	"malloc-8K",
	"malloc-16K",
	"malloc-32K",
	"malloc-64K",
	"malloc-128K",
	"malloc-256K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache. */
	link_t link;		/**< List of full/partial slabs. */
	void *start;		/**< Start address of first available item. */
	count_t available;	/**< Count of available items in this slab. */
	index_t nextavail;	/**< The index of next available item. */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions          */

/**
 * Allocate frames for slab space and initialize them
 */
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	unsigned int i;
	count_t zone = 0;

	data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
	if (!data) {
		return NULL;
	}
	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(KA2PA(data));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i * cache->size)) = i + 1;
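	/*
	 * The first word of each free object now holds the index of the next
	 * free object; together with slab->nextavail == 0 this forms an
	 * embedded free list 0 -> 1 -> ... -> cache->objects - 1.
	 */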

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t *obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */

/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller;
 *	       otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	*((int *) obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);
	} else if (slab->available == 1) {
		/* It was full, move it to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from a slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   which is why recursion is at most one level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next, slab_t,
		    link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *) obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine, otherwise the last
 */
static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
{
	unsigned int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume mag_cache[CPU->id].lock is held
 */
static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void *magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has room for another object; return a
 * pointer to it, or NULL if no suitable magazine is available and a new one
 * cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have two magazines bound to the processor:
 * - first try the current one,
 * - if it is full, try the last one,
 * - if that is full too, put it on the magazines list,
 *   allocate a new one and exchange last & current.
 */
static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* Both current and last are full or nonexistent, allocate a new one */
	/* We do not want to sleep just because of caching. */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock. */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to the magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}

/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into a slab of the given cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
		    cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in a slab */
static unsigned int badness(slab_cache_t *cache)
{
	unsigned int objects;
	unsigned int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}
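
/*
 * Worked example for comp_objects() and badness() above (a sketch assuming
 * 4 KiB pages, order 0 and an external slab descriptor): for a cache with
 * 96-byte objects, comp_objects() yields 4096 / 96 == 42 and badness()
 * yields 4096 - 42 * 96 == 64 wasted bytes per slab.
 */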

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	unsigned int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
	    0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
		    "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
    int flags)
{
	int pages;
	ipl_t ipl;

	memsetb(cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(unative_t))
		align = sizeof(unative_t);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = SIZE2FRAMES(cache->size);
	/* We need 2^order >= pages */
	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to the cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t *
slab_cache_create(char *name, size_t size, size_t align,
    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
    int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
	    flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	unsigned int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU-bound magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove the cache from the cache list so that we don't need
	 * to disable interrupts later
	 */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) ||
	    !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from cache - if no flags are given, it always
 * returns memory */
void *slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    magazine_obj_put(cache, obj)) {
		slab_obj_destroy(cache, obj, slab);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupt handlers can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list;
	    cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}

/* Print list of slabs */
void slab_print_list(void)
{
	int skip = 0;

	printf("slab name        size     pages  obj/pg slabs  cached allocated"
	    " ctl\n");
	printf("---------------- -------- ------ ------ ------ ------ ---------"
	    " ---\n");

	while (true) {
		slab_cache_t *cache;
		link_t *cur;
		ipl_t ipl;
		int i;

		/*
		 * We must not hold the slab_cache_lock spinlock when printing
		 * the statistics. Otherwise we can easily deadlock if the print
		 * needs to allocate memory.
		 *
		 * Therefore, we walk through the slab cache list, skipping some
		 * amount of already processed caches during each iteration and
		 * gathering statistics about the first unprocessed cache. For
		 * the sake of printing the statistics, we release the
		 * slab_cache_lock and reacquire it afterwards. Then the walk
		 * starts again.
		 *
		 * This limits both the efficiency and also the accuracy of the
		 * obtained statistics. The efficiency is decreased because the
		 * time complexity of the algorithm is quadratic instead of
		 * linear. The accuracy is impacted because we drop the lock
		 * after processing one cache. If there is someone else
		 * manipulating the cache list, we might omit an arbitrary
		 * number of caches or process one cache multiple times.
		 * However, we don't lose sleep over this algorithm as it
		 * only serves statistics.
		 */

		ipl = interrupts_disable();
		spinlock_lock(&slab_cache_lock);

		for (i = 0, cur = slab_cache_list.next;
		    i < skip && cur != &slab_cache_list;
		    i++, cur = cur->next)
			;

		if (cur == &slab_cache_list) {
			spinlock_unlock(&slab_cache_lock);
			interrupts_restore(ipl);
			break;
		}

		skip++;

		cache = list_get_instance(cur, slab_cache_t, link);

		char *name = cache->name;
		uint8_t order = cache->order;
		size_t size = cache->size;
		unsigned int objects = cache->objects;
		long allocated_slabs = atomic_get(&cache->allocated_slabs);
		long cached_objs = atomic_get(&cache->cached_objs);
		long allocated_objs = atomic_get(&cache->allocated_objs);
		int flags = cache->flags;

		spinlock_unlock(&slab_cache_lock);
		interrupts_restore(ipl);

		printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
		    name, size, (1 << order), objects, allocated_slabs,
		    cached_objs, allocated_objs,
		    flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
	}
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache, "slab_magazine",
	    sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
	    sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
	    SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache, "slab_cache",
	    sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
	    SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
	    NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	    i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	    i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
		    NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the per-CPU magazine cache and enables it
 * on all existing caches that are marked SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list;
	    cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
		    SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* malloc/free functions              */
void *malloc(unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);
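
	/*
	 * Pick the smallest power-of-two cache that fits. E.g. for
	 * size == 100, idx = fnzb(99) - SLAB_MIN_MALLOC_W + 1 == 6 - 4 + 1
	 * == 3, which selects the malloc-128 cache (this example assumes
	 * SLAB_MIN_MALLOC_W == 4, i.e. a 16-byte minimum allocation).
	 */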
	int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}

void *realloc(void *ptr, unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));

	void *new_ptr;

	if (size > 0) {
		if (size < (1 << SLAB_MIN_MALLOC_W))
			size = (1 << SLAB_MIN_MALLOC_W);
		int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

		new_ptr = slab_alloc(malloc_caches[idx], flags);
	} else
		new_ptr = NULL;

	if ((new_ptr != NULL) && (ptr != NULL)) {
		slab_t *slab = obj2slab(ptr);
		memcpy(new_ptr, ptr, min(size, slab->cache->size));
	}

	if (ptr != NULL)
		free(ptr);

	return new_ptr;
}

void free(void *ptr)
{
	if (!ptr)
		return;

	slab_t *slab = obj2slab(ptr);
	_slab_free(slab->cache, ptr, slab);
}

/** @}
 */