/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
769 palkovsky 29
/*
30
 * The SLAB allocator is closely modelled after Opensolaris SLAB allocator
31
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
32
 *
33
 * with the following exceptions:
34
 *   - empty SLABS are deallocated immediately 
35
 *     (in Linux they are kept in linked list, in Solaris ???)
36
 *   - empty magazines are deallocated when not needed
37
 *     (in Solaris they are held in linked list in slab cache)
38
 *
39
 *   Following features are not currently supported but would be easy to do:
40
 *   - cache coloring
41
 *   - dynamic magazine growing (different magazine sizes are already
42
 *     supported, but we would need to adjust allocating strategy)
43
 *
44
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
45
 * good SMP scaling. 
46
 *
47
 * When a new object is being allocated, it is first checked, if it is 
48
 * available in CPU-bound magazine. If it is not found there, it is
49
 * allocated from CPU-shared SLAB - if partial full is found, it is used,
50
 * otherwise a new one is allocated. 
51
 *
52
 * When an object is being deallocated, it is put to CPU-bound magazine.
53
 * If there is no such magazine, new one is allocated (if it fails, 
54
 * the object is deallocated into SLAB). If the magazine is full, it is
55
 * put into cpu-shared list of magazines and new one is allocated.
56
 *
57
 * The CPU-bound magazine is actually a pair of magazine to avoid
58
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
59
 * size boundary. LIFO order is enforced, which should avoid fragmentation
60
 * as much as possible. 
61
 *  
62
 * Every cache contains list of full slabs and list of partialy full slabs.
63
 * Empty SLABS are immediately freed (thrashing will be avoided because
64
 * of magazines). 
65
 *
66
 * The SLAB information structure is kept inside the data area, if possible.
67
 * The cache can be marked that it should not use magazines. This is used
68
 * only for SLAB related caches to avoid deadlocks and infinite recursion
69
 * (the SLAB allocator uses itself for allocating all it's control structures).
70
 *
71
 * The SLAB allocator allocates lot of space and does not free it. When
72
 * frame allocator fails to allocate the frame, it calls slab_reclaim().
73
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
74
 * releases slabs from cpu-shared magazine-list, until at least 1 slab 
75
 * is deallocated in each cache (this algorithm should probably change).
76
 * The brutal reclaim removes all cached objects, even from CPU-bound
77
 * magazines.
78
 *
79
 * 
80
 */
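
/*
 * Example usage of the cache API implemented in this file (an illustrative
 * sketch; 'myobj_t' and 'myobj_cache' are hypothetical, not part of this
 * file):
 *
 *	static slab_cache_t *myobj_cache;
 *
 *	myobj_cache = slab_cache_create("myobj", sizeof(myobj_t), 0,
 *					NULL, NULL, 0);
 *	myobj_t *obj = slab_alloc(myobj_cache, 0);
 *	...
 *	slab_free(myobj_cache, obj);
 */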

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] =  {
	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< Link to list of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< Index of the next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize them
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i=0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;
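
	/* Build the initial free list: the first word of each free item
	 * stores the index of the next free item */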
	for (i=0; i<cache->objects;i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate the space associated with a SLAB
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map an object to its slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return an object to its slab and call a destructor
 *
 * Assumes cache->lock is held.
 *
 * @param slab The object's slab, if the caller knows it; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);
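
	/* Return the object to the slab's free list: the object's first
	 * word takes the old free-list head index and the object becomes
	 * the new head */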
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == 1) {
		/* It was in full, move it to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	return frames;
}

/**
 * Take a new object from a slab or create a new slab if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate anything
		 *   other than frames (via frame_alloc) when they are
		 *   being allocated; that's why recursion is at most
		 *   1 level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab) {
			return NULL;
		}
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
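	/* Pop the first free item off the slab's free list */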
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in a magazine and free the memory associated with it
 *
 * Assumes mag_cache[cpu].lock is held
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	spinlock_lock(&cache->lock);
	if (list_empty(&cache->magazines)) {
		spinlock_unlock(&cache->lock);
		return NULL;
	}
	newmag = list_get_instance(cache->magazines.next,
				   slab_magazine_t,
				   link);
	list_remove(&newmag->link);
	spinlock_unlock(&cache->lock);

	if (lastmag)
		slab_free(&mag_cache, lastmag);
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}
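
/*
 * Together with make_empty_current_mag() below, this implements the
 * two-magazine scheme: allocating and deallocating repeatedly at the
 * magazine size boundary only swaps the current/last pointers, so the
 * CPU-shared magazine list (and cache->lock) is touched only when both
 * CPU-bound magazines are exhausted.
 */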

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has room for at least one more object,
 * and return a pointer to it; return NULL if no such magazine is available
 * and a new one cannot be allocated
 *
 * We have 2 magazines bound to each processor.
 * First try the current one.
 *  If full, try the last one.
 *   If full too, put the last one on the magazines list,
 *   allocate a new one and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag,*lastmag,*newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current & last are full or nonexistent, allocate a new magazine */
	/* We do not want to sleep just because of caching, */
	/* and especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to the magazine list */
	if (lastmag)
		list_prepend(&lastmag->link, &cache->magazines);
	/* Move current to last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put an object into the CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}

/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the space wasted in a slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}
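
/*
 * Worked example (illustrative assumptions: PAGE_SIZE == 4096, order 0,
 * object size 256, sizeof(slab_t) == 32): with the slab_t kept inside,
 * comp_objects() gives (4096 - 32) / 256 = 15 objects and badness()
 * gives (4096 - 32) - 15*256 = 224 wasted bytes per slab.
 */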

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;
	int pages;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i=0; i < config.cpu_count; i++) {
			memsetb((__address)&cache->mag_cache[i],
				sizeof(cache->mag_cache[i]), 0);
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab info fits into the wasted space, keep it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create a slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	link_t *cur;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all CPU caches, then the complete cache lock */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_lock(&cache->mag_cache[i].lock);
	}
	spinlock_lock(&cache->lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive memfree */
		/* Destroy CPU magazines */
		for (i=0; i < config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
	}
	/* Destroy full magazines */
	cur = cache->magazines.prev;

	while (cur != &cache->magazines) {
		mag = list_get_instance(cur, slab_magazine_t, link);

		cur = cur->prev;
		list_remove(&mag->link);
		frames += magazine_destroy(cache, mag);
		/* If we do not do a full reclaim, break
		 * as soon as something is freed */
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	spinlock_unlock(&cache->lock);
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_unlock(&cache->mag_cache[i].lock);
	}

	return frames;
}

/** Check that there are no slabs and remove the cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from the cache - if no flags are given,
    the call always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return an object to the cache, using its slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {
		spinlock_lock(&cache->lock);
		slab_obj_destroy(cache, obj, slab);
		spinlock_unlock(&cache->lock);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to its cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}

/* Print the list of slab caches */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize the magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize the slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize the external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, 0);
	}
}

/**************************************/
/* kalloc/kfree functions             */

void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
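
/*
 * Example of the index computation above (assuming SLAB_MIN_MALLOC_W == 3,
 * i.e. the smallest cache is malloc-8): for size == 100, fnzb(99) == 6,
 * so idx == 6 - 3 + 1 == 4 and the request is served from malloc-128,
 * the smallest power-of-2 cache that can hold 100 bytes.
 */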

void kfree(void *obj)
{
	slab_t *slab = obj2slab(obj);

	_slab_free(slab->cache, obj, slab);
}