/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty slabs are deallocated immediately
 *     (in Linux they are kept on a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are held on a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to add:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if
 * that fails, the object is deallocated into the slab). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new one
 * is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, to avoid
 * thrashing when somebody is allocating/deallocating one item at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially
 * full slabs. Empty slabs are freed immediately (thrashing is avoided
 * because of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for SLAB-related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries a 'light reclaim' first, then a brutal reclaim. The light
 * reclaim releases slabs from the CPU-shared magazine list until at
 * least one slab is deallocated in each cache (this algorithm should
 * probably change). The brutal reclaim removes all cached objects, even
 * from the CPU-bound magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if a cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine list', which decreases contention for the single
 * per-system magazine cache.
 *
 */
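
/*
 * A minimal usage sketch of the public interface implemented below
 * (the 'myobj_t' type and the cache name are hypothetical, used for
 * illustration only):
 *
 *	typedef struct { int field; } myobj_t;
 *	slab_cache_t *myobj_cache;
 *	myobj_t *obj;
 *
 *	// No constructor/destructor, default alignment, no special flags
 *	myobj_cache = slab_cache_create("myobj_t", sizeof(myobj_t), 0,
 *					NULL, NULL, 0);
 *	obj = slab_alloc(myobj_cache, 0);
 *	// ... use obj ...
 *	slab_free(myobj_cache, obj);
 *	slab_cache_destroy(myobj_cache);
 */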

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so it is created at run time rather
 * than statically
 * - using the SLAB allocator for internal SLAB structures will not
 *   deadlock, as all slab structures are 'small' - the control
 *   structures of their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] =  {
	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache; /**< Pointer to parent cache */
	link_t link;         /**< List of full/partial slabs */
	void *start;         /**< Start address of first available item */
	count_t available;   /**< Count of available items in this slab */
	index_t nextavail;   /**< The index of next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize the slab
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i=0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	for (i=0; i<cache->objects;i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}
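
/*
 * A sketch of the free list built by slab_space_alloc() above, with
 * illustrative numbers (cache->objects == 4): the first word of every
 * free object stores the index of the next free one, so the list needs
 * no extra memory:
 *
 *	object index:     0    1    2    3
 *	stored word:     [1]  [2]  [3]  [4]
 *	slab->nextavail = 0
 *
 * slab_obj_create() pops index 0 and loads 1 as the new nextavail;
 * slab_obj_destroy() pushes freed indices back in LIFO order. The last
 * stored word (4) is never followed, because 'available' reaches zero
 * first.
 */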

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assume the cache->lock is held.
 *
 * @param slab The slab of the object, if the caller knows it directly, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	return frames;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * Assume cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   so recursion is at most one level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab) {
			return NULL;
		}
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in a magazine and free memory associated with the magazine
 *
 * Assume cache->lock is held
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	spinlock_lock(&cache->lock);
	if (list_empty(&cache->magazines)) {
		spinlock_unlock(&cache->lock);
		return NULL;
	}
	newmag = list_get_instance(cache->magazines.next,
				   slab_magazine_t,
				   link);
	list_remove(&newmag->link);
	spinlock_unlock(&cache->lock);

	if (lastmag)
		slab_free(&mag_cache, lastmag);
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has room for at least one more object;
 * return a pointer to it, or NULL if no such magazine is available and a
 * new one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have two magazines bound to each processor:
 *  First try the current.
 *  If full, try the last.
 *  If both are full, put the last onto the magazines list,
 *  allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag,*lastmag,*newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current & last are full or nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag) {
		spinlock_lock(&cache->lock);
		list_prepend(&lastmag->link, &cache->magazines);
		spinlock_unlock(&cache->lock);
	}
	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}
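
/*
 * An illustration of the current/last exchange above (with a
 * hypothetical SLAB_MAG_SIZE of 8): if 'current' has busy == 8 (full)
 * and 'last' has busy == 3, the two are swapped and the freed object
 * goes into the former 'last'. Only when both magazines are full is one
 * flushed to cache->magazines and a fresh one allocated - the classic
 * Bonwick two-magazine scheme that avoids thrashing at the
 * magazine-size boundary.
 */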

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into one slab of the cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}
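
/*
 * A worked example of badness(), with illustrative numbers (4 KiB
 * pages, order 0, object size 256, and a hypothetical sizeof(slab_t)
 * of 32): with SLAB_CACHE_SLINSIDE, usable space is 4096 - 32 = 4064
 * bytes, comp_objects() yields 4064 / 256 = 15 objects, and the wasted
 * space is 4064 - 15*256 = 224 bytes per slab.
 */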

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;
	int pages;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i=0; i < config.cpu_count; i++) {
			memsetb((__address)&cache->mag_cache[i],
				sizeof(cache->mag_cache[i]), 0);
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab descriptor fits into the wasted space, keep it
	 * inside the slab */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}
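
/*
 * A worked example of the order computation in _slab_cache_create(),
 * assuming PAGE_WIDTH == 12 (4 KiB pages): for an object size of 5000
 * bytes, pages = ((5000-1) >> 12) + 1 = 2, so the starting order is
 * fnzb(2) = 1, i.e. an 8 KiB slab; the badness() loop may then raise
 * the order further until the wasted space drops below
 * SLAB_MAX_BADNESS(cache).
 */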

/** Create slab cache  */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	link_t *cur;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all CPU caches, then the complete cache lock */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_lock(&cache->mag_cache[i].lock);
	}
	spinlock_lock(&cache->lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive memfree */
		/* Destroy CPU magazines */
		for (i=0; i<config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
	}
	/* Destroy full magazines */
	cur = cache->magazines.prev;

	while (cur != &cache->magazines) {
		mag = list_get_instance(cur, slab_magazine_t, link);

		cur = cur->prev;
		list_remove(&mag->link);
		frames += magazine_destroy(cache, mag);
		/* If we do not do full reclaim, break
		 * as soon as something is freed */
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	spinlock_unlock(&cache->lock);
	/* We can release the cache locks now */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_unlock(&cache->mag_cache[i].lock);
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system  */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known  */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {
		spinlock_lock(&cache->lock);
		slab_obj_destroy(cache, obj, slab);
		spinlock_unlock(&cache->lock);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, 0);
	}
}

/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
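
/*
 * A worked example of the index computation in kalloc(): the smallest
 * malloc cache is malloc-8, so SLAB_MIN_MALLOC_W should be 3. For
 * size == 100, fnzb(99) == 6, hence idx = 6 - 3 + 1 = 4, selecting
 * malloc_caches[4] ("malloc-128") - the smallest cache whose objects
 * hold 100 bytes.
 */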


void kfree(void *obj)
{
	slab_t *slab = obj2slab(obj);

	_slab_free(slab->cache, obj, slab);
}