/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty SLABs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is allocated, it is first looked up in the CPU-bound
 * magazine. If it is not found there, it is allocated from a CPU-shared
 * SLAB - if a partially full one is found, it is used, otherwise a new one
 * is allocated.
 *
 * When an object is deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated directly into the SLAB). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new one is
 * allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody is allocating/deallocating one item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * The cache can be marked that it should not use magazines. This is used
 * only for SLAB-related caches to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the cpu-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from the CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-cpu-cached magazine cache to provide one. It might be feasible
 * to add a cpu-cached magazine cache (which would allocate its magazines
 * from the non-cpu-cached magazine cache). This would provide a nice per-cpu
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the single
 * per-system magazine cache.
 *
 * - it might be good to add lock granularity even at the slab level;
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
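
/*
 * Usage sketch (illustrative only, not part of the original code; foo_t and
 * foo_example() are hypothetical names). A client creates a cache once and
 * then allocates and frees objects from it:
 *
 *	static slab_cache_t *foo_cache;
 *
 *	static void foo_example(void)
 *	{
 *		foo_t *foo;
 *
 *		foo_cache = slab_cache_create("foo_t", sizeof(foo_t), 0,
 *					      NULL, NULL, 0);
 *		foo = slab_alloc(foo_cache, 0);
 *		slab_free(foo_cache, foo);
 *		slab_cache_destroy(foo_cache);
 *	}
 *
 * With flags == 0 slab_alloc() always returns memory (see its comment below);
 * passing FRAME_ATOMIC, as make_empty_current_mag() does for magazines,
 * avoids sleeping at the cost of possible failure.
 */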


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-cpu cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] = {
	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};
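
/*
 * Note: malloc_names must provide one name for every width from
 * SLAB_MIN_MALLOC_W ("malloc-8") up to SLAB_MAX_MALLOC_W ("malloc-128K");
 * slab_cache_init() walks the array in parallel with the cache sizes.
 */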

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache; /**< Pointer to parent cache */
	link_t link;       /**< List of full/partial slabs */
	void *start;       /**< Start address of first available item */
	count_t available; /**< Count of available items in this slab */
	index_t nextavail; /**< The index of next available item */
} slab_t;
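
/*
 * Note on free-object bookkeeping: the free objects of a slab form a simple
 * index-linked list - the first int of every free object stores the index of
 * the next free one and slab_t.nextavail points to the head (see
 * slab_space_alloc(), slab_obj_create() and slab_obj_destroy()), so no extra
 * memory is needed to track free objects.
 */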

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i=0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	for (i=0; i<cache->objects;i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if it is known to the caller, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);
	ASSERT(slab->available < cache->objects);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was full, move it to the partial list */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from a slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why the recursion is at most 1 level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it off the list
 * and return it
 *
 * @param first If true, return the first magazine on the list, else the last one
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to the magazine list in the cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in the magazine and free the memory associated with it
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0;i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Make sure the current magazine has room for at least one more object and
 * return a pointer to it, or NULL if no such magazine is available and a new
 * one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current one.
 *  If it is full, try the last one.
 *   If that is full too, put it on the magazine list,
 *   allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put an object into the CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in a slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}
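
/*
 * Worked example (illustrative only; assumes 4 KiB pages and a 64-byte
 * slab_t): for an object size of 512 at order 0 with SLAB_CACHE_SLINSIDE set,
 * comp_objects() gives (4096 - 64) / 512 = 7 objects and badness() gives
 * (4096 - 64) - 7 * 512 = 448 wasted bytes per slab.
 */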

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int i;
	int pages;
	ipl_t ipl;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i=0; i < config.cpu_count; i++) {
			memsetb((__address)&cache->mag_cache[i],
				sizeof(cache->mag_cache[i]), 0);
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "slab_maglock_cpu");
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);
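	/*
	 * Example (illustrative, assuming 4 KiB pages): for cache->size == 5000,
	 * pages == ((5000 - 1) >> 12) + 1 == 2 and fnzb(2) == 1, i.e. the
	 * minimum order is 1 (an 8 KiB slab). The loop below then keeps
	 * increasing the order as long as the wasted space exceeds
	 * SLAB_MAX_BADNESS(cache).
	 */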

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache  */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
		frames += magazine_destroy(cache,mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU-bound magazines */
		for (i=0; i<config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system  */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove the cache from the cache list, so that we don't need
	 * to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from the cache - if no flags are given, it always
    returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return an object to the cache, using the slab if it is known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {

		slab_obj_destroy(cache, obj, slab);

	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to the cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL,NULL,0);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(_slab_initialized);
	ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;
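	/*
	 * Example: with SLAB_MIN_MALLOC_W == 3 (matching the "malloc-8" name
	 * of the first cache), a request for size == 100 gives fnzb(99) == 6
	 * and idx == 6 - 3 + 1 == 4, i.e. the "malloc-128" cache - the
	 * smallest power of two that fits the request.
	 */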

	return slab_alloc(malloc_caches[idx], flags);
}


void kfree(void *obj)
{
	slab_t *slab;

	if (!obj) return;

	slab = obj2slab(obj);
	_slab_free(slab->cache, obj, slab);
}