/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;

typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< Link in the list of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of the next available item */
} slab_t;
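
/*
 * Free items inside a slab form an implicit linked list: the first word
 * of every free item stores the index of the next free item, and
 * slab->nextavail holds the index of the list head. Allocation within a
 * slab therefore pops the head in O(1), and deallocation pushes the
 * object back in O(1).
 */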

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK)
		return NULL;

	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = malloc(sizeof(*slab)); // , flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i = 0; i < (1 << cache->order); i++)
		ADDR2FRAME(zone, (__address)(data + i*PAGE_SIZE))->parent = slab;

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;

	/* Chain all objects into the initial free list */
	for (i = 0; i < cache->objects; i++)
		*((int *)(slab->start + i*cache->size)) = i + 1;

	return slab;
}
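
/*
 * Note: with SLAB_CACHE_SLINSIDE the slab_t control structure lives at
 * the very end of the slab's own frames, so no extra allocation is
 * needed (at the cost of fitting fewer objects); otherwise it is
 * malloc()ed separately and reached through the frames' parent pointer.
 */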

/**
 * Free space associated with SLAB
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		free(slab);
	return 1 << cache->order;
}
115
 
116
/** Map object to slab structure */
117
static slab_t * obj2slab(void *obj)
118
{
119
	frame_t *frame; 
120
 
121
	frame = frame_addr2frame((__address)obj);
122
	return (slab_t *)frame->parent;
123
}
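
/*
 * Because every frame of a slab points back to its slab_t (set up in
 * slab_space_alloc() above), obj2slab() resolves an object address to
 * its slab in constant time, without any search or hash lookup.
 */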

/**************************************/
/* SLAB functions */

/**
 * Return object to slab and call a destructor
 *
 * The cache->lock is taken and released internally.
 *
 * @param slab The slab of the object, if the caller knows it directly; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	spinlock_lock(&cache->lock);

	/* Push the object back onto the slab's free list */
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&cache->partial_slabs, &slab->link);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	spinlock_unlock(&cache->lock);

	return frames;
}

/**
 * Take new object from slab or create new if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   so recursion is at most 1 level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab)
			return NULL;
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	/* Pop the head of the slab's free list */
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (!slab->available)
		list_prepend(&cache->full_slabs, &slab->link);
	else
		list_prepend(&cache->partial_slabs, &slab->link);
	return obj;
}
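
/*
 * Worked example of the free-list pop above (hypothetical numbers):
 * with cache->size == 64 and slab->nextavail == 3, the object handed
 * out is slab->start + 192, and the first word of that object then
 * becomes the new value of slab->nextavail.
 */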

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assumes mag_cache[CPU->id].lock is locked.
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++)
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = cache->mag_cache[CPU->id].current;
	if (!mag)
		goto out;

	if (!mag->busy) {
		/* If current is empty && last exists && not empty, exchange */
		if (cache->mag_cache[CPU->id].last
		    && cache->mag_cache[CPU->id].last->busy) {
			cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
			cache->mag_cache[CPU->id].last = mag;
			mag = cache->mag_cache[CPU->id].current;
			goto gotit;
		}
		/* If still empty, exchange current with some from
		 * other full magazines */
		spinlock_lock(&cache->lock);
		if (list_empty(&cache->magazines)) {
			spinlock_unlock(&cache->lock);
			goto out;
		}
		/* Free current magazine and take one from list */
		slab_free(&mag_cache, mag);
		mag = list_get_instance(cache->magazines.next,
					slab_magazine_t,
					link);
		list_remove(&mag->link);
		/* Update the per-CPU pointer, which would otherwise keep
		 * pointing at the magazine freed above */
		cache->mag_cache[CPU->id].current = mag;

		spinlock_unlock(&cache->lock);
	}
gotit:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return mag->objs[--mag->busy];
out:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return NULL;
}

/**
 * Put object into CPU-cache magazine
 *
 * We have 2 magazines bound to each processor.
 * First try the current one.
 *  If it is full, try the last.
 *   If the last is full too, put it on the magazines list,
 *   allocate a new one and exchange last & current.
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = cache->mag_cache[CPU->id].current;
	if (!mag) {
		/* We do not want to sleep just because of caching;
		 * especially we do not want reclaiming to start, as
		 * this would deadlock */
		mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
		if (!mag) /* Allocation failed, give up on caching */
			goto errout;

		cache->mag_cache[CPU->id].current = mag;
		mag->size = SLAB_MAG_SIZE;
		mag->busy = 0;
	} else if (mag->busy == mag->size) {
		/* Current is full; if the last is full or missing,
		 * allocate a new one */
		mag = cache->mag_cache[CPU->id].last;
		if (!mag || mag->size == mag->busy) {
			if (mag) {
				/* The shared magazine list is guarded
				 * by cache->lock */
				spinlock_lock(&cache->lock);
				list_prepend(&cache->magazines, &mag->link);
				spinlock_unlock(&cache->lock);
			}

			mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
			if (!mag)
				goto errout;

			mag->size = SLAB_MAG_SIZE;
			mag->busy = 0;
			cache->mag_cache[CPU->id].last = mag;
		}
		/* Exchange the 2 */
		cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
		cache->mag_cache[CPU->id].current = mag;
	}
	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return 0;
errout:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return -1;
}
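
/*
 * Design note: keeping two magazines (current and last) per CPU means
 * that a thread repeatedly allocating and freeing right at a magazine
 * boundary just swaps the two pointers instead of moving a magazine to
 * and from the shared list, a scheme along the lines of Bonwick's
 * magazine allocator.
 */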

/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit into one slab of the cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}
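
/*
 * Example (assuming a hypothetical 4096-byte PAGE_SIZE): for an order-0
 * cache with 96-byte objects and the control structure kept outside the
 * slab, comp_objects() yields 4096/96 = 42 objects, and badness()
 * reports 4096 - 42*96 = 64 wasted bytes per slab.
 */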

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align)
		size = ALIGN_UP(size, align);
	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i = 0; i < config.cpu_count; i++)
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order large enough to hold at least one object */
	cache->order = (cache->size / PAGE_SIZE) + 1;

	/* Grow the slab until the wasted space is acceptable */
	while (badness(cache) > SLAB_MAX_BADNESS(cache))
		cache->order += 1;

	cache->objects = comp_objects(cache);

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}
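
/*
 * Usage sketch (illustrative only; the "myobj" identifiers below are
 * hypothetical callers, not part of this file):
 *
 *	slab_cache_t *myobj_cache;
 *	myobj_cache = slab_cache_create("myobj_cache", sizeof(myobj_t),
 *					0, NULL, NULL, 0);
 *	myobj_t *obj = slab_alloc(myobj_cache, 0);
 *	...
 *	slab_free(myobj_cache, obj);
 */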

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 *
 * TODO: Add light reclaim
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all cpu caches; cache->lock is taken only around the
	 * shared magazine list below, because magazine_destroy() ends up
	 * in slab_obj_destroy(), which takes cache->lock itself */
	for (i = 0; i < config.cpu_count; i++)
		spinlock_lock(&cache->mag_cache[i].lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive memfree */

		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
	}
	/* Destroy full magazines */
	for (;;) {
		spinlock_lock(&cache->lock);
		if (list_empty(&cache->magazines)) {
			spinlock_unlock(&cache->lock);
			break;
		}
		mag = list_get_instance(cache->magazines.prev,
					slab_magazine_t,
					link);
		list_remove(&mag->link);
		spinlock_unlock(&cache->lock);

		frames += magazine_destroy(cache, mag);
		/* If we do not do full reclaim, break
		 * as soon as something is freed */
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	for (i = 0; i < config.cpu_count; i++)
		spinlock_unlock(&cache->mag_cache[i].lock);

	return frames;
}
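
/*
 * Lock ordering note: the hot paths take a per-CPU magazine lock first
 * and cache->lock second; _slab_reclaim() follows the same order (all
 * CPU locks, then cache->lock as needed), so the reclaim path cannot
 * deadlock against concurrent allocation and deallocation.
 */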

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	free(cache);
}

/** Allocate new object from cache - if no flags are given, always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	interrupts_restore(ipl);

	return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {
		/* slab_obj_destroy() takes cache->lock internally */
		slab_obj_destroy(cache, obj, NULL);
	}
	interrupts_restore(ipl);
}

/** Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}

/** Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tOrder\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order);
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void *),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE);

	/* TODO: Initialize structures for malloc */
}