Subversion Repositories HelenOS-historic


Rev 773 → Rev 775
Line 74... Line 74...
  * releases slabs from cpu-shared magazine-list, until at least 1 slab
  * is deallocated in each cache (this algorithm should probably change).
  * The brutal reclaim removes all cached objects, even from CPU-bound
  * magazines.
  *
+ * TODO: For better CPU-scaling the magazine allocation strategy should
+ * be extended. Currently, if the cache does not have magazine, it asks
+ * for non-cpu cached magazine cache to provide one. It might be feasible
+ * to add cpu-cached magazine cache (which would allocate it's magazines
+ * from non-cpu-cached mag. cache). This would provide a nice per-cpu
+ * buffer. The other possibility is to use the per-cache
+ * 'empty-magazine-list', which decreases competing for 1 per-system
+ * magazine cache.
  *
  */
 
 
 #include <synch/spinlock.h>
 #include <mm/slab.h>
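
The comment above describes two reclaim levels: the periodic pass stops as soon as each cache has given back at least one slab, while the brutal pass empties every magazine, including the CPU-bound ones. A minimal sketch of that stop condition follows; the types and helper here are stand-ins, only the SLAB_RECLAIM_ALL flag name is taken from the hunks below.

#include <stddef.h>

#define SLAB_RECLAIM_ALL 0x1            /* flag name taken from the hunks below */

typedef struct {
    size_t cached_slabs;                /* stand-in: slabs currently held in magazines */
} cache_sketch_t;

/* Free cached slabs from one cache; the periodic reclaim stops after the
 * first freed slab, the brutal reclaim keeps going until nothing is cached. */
static size_t reclaim_sketch(cache_sketch_t *cache, int flags)
{
    size_t frames = 0;

    while (cache->cached_slabs) {
        cache->cached_slabs--;
        frames++;                       /* pretend one freed slab is one frame */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;                      /* periodic pass: one slab is enough */
    }
    return frames;
}
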
Line 293... Line 301...
 /* CPU-Cache slab functions */
 
 /**
  * Free all objects in magazine and free memory associated with magazine
  *
- * Assume mag_cache[cpu].lock is locked
+ * Assume cache->lock is held
  *
  * @return Number of freed pages
  */
 static count_t magazine_destroy(slab_cache_t *cache,
                 slab_magazine_t *mag)
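
Per the docblock, magazine_destroy frees every object cached in the magazine, releases the magazine's own memory, and reports how many frames that returned, with cache->lock held by the caller. A rough, self-contained sketch of that contract; the field and helper names (busy, objs, obj_destroy_sketch, mag_free_sketch) are assumptions, not the file's real ones.

#include <stddef.h>

#define MAG_SIZE 8                          /* hypothetical magazine capacity */

typedef size_t count_t;

typedef struct {
    size_t busy;                            /* number of cached objects */
    void *objs[MAG_SIZE];                   /* the cached objects themselves */
} magazine_sketch_t;

/* Stand-in: free one cached object, return the frames released by doing so. */
static count_t obj_destroy_sketch(void *obj) { (void) obj; return 0; }
/* Stand-in: give the magazine's own memory back. */
static void mag_free_sketch(magazine_sketch_t *mag) { (void) mag; }

/* The caller is expected to hold the cache lock, as the updated comment says. */
static count_t magazine_destroy_sketch(magazine_sketch_t *mag)
{
    count_t frames = 0;
    size_t i;

    for (i = 0; i < mag->busy; i++)
        frames += obj_destroy_sketch(mag->objs[i]);

    mag_free_sketch(mag);
    return frames;
}
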
Line 617... Line 625...
         if (!(flags & SLAB_RECLAIM_ALL) && frames)
             break;
     }
 
     spinlock_unlock(&cache->lock);
+    /* We can release the cache locks now */
     if (flags & SLAB_RECLAIM_ALL) {
         for (i=0; i < config.cpu_count; i++)
             spinlock_unlock(&cache->mag_cache[i].lock);
     }
 
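
The added comment marks where the cache locks can be dropped: the cache lock goes first, and only a SLAB_RECLAIM_ALL pass also has to release the per-CPU magazine locks it took. A sketch of that pairing, with stand-in spinlocks and a fixed CPU count instead of the kernel's config.cpu_count; the acquire side is an assumption inferred from the release order shown above.

#include <stddef.h>

#define SLAB_RECLAIM_ALL 0x1
#define CPU_COUNT 4                         /* stand-in for config.cpu_count */

typedef int spinlock_sketch_t;              /* stand-in spinlock */
static void lock_sketch(spinlock_sketch_t *l)   { *l = 1; }
static void unlock_sketch(spinlock_sketch_t *l) { *l = 0; }

typedef struct {
    spinlock_sketch_t lock;                 /* plays the role of cache->lock */
    spinlock_sketch_t mag_lock[CPU_COUNT];  /* plays the role of mag_cache[i].lock */
} cache_locks_sketch_t;

static void reclaim_locking_sketch(cache_locks_sketch_t *c, int flags)
{
    size_t i;

    /* A brutal reclaim touches CPU-bound magazines, so it needs every
     * per-CPU magazine lock in addition to the cache lock. */
    if (flags & SLAB_RECLAIM_ALL)
        for (i = 0; i < CPU_COUNT; i++)
            lock_sketch(&c->mag_lock[i]);
    lock_sketch(&c->lock);

    /* ... walk the magazines and free cached slabs here ... */

    /* Release mirrors the hunk above: cache lock first, then the
     * per-CPU magazine locks. */
    unlock_sketch(&c->lock);
    if (flags & SLAB_RECLAIM_ALL)
        for (i = 0; i < CPU_COUNT; i++)
            unlock_sketch(&c->mag_lock[i]);
}
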
Line 775... Line 784...
 /**************************************/
 /* kalloc/kfree functions             */
 void * kalloc(unsigned int size, int flags)
 {
     int idx;
 
     ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));
 
     if (size < (1 << SLAB_MIN_MALLOC_W))
         size = (1 << SLAB_MIN_MALLOC_W);
 
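
The visible part of kalloc() clamps the request up to 2^SLAB_MIN_MALLOC_W and asserts it does not exceed 2^SLAB_MAX_MALLOC_W, which suggests idx selects one of a set of power-of-two sized malloc caches. Below is one way that index might be derived; the width values and the loop are an illustration only, not the file's actual code, which is cut off here.

#include <assert.h>

#define SLAB_MIN_MALLOC_W 3     /* hypothetical: smallest bucket is 8 bytes */
#define SLAB_MAX_MALLOC_W 17    /* hypothetical: largest bucket is 128 KiB */

/* Map a request size to the smallest power-of-two bucket that fits it. */
static int malloc_idx_sketch(unsigned int size)
{
    int idx = 0;

    assert(size && size <= (1 << SLAB_MAX_MALLOC_W));
    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    while ((1u << (SLAB_MIN_MALLOC_W + idx)) < size)
        idx++;
    return idx;     /* e.g. a 100-byte request lands in the 128-byte bucket, idx 4 */
}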