/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are kept in a linked list in the slab cache)
 *
 *   The following features are not currently supported but would be easy to add:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but the allocation strategy would need adjusting)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is allocated, the allocator first checks whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full slab is found,
 * it is used, otherwise a new slab is allocated.
 *
 * When an object is deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if that fails,
 * the object is deallocated into the slab). If the magazine is full, it is
 * put into the cpu-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided by the
 * magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the cpu-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-cpu-cached magazine cache to provide one. It might be feasible
 * to add a cpu-cached magazine cache (which would allocate its magazines
 * from the non-cpu-cached magazine cache). This would provide a nice per-cpu
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the single
 * per-system magazine cache.
 *
 * - it might be good to add lock granularity even at the slab level;
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
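
/*
 * Illustrative usage sketch (foo_t and foo_cache are hypothetical names;
 * the calls match the interface implemented in this file): a subsystem
 * typically creates its cache once and then allocates and frees objects
 * through it.
 *
 *   static slab_cache_t *foo_cache;
 *
 *   foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *                                 NULL, NULL, 0);
 *   foo_t *foo = slab_alloc(foo_cache, 0);
 *   ...use foo...
 *   slab_free(foo_cache, foo);
 *   slab_cache_destroy(foo_cache);
 */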

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-cpu cache, so the cache itself is created
 * dynamically rather than being a statically allocated structure.
 * - Using the SLAB allocator for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation.
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] = {
    "malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
    "malloc-256","malloc-512","malloc-1K","malloc-2K",
    "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
    "malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< List of full/partial slabs */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< The index of next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

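    /* Build the implicit in-object free list: the first word of every free
     * object holds the index of the next free object; nextavail == 0 points
     * at the first object. */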
    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    spinlock_lock(&cache->slablock);

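    /* Push the object back onto the slab's implicit free list: store the
     * old head index in the object itself and make this object the head. */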
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to the correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* This should not produce a deadlock, as magazines
         * are always allocated with NO reclaim;
         * we can keep all locks */
        frames = slab_space_free(cache, slab);
    }

    spinlock_unlock(&cache->slablock);

    return frames;
}

/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   so recursion is at most 1 level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->slablock);
        if (!slab)
            goto err;
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
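    /* Pop the head of the slab's implicit free list; the first word of the
     * object holds the index of the next free object. */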
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);
    return obj;
err:
    spinlock_unlock(&cache->slablock);
    return NULL;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from magazine list */
    spinlock_lock(&cache->maglock);
    if (list_empty(&cache->magazines)) {
        spinlock_unlock(&cache->maglock);
        return NULL;
    }
    newmag = list_get_instance(cache->magazines.next,
                   slab_magazine_t,
                   link);
    list_remove(&newmag->link);
    spinlock_unlock(&cache->maglock);

    if (lastmag)
        slab_free(&mag_cache, lastmag);
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Ensure that the current magazine has free space, return a pointer to it,
 * or NULL if no such magazine is available and a new one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current one.
 *  If it is full, try the last one.
 *   If the last one is full too, put it on the magazines list,
 *   allocate a new one and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to the magazine list */
    if (lastmag) {
        spinlock_lock(&cache->maglock);
        list_prepend(&lastmag->link, &cache->magazines);
        spinlock_unlock(&cache->maglock);
    }
    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of a given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
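
/*
 * Worked example (illustrative; assumes PAGE_SIZE == 4096, order == 0 and an
 * in-slab slab_t of 32 bytes, none of which is guaranteed on every arch):
 * for a cache with size == 96 and SLAB_CACHE_SLINSIDE set, comp_objects()
 * yields (4096 - 32) / 96 = 42 objects and badness() yields
 * 4096 - 32 - 42*96 = 32 wasted bytes.
 */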

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           void (*destructor)(void *obj),
           int flags)
{
    int i;
    int pages;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i=0; i < config.cpu_count; i++) {
            memsetb((__address)&cache->mag_cache[i],
                sizeof(cache->mag_cache[i]), 0);
            spinlock_initialize(&cache->mag_cache[i].lock,
                        "slab_maglock_cpu");
        }
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
    cache->order = fnzb(pages);

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}


/** Create slab cache  */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_lock(&cache->mag_cache[i].lock);
    }
    spinlock_lock(&cache->maglock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */
        /* Destroy CPU magazines */
        for (i=0; i<config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* We can release the per-cpu cache locks now */
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_unlock(&cache->mag_cache[i].lock);
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(&mag->link);
        frames += magazine_destroy(cache, mag);
        /* If we are not doing a full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->maglock);

    return frames;
}

/** Check that there are no slabs and remove cache from system  */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known  */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {

        slab_obj_destroy(cache, obj, slab);

    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add assert, that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
               "slab_cache",
               sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                          sizeof(slab_t),
                          0, NULL, NULL,
                          SLAB_CACHE_SLINSIDE);

    /* Initialize structures for malloc */
    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                             size, 0,
                             NULL, NULL, 0);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(_slab_initialized);
    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
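
/*
 * Worked example (illustrative; assumes SLAB_MIN_MALLOC_W == 3, matching the
 * smallest "malloc-8" cache, and fnzb() returning the index of the most
 * significant set bit): kalloc(100, 0) computes
 * idx = fnzb(99) - 3 + 1 = 6 - 3 + 1 = 4, i.e. the "malloc-128" cache.
 */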

void kfree(void *obj)
{
    slab_t *slab = obj2slab(obj);

    _slab_free(slab->cache, obj, slab);
}