/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is allocated, the CPU-bound magazine is checked first.
 * If the object is not found there, it is allocated from the CPU-shared
 * slabs - a partially full slab is used if one is available, otherwise a
 * new one is allocated.
 *
 * When an object is deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if that fails,
 * the object is deallocated directly into the slab). If the magazine is
 * full, it is put onto the CPU-shared list of magazines and a new one is
 * allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody allocates/deallocates one item right at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided thanks
 * to the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself to allocate all of its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. Light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * Brutal reclaim removes all cached objects, even from the CPU-bound
 * magazines.
 */
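
/*
 * Typical use of the public interface declared below (an illustrative
 * sketch only; the object type my_data_t, its contents and the decision
 * to pass no constructor/destructor are hypothetical and not part of
 * this file):
 *
 *   slab_cache_t *my_cache;
 *
 *   my_cache = slab_cache_create("my_data", sizeof(my_data_t), 0,
 *                                NULL, NULL, 0);
 *   ...
 *   my_data_t *item = slab_alloc(my_cache, 0);
 *   ...
 *   slab_free(my_cache, item);
 *   ...
 *   slab_cache_destroy(my_cache);
 */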


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using the SLAB allocator for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< List of full/partial slabs */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< The index of next available item */
} slab_t;

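/*
 * Illustrative layout of a slab whose descriptor is kept inside the data
 * area (SLAB_CACHE_SLINSIDE), as set up by slab_space_alloc() below; the
 * exact counts and padding depend on cache->size and cache->order:
 *
 *   start                                       start + (PAGE_SIZE << order)
 *   |                                                             |
 *   v                                                             v
 *   +--------+--------+-- ... --+----------+--------+------------+
 *   | obj 0  | obj 1  |         | obj N-1  |  pad   |   slab_t   |
 *   +--------+--------+-- ... --+----------+--------+------------+
 *
 * Each free object stores the index of the next free object in its first
 * int, forming an embedded free list headed by slab->nextavail.
 */
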
/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize the slab descriptor
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    /* Build the embedded free list: each free object holds the index
     * of the next free object */
    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assumes cache->lock is held.
 *
 * @param slab The object's slab if the caller knows it directly, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    /* Push the object onto the slab's embedded free list */
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}

/**
 * Take a new object from a slab or create a new slab if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocated;
         *   that's why the recursion is at most one level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab) {
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    /* Pop the next free object off the slab's embedded free list */
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in the magazine and free the memory associated with it
 *
 * Assumes mag_cache[cpu].lock is locked
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0;i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from the magazine list */
    spinlock_lock(&cache->lock);
    if (list_empty(&cache->magazines)) {
        spinlock_unlock(&cache->lock);
        return NULL;
    }
    newmag = list_get_instance(cache->magazines.next,
                   slab_magazine_t,
                   link);
    list_remove(&newmag->link);
    spinlock_unlock(&cache->lock);

    if (lastmag)
        slab_free(&mag_cache, lastmag);
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Make sure the current magazine has room for at least one more object;
 * return a pointer to it, or NULL if no such magazine is available and a
 * new one cannot be allocated
 *
 * We have two magazines bound to each processor.
 * First try the current one.
 *  If it is full, try the last one.
 *   If that is full too, put the last one onto the magazines list,
 *   allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag,*lastmag,*newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* Both current and last are full or nonexistent, allocate a new one */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to the magazine list */
    if (lastmag)
        list_prepend(&lastmag->link, &cache->magazines);
    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
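
/*
 * Worked example for the two helpers above (illustrative figures only;
 * PAGE_SIZE == 4096 and sizeof(slab_t) == 48 are assumptions, both depend
 * on the architecture): for a cache of 512-byte objects with order 0 and
 * SLAB_CACHE_SLINSIDE, comp_objects() yields (4096 - 48) / 512 = 7 objects
 * per slab and badness() reports (4096 - 48) - 7*512 = 464 wasted bytes.
 */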

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           void (*destructor)(void *obj),
           int flags)
{
    int i;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->lock, "cachelock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i=0; i< config.cpu_count; i++)
            spinlock_initialize(&cache->mag_cache[i].lock,
                        "cpucachelock");
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    cache->order = (cache->size-1) >> PAGE_WIDTH;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache  */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_lock(&cache->mag_cache[i].lock);
    }
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */
        /* Destroy CPU magazines */
        for (i=0; i<config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur=cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(&mag->link);
        frames += magazine_destroy(cache,mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_unlock(&cache->mag_cache[i].lock);
    }

    return frames;
}

/** Check that there are no slabs and remove cache from system  */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs) \
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache  */
void slab_free(slab_cache_t *cache, void *obj)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
        || magazine_obj_put(cache, obj)) {

        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, NULL);
        spinlock_unlock(&cache->lock);
    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
               "slab_cache",
               sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                          sizeof(slab_t),
                          0, NULL, NULL,
                          SLAB_CACHE_SLINSIDE);

    /* Initialize structures for malloc */
}