/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;


typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< Link in list of full/partial slabs */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< The index of next available item */
} slab_t;

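/*
 * Free objects inside a slab form an index-linked list: the first word
 * of each free object stores the index of the next free object, and
 * slab_t->nextavail is the head of the list (see slab_space_alloc,
 * slab_obj_create and slab_obj_destroy below).
 */
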
/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize the slab structures
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = malloc(sizeof(*slab)); /* TODO: pass flags once malloc supports them */
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

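    /* Chain the free objects: the first word of each free object
     * stores the index of the next free one (see slab_obj_create) */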
    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);

    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
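    /* For SLINSIDE caches the slab_t lives inside the frames being
     * freed, so only an externally allocated slab_t needs free() */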
    frame_free((__address)slab->start);
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        free(slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

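    /* slab_space_alloc() stored a back-pointer to the owning slab
     * in frame->parent of every frame belonging to the slab */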
    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab, freeing the slab if it becomes completely empty
 *
 * Assumes cache->lock is held.
 *
 * @param slab The slab of the object, if known to the caller; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}

/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating;
         *   that is why the recursion is at most 1 level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab) {
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

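/*
 * Per-CPU caching uses magazines: fixed-size arrays of object pointers,
 * in the style of Bonwick's magazine layer. Every CPU keeps two magazines
 * (current and last); full magazines beyond those are parked on the
 * cache-wide cache->magazines list, which is protected by cache->lock.
 */
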
/**
 * Free all objects in a magazine and free memory associated with the magazine
 *
 * Assumes the corresponding mag_cache[cpu].lock is held.
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag)
        goto out;

    if (!mag->busy) {
        /* If current is empty and last exists and is non-empty, exchange */
        if (cache->mag_cache[CPU->id].last
            && cache->mag_cache[CPU->id].last->busy) {
            cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
            cache->mag_cache[CPU->id].last = mag;
            mag = cache->mag_cache[CPU->id].current;
            goto gotit;
        }
        /* If it is still empty, exchange the current magazine for
         * one of the full magazines on the cache-wide list */
        spinlock_lock(&cache->lock);
        if (list_empty(&cache->magazines)) {
            spinlock_unlock(&cache->lock);
            goto out;
        }
        /* Free current magazine and take one from the list */
        slab_free(&mag_cache, mag);
        mag = list_get_instance(cache->magazines.next,
                    slab_magazine_t,
                    link);
        list_remove(&mag->link);
        /* Make the freshly taken magazine the current one;
         * the old current was freed above */
        cache->mag_cache[CPU->id].current = mag;

        spinlock_unlock(&cache->lock);
    }
gotit:
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
out:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return NULL;
}

/**
 * Put object into CPU-cache magazine
 *
 * We have two magazines bound to each processor.
 * First try the current one; if it is full, try the last one.
 * If the last one is full too, move it to the cache-wide magazine
 * list, allocate a new magazine, and exchange the last and current
 * magazines.
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag) {
        /* We do not want to sleep just because of caching */
        /* Especially we do not want reclaiming to start, as
         * this would deadlock */
        mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
        if (!mag) /* Allocation failed, give up on caching */
            goto errout;

        cache->mag_cache[CPU->id].current = mag;
        mag->size = SLAB_MAG_SIZE;
        mag->busy = 0;
    } else if (mag->busy == mag->size) {
        /* Current is full; if the last one is missing or full as well,
         * park it on the cache-wide list and allocate a new one */
        mag = cache->mag_cache[CPU->id].last;
        if (!mag || mag->size == mag->busy) {
            if (mag) {
                /* The magazine list is protected by cache->lock */
                spinlock_lock(&cache->lock);
                list_prepend(&mag->link, &cache->magazines);
                spinlock_unlock(&cache->lock);
                /* Forget the parked magazine so that it cannot be
                 * used (and destroyed) twice */
                cache->mag_cache[CPU->id].last = NULL;
            }

            mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
            if (!mag)
                goto errout;

            mag->size = SLAB_MAG_SIZE;
            mag->busy = 0;
            cache->mag_cache[CPU->id].last = mag;
        }
        /* Exchange the 2 */
        cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
        cache->mag_cache[CPU->id].current = mag;
    }
    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
errout:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return -1;
}


/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
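
/*
 * Worked example (a sketch, assuming 4 KiB pages): for a 704-byte object
 * in an order-0 cache with external slab_t, comp_objects() yields
 * 4096 / 704 = 5 objects and badness() yields 4096 - 5*704 = 576 wasted
 * bytes per slab.
 */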

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           void (*destructor)(void *obj),
           int flags)
{
    int i;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->lock, "cachelock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_initialize(&cache->mag_cache[i].lock,
                        "cpucachelock");
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    cache->order = (cache->size-1) >> PAGE_WIDTH;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the slab metadata fits into the wasted space, keep it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}
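
/*
 * Illustrative usage sketch (my_obj_t and my_cache are hypothetical):
 *
 *   slab_cache_t *my_cache;
 *   my_obj_t *obj;
 *
 *   my_cache = slab_cache_create("my_obj_t", sizeof(my_obj_t), 0,
 *                                NULL, NULL, 0);
 *   obj = slab_alloc(my_cache, 0);
 *   ...
 *   slab_free(my_cache, obj);
 *   slab_cache_destroy(my_cache);
 */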

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 *
 * TODO: Add light reclaim
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    for (i=0; i < config.cpu_count; i++)
        spinlock_lock(&cache->mag_cache[i].lock);
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */
        /* Destroy CPU magazines */
        for (i=0; i < config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(&mag->link);
        frames += magazine_destroy(cache, mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    for (i=0; i < config.cpu_count; i++)
        spinlock_unlock(&cache->mag_cache[i].lock);

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    free(cache);
}

/** Allocate new object from cache; if no flags are given, always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    if (result)
        atomic_inc(&cache->allocated_objs);

    interrupts_restore(ipl);

    return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {

        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, NULL);
        spinlock_unlock(&cache->lock);
    }
    atomic_dec(&cache->allocated_objs);
    interrupts_restore(ipl);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE);

    /* Initialize structures for malloc */
}