/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;


typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< Link to list of full/partial slabs */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< The index of next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK)
        return NULL;

    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = malloc(sizeof(*slab)); /* TODO: honour flags here as well */
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }
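
    /*
     * Layout with SLAB_CACHE_SLINSIDE (the control structure is kept
     * inside the slab's own frames, at their very end):
     *
     *   |<------------ PAGE_SIZE << order ------------->|
     *   | obj 0 | obj 1 | ... | obj n-1 | waste | slab_t |
     */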

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame, although
     * the optimizer might optimize the division out :-/ */
    for (i = 0; i < (1 << cache->order); i++) {
        ADDR2FRAME(zone, (__address)(data + i*PAGE_SIZE))->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;

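    /* Build the embedded free list: the first word of every free
     * object stores the index of the next free one, so object i
     * initially points to object i + 1 and slab->nextavail starts
     * at index 0. */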
    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i*cache->size)) = i + 1;
    return slab;
}

/**
 * Free space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        free(slab);
    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assumes cache->lock is held.
 *
 * @param slab The object's slab, if the caller knows it directly; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    /* Push the object on the slab's embedded free list.
     * The cache->lock is already held by our callers. */
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&cache->partial_slabs, &slab->link);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}


/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and need nothing other than frame_alloc
         *   when they are allocating, so the recursion is at
         *   most one level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab)
            return NULL;
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (!slab->available)
        list_prepend(&cache->full_slabs, &slab->link);
    else
        list_prepend(&cache->partial_slabs, &slab->link);
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in a magazine and free the memory associated with it
 *
 * Assumes mag_cache[cpu].lock is held
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i = 0; i < mag->busy; i++)
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag)
        goto out;

    if (!mag->busy) {
        /* If current is empty and last exists and is not empty,
         * exchange them */
        if (cache->mag_cache[CPU->id].last
            && cache->mag_cache[CPU->id].last->busy) {
            cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
            cache->mag_cache[CPU->id].last = mag;
            mag = cache->mag_cache[CPU->id].current;
            goto gotit;
        }
        /* If still not busy, exchange current with some from
         * other full magazines */
        spinlock_lock(&cache->lock);
        if (list_empty(&cache->magazines)) {
            spinlock_unlock(&cache->lock);
            goto out;
        }
        /* Free current magazine and take one from list */
        slab_free(&mag_cache, mag);
        mag = list_get_instance(cache->magazines.next,
                    slab_magazine_t,
                    link);
        list_remove(&mag->link);
        /* The freed magazine must not stay cached as current */
        cache->mag_cache[CPU->id].current = mag;

        spinlock_unlock(&cache->lock);
    }
gotit:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return mag->objs[--mag->busy];
out:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return NULL;
}

/**
 * Put object into CPU-cache magazine
 *
 * Two magazines are bound to each processor:
 *  - first try the current one;
 *  - if it is full, try the last one;
 *  - if that is full too, move it to the magazines list,
 *    allocate a new one and exchange last & current.
 *
 * @return 0 on success, -1 if no memory could be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag) {
        /* We do not want to sleep just because of caching;
         * especially we do not want reclaiming to start, as
         * this would deadlock */
        mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
        if (!mag) /* Allocation failed, give up on caching */
            goto errout;

        cache->mag_cache[CPU->id].current = mag;
        mag->size = SLAB_MAG_SIZE;
        mag->busy = 0;
    } else if (mag->busy == mag->size) {
        /* Current is full; if the last is missing or full too,
         * allocate a new one */
        mag = cache->mag_cache[CPU->id].last;
        if (!mag || mag->size == mag->busy) {
            if (mag)
                list_prepend(&cache->magazines, &mag->link);

            mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
            if (!mag)
                goto errout;

            mag->size = SLAB_MAG_SIZE;
            mag->busy = 0;
            cache->mag_cache[CPU->id].last = mag;
        }
        /* Exchange the 2 */
        cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
        cache->mag_cache[CPU->id].current = mag;
    }
    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return 0;
errout:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return -1;
}
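
/*
 * Illustrative trace of the two-magazine scheme, assuming
 * SLAB_MAG_SIZE == 4 (the concrete size is an assumption made only
 * for this example):
 *
 *   current = [a b c d] (full), last = [] (empty), put(e):
 *     -> last is not full, so last and current are exchanged:
 *        current = [], last = [a b c d]
 *     -> e goes into the new current: current = [e]
 *
 * A subsequent magazine_obj_get() pops e from current; once current
 * drains, it is exchanged back with the still-busy last magazine.
 */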


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into one slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
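
/*
 * Worked example (assuming PAGE_SIZE == 4096): with the minimum order
 * of 1 computed below, a cache of 96-byte objects without
 * SLAB_CACHE_SLINSIDE holds 8192/96 = 85 objects per slab, so
 * badness() reports 8192 - 85*96 = 32 wasted bytes.
 */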

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           void (*destructor)(void *obj),
           int flags)
{
    int i;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align)
        size = ALIGN_UP(size, align);
    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->lock, "cachelock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i = 0; i < config.cpu_count; i++)
            spinlock_initialize(&cache->mag_cache[i].lock,
                        "cpucachelock");
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    cache->order = (cache->size / PAGE_SIZE) + 1;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }

    cache->objects = comp_objects(cache);

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}
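
/*
 * Illustrative usage sketch (not part of the allocator itself): create
 * a cache of 64-byte buffers, allocate one object and give it back.
 * The function name and the buffer size are made up for the example.
 */
static void slab_usage_example(void)
{
    slab_cache_t *cache;
    void *buf;

    /* No constructor/destructor, default flags */
    cache = slab_cache_create("example_buf", 64, 0, NULL, NULL, 0);

    /* Without FRAME_ATOMIC this is expected to always return memory */
    buf = slab_alloc(cache, 0);

    slab_free(cache, buf);
    slab_cache_destroy(cache);
}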

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 *
 * TODO: Add light reclaim
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    for (i = 0; i < config.cpu_count; i++)
        spinlock_lock(&cache->mag_cache[i].lock);
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */

        /* Destroy CPU magazines */
        for (i = 0; i < config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;
    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        /* Step to the previous node before unlinking the current one */
        cur = cur->prev;
        list_remove(cur->next);
        frames += magazine_destroy(cache, mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    for (i = 0; i < config.cpu_count; i++)
        spinlock_unlock(&cache->mag_cache[i].lock);

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs) ||
        !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    free(cache);
}

/** Allocate new object from cache - if no flags given, always returns
 *  memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    interrupts_restore(ipl);

    return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
        magazine_obj_put(cache, obj)) {

        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, NULL);
        spinlock_unlock(&cache->lock);
    }
    interrupts_restore(ipl);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tObj size\tOrder\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order);
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE);

    /* TODO: Initialize structures for malloc */
}