HelenOS-historic Subversion repository, rev. 591

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <mm/heap.h>
#include <synch/spinlock.h>
#include <func.h>
#include <memstr.h>
#include <panic.h>
#include <arch/types.h>
#include <arch/asm.h>
#include <arch.h>
#include <align.h>

/*
 * First-fit algorithm.
 * Simple, but hopefully correct.
 * Chunks being freed are tested for mergeability with their neighbours.
 */

static chunk_t *chunk0;
static spinlock_t heaplock;

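/*
 * chunk_t itself is declared in <mm/heap.h>, whose definition is not shown
 * in this file. Judging only from how it is used below, its layout is
 * roughly as follows (a sketch, not the authoritative definition):
 *
 *     typedef struct chunk {
 *         int used;              // non-zero while the chunk is allocated
 *         size_t size;           // payload size, header excluded
 *         struct chunk *next;    // next chunk in address order
 *         struct chunk *prev;    // previous chunk in address order
 *         __u8 data[1];          // first byte of the caller-visible payload
 *     } chunk_t;
 *
 * early_malloc() hands out &chunk->data[0], and early_free() recovers the
 * header by stepping back sizeof(chunk_t) bytes from the user pointer.
 */
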
/*
 * Initialize the early heap: the whole region becomes a single free chunk.
 */
void early_heap_init(__address heap, size_t size)
{
    spinlock_initialize(&heaplock, "heap_lock");
    memsetb(heap, size, 0);
    chunk0 = (chunk_t *) heap;
    chunk0->used = 0;
    chunk0->size = size - sizeof(chunk_t);
    chunk0->next = NULL;
    chunk0->prev = NULL;
}

/*
 * Uses first-fit algorithm.
 */
void *early_malloc(size_t size)
{
    ipl_t ipl;
    chunk_t *x, *y, *z;

    if (size == 0)
        panic("zero-size allocation request");

    size = ALIGN_UP(size, sizeof(__native));

    x = chunk0;
    ipl = interrupts_disable();
    spinlock_lock(&heaplock);
    while (x) {
        if (x->used || x->size < size) {
            x = x->next;
            continue;
        }

        x->used = 1;

        /*
         * If the chunk exactly matches the required size, or if truncating
         * it would not provide enough space for storing a new chunk
         * header plus at least one byte of data, we are finished.
         */
        if (x->size < size + sizeof(chunk_t) + 1) {
            spinlock_unlock(&heaplock);
            interrupts_restore(ipl);
            return &x->data[0];
        }

        /*
         * Truncate x and create a new chunk.
         */
        y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t));
        y->used = 0;
        y->size = x->size - size - sizeof(chunk_t);
        y->prev = x;
        y->next = NULL;

        z = x->next;
        if (z) {
            z->prev = y;
            y->next = z;
        }

        x->size = size;
        x->next = y;
        spinlock_unlock(&heaplock);
        interrupts_restore(ipl);

        return &x->data[0];
    }
    spinlock_unlock(&heaplock);
    interrupts_restore(ipl);
    return NULL;
}

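/*
 * Worked example of the split above (numbers purely illustrative): suppose
 * sizeof(chunk_t) is 32 and x is a free chunk with x->size == 1024. A request
 * for 100 bytes is first aligned up to a multiple of sizeof(__native), e.g.
 * to 104 on a 64-bit target. Because 1024 >= 104 + 32 + 1, the chunk is
 * split: x keeps a 104-byte payload and a new free chunk y with
 * y->size == 1024 - 104 - 32 == 888 is created immediately behind it.
 */
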
/*
 * Free a chunk and coalesce it with its free neighbours, if any.
 */
void early_free(void *ptr)
{
    ipl_t ipl;
    chunk_t *x, *y, *z;

    if (!ptr)
        panic("free on NULL");

    y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t));
    if (y->used != 1)
        panic("freeing unused/damaged chunk");

    ipl = interrupts_disable();
    spinlock_lock(&heaplock);
    x = y->prev;
    z = y->next;
    /* merge x and y */
    if (x && !x->used) {
        x->size += y->size + sizeof(chunk_t);
        x->next = z;
        if (z)
            z->prev = x;
        y = x;
    }
    /* merge y and z or merge (x merged with y) and z */
    if (z && !z->used) {
        y->size += z->size + sizeof(chunk_t);
        y->next = z->next;
        if (z->next) {
            /* y is either the original chunk or x, if they were merged */
            z->next->prev = y;
        }
    }
    y->used = 0;
    spinlock_unlock(&heaplock);
    interrupts_restore(ipl);
}
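
For orientation, the following usage sketch shows how the three routines above fit together during early boot. It is not part of the original file; the heap base address, size, and demo function name are made up for illustration, and only the functions defined above are assumed.

/* Hypothetical caller, for illustration only (not part of the original file). */
#define EARLY_HEAP_BASE  0x100000        /* assumed placement of the early heap */
#define EARLY_HEAP_SIZE  (256 * 1024)    /* assumed size of the early heap */

void early_heap_demo(void)
{
    void *a, *b;

    /* The whole region becomes a single free chunk (chunk0). */
    early_heap_init((__address) EARLY_HEAP_BASE, EARLY_HEAP_SIZE);

    a = early_malloc(64);     /* first fit: carved off the front of chunk0 */
    b = early_malloc(128);    /* carved off the remaining free chunk */

    early_free(a);            /* marked free; its neighbour is still used, so no merge */
    early_free(b);            /* freed and coalesced with the free chunks on both sides */
}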