Subversion Repositories HelenOS-historic

Rev 763 → Rev 764 (SLAB allocator)
Line 67... Line 67...

     slab_t *slab;
     size_t fsize;
     int i;
     zone_t *zone = NULL;
     int status;
+    frame_t *frame;
 
     data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
-    if (status != FRAME_OK)
+    if (status != FRAME_OK) {
         return NULL;
-
+    }
     if (! cache->flags & SLAB_CACHE_SLINSIDE) {
         slab = malloc(sizeof(*slab)); // , flags);
         if (!slab) {
             frame_free((__address)data);
             return NULL;
         }
     } else {
         fsize = (PAGE_SIZE << cache->order);
         slab = data + fsize - sizeof(*slab);
     }
 
     /* Fill in slab structures */
     /* TODO: some better way of accessing the frame */
     for (i=0; i< (1<<cache->order); i++) {
-        ADDR2FRAME(zone, (__address)(data+i*PAGE_SIZE))->parent = slab;
+        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
+        frame->parent = slab;
     }
 
     slab->start = data;
     slab->available = cache->objects;
     slab->nextavail = 0;
 
     for (i=0; i<cache->objects;i++)
         *((int *) (slab->start + i*cache->size)) = i+1;
+
+    atomic_inc(&cache->allocated_slabs);
+
     return slab;
 }
 
 /**
  * Free space associated with SLAB
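
Rev 764 makes three changes here: the early return for a failed frame_alloc() gains braces, the frame lookup converts the kernel address with KA2PA() before handing it to ADDR2FRAME() (through the new frame_t *frame temporary), and the cache starts counting slabs with atomic_inc(&cache->allocated_slabs). The final loop builds the slab's embedded free list: the first int of every free object stores the index of the next free object, and slab->nextavail heads the chain. A minimal sketch of how that chain is consumed and refilled, condensed from the allocator code in the hunks below (the sketch_* names are hypothetical):

    /* Pop the next free object off the slab's index chain
     * (same arithmetic as slab_obj_create below). */
    static void *sketch_obj_get(slab_cache_t *cache, slab_t *slab)
    {
        void *obj = slab->start + slab->nextavail * cache->size;
        slab->nextavail = *((int *)obj);  /* follow the chain */
        slab->available--;
        return obj;
    }

    /* Push a freed object back onto the chain. */
    static void sketch_obj_put(slab_cache_t *cache, slab_t *slab, void *obj)
    {
        *((int *)obj) = slab->nextavail;  /* link to the old head */
        slab->nextavail = (obj - slab->start) / cache->size;
        slab->available++;
    }

One review note: under C precedence, if (! cache->flags & SLAB_CACHE_SLINSIDE) parses as (!cache->flags) & SLAB_CACHE_SLINSIDE; the intended test is presumably !(cache->flags & SLAB_CACHE_SLINSIDE). Both revisions share this, and the diff leaves it untouched.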
Line 108... Line 113...

 static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
 {
     frame_free((__address)slab->start);
     if (! cache->flags & SLAB_CACHE_SLINSIDE)
         free(slab);
+
+    atomic_dec(&cache->allocated_slabs);
+
     return 1 << cache->order;
 }
 
 /** Map object to slab structure */
 static slab_t * obj2slab(void *obj)
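
The teardown path gets the matching decrement: slab_space_free() now does atomic_dec(&cache->allocated_slabs) after releasing the frames and, for caches without SLAB_CACHE_SLINSIDE, the external slab_t, and still reports the number of frames freed, 1 << cache->order.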
Line 151... Line 159...

 
     /* Move it to correct list */
     if (slab->available == 1) {
         /* It was in full, move to partial */
         list_remove(&slab->link);
-        list_prepend(&cache->partial_slabs, &slab->link);
+        list_prepend(&slab->link, &cache->partial_slabs);
     }
     if (slab->available == cache->objects) {
         /* Free associated memory */
         list_remove(&slab->link);
         /* Avoid deadlock */
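
This hunk, the next one, and the magazine code below all apply the same fix: the arguments of list_prepend() are swapped so that the item's link comes first and the list head second. A sketch of what the (link, list) signature does on a circular doubly linked list with a head sentinel; the body is illustrative, not necessarily HelenOS's actual implementation:

    /* Insert link immediately after the head sentinel. */
    static inline void list_prepend(link_t *link, link_t *list)
    {
        link->prev = list;
        link->next = list->next;
        list->next->prev = link;
        list->next = link;
    }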
Line 189... Line 197...

          *   that's why we should get recursion at most 1-level deep
          */
         spinlock_unlock(&cache->lock);
         slab = slab_space_alloc(cache, flags);
         spinlock_lock(&cache->lock);
-        if (!slab)
+        if (!slab) {
             return NULL;
+        }
     } else {
         slab = list_get_instance(cache->partial_slabs.next,
                      slab_t,
                      link);
         list_remove(&slab->link);
     }
     obj = slab->start + slab->nextavail * cache->size;
     slab->nextavail = *((int *)obj);
     slab->available--;
     if (! slab->available)
-        list_prepend(&cache->full_slabs, &slab->link);
+        list_prepend(&slab->link, &cache->full_slabs);
     else
-        list_prepend(&cache->partial_slabs, &slab->link);
+        list_prepend(&slab->link, &cache->partial_slabs);
     return obj;
 }
 
 /**************************************/
 /* CPU-Cache slab functions */
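
Besides two more list_prepend() swaps, slab_obj_create() gets braces around its error return. The surrounding logic is worth spelling out: when no partial slab is available, the cache lock is dropped before calling slab_space_alloc(), because that call can recurse into the allocator (to malloc() an external slab_t) and must not deadlock on cache->lock; the comment in the context notes the recursion is at most one level deep. Once a slab is in hand, an object is popped off the embedded free list and the slab is prepended to either full_slabs or partial_slabs depending on whether any objects remain.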
Line 313... Line 322...

     } else if (mag->busy == mag->size) {
         /* If the last is full | empty, allocate new */
         mag = cache->mag_cache[CPU->id].last;
         if (!mag || mag->size == mag->busy) {
             if (mag)
-                list_prepend(&cache->magazines, &mag->link);
+                list_prepend(&mag->link, &cache->magazines);
 
             mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
             if (!mag)
                 goto errout;
 
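
The same argument-order fix again, this time when parking a full magazine on cache->magazines before allocating a fresh one. The replacement magazine comes from mag_cache via slab_alloc() with FRAME_ATOMIC | FRAME_NO_RECLAIM, which, judging by the flag names, keeps the magazine allocation from blocking or re-entering slab reclaim.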
Line 531... Line 540...

         spinlock_lock(&cache->lock);
         result = slab_obj_create(cache, flags);
         spinlock_unlock(&cache->lock);
     }
 
+    if (result)
+        atomic_inc(&cache->allocated_objs);
+
     interrupts_restore(ipl);
 
+
     return result;
 }
 
 /** Return object to cache  */
 void slab_free(slab_cache_t *cache, void *obj)
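
slab_alloc() now counts successful allocations: allocated_objs is incremented only when result is non-NULL, after both the magazine path and the slab_obj_create() fallback, so the counter tracks objects handed to callers regardless of which path served them. Together with allocated_slabs this implies two new atomic_t fields on the cache descriptor, a companion change in the header that is not part of this diff; presumably something like:

    /* Assumed slab_cache_t additions (not shown in this diff). */
    atomic_t allocated_slabs;  /* slabs currently owned by the cache */
    atomic_t allocated_objs;   /* objects currently handed out */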
Line 550... Line 563...

 
         spinlock_lock(&cache->lock);
         slab_obj_destroy(cache, obj, NULL);
         spinlock_unlock(&cache->lock);
     }
+    atomic_dec(&cache->allocated_objs);
     interrupts_restore(ipl);
 }
 
 /* Go through all caches and reclaim what is possible */
 count_t slab_reclaim(int flags)
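
And the matching decrement: slab_free() drops allocated_objs unconditionally once the object has been returned, whether it went into a per-CPU magazine or straight back to its slab.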
Line 580... Line 594...

 {
     slab_cache_t *cache;
     link_t *cur;
 
     spinlock_lock(&slab_cache_lock);
-    printf("SLAB name\tOsize\tOrder\n");
+    printf("SLAB name\tOsize\tOrder\tOcnt\tSlabs\tAllocobjs\n");
     for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
         cache = list_get_instance(cur, slab_cache_t, link);
-        printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order);
+        printf("%s\t%d\t%d\t%d\t%d\t%d\n", cache->name, cache->size,
+               cache->order, cache->objects,
+               atomic_get(&cache->allocated_slabs),
+               atomic_get(&cache->allocated_objs));
     }
     spinlock_unlock(&slab_cache_lock);
 }
 
 void slab_cache_init(void)
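
Finally, slab_print_list() exposes the new statistics: Ocnt is cache->objects (objects per slab), while Slabs and Allocobjs read the two counters with atomic_get().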