Subversion Repositories HelenOS-historic


--- Rev 778
+++ Rev 780
@@ -232,30 +232,30 @@
 
     if (!slab)
         slab = obj2slab(obj);
 
     ASSERT(slab->cache == cache);
+    ASSERT(slab->available < cache->objects);
 
     spinlock_lock(&cache->slablock);
 
     *((int *)obj) = slab->nextavail;
     slab->nextavail = (obj - slab->start)/cache->size;
     slab->available++;
 
     /* Move it to correct list */
-    if (slab->available == 1) {
-        /* It was in full, move to partial */
-        list_remove(&slab->link);
-        list_prepend(&slab->link, &cache->partial_slabs);
-    }
     if (slab->available == cache->objects) {
         /* Free associated memory */
         list_remove(&slab->link);
         /* This should not produce deadlock, as
          * magazine is always allocated with NO reclaim,
          * keep all locks */
         frames = slab_space_free(cache, slab);
+    } else if (slab->available == 1) {
+        /* It was in full, move to partial */
+        list_remove(&slab->link);
+        list_prepend(&slab->link, &cache->partial_slabs);
     }
 
     spinlock_unlock(&cache->slablock);
 
     return frames;
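
The free path above keeps the slab's freelist inside the free objects themselves: the first int of a freed object is overwritten with the index of the previous freelist head, and the object's own index, computed from its offset within the slab, becomes the new slab->nextavail. The rewrite also turns the two independent ifs into an if/else-if, so the "slab is now completely free" and "slab just left the full list" cases become mutually exclusive; under the old order a slab holding a single object satisfied both tests on free and was prepended to partial_slabs only to be removed again immediately. A minimal, self-contained model of that logic (the types, sizes, and printfs are illustrative assumptions, not the HelenOS definitions):

/* A minimal model of the free path above -- not the HelenOS code itself.
 * The slab keeps its freelist *inside* the free objects: the first int
 * of each free object stores the index of the next free object, and
 * slab->nextavail heads the chain. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define OBJ_SIZE   64
#define OBJ_COUNT  4

typedef struct {
    char   start[OBJ_SIZE * OBJ_COUNT]; /* object storage */
    int    nextavail;                   /* index of first free object */
    size_t available;                   /* number of free objects */
} slab_model_t;

/* Push a freed object onto the slab's in-object freelist. */
static void model_obj_free(slab_model_t *slab, void *obj)
{
    assert(slab->available < OBJ_COUNT);        /* mirrors the new ASSERT */

    *((int *) obj) = slab->nextavail;           /* link to old head */
    slab->nextavail = (int) (((char *) obj - slab->start) / OBJ_SIZE);
    slab->available++;

    /* Mirrors the reordered branches: a completely free slab is
     * released outright; only a slab that just left the "full" state
     * (and is not completely free) moves to the partial list. */
    if (slab->available == OBJ_COUNT)
        printf("slab empty: would slab_space_free() it\n");
    else if (slab->available == 1)
        printf("slab was full: would move it to partial_slabs\n");
}

int main(void)
{
    slab_model_t slab = { .nextavail = -1, .available = 0 };

    /* Pretend all four objects were allocated, then free them back. */
    for (int i = OBJ_COUNT - 1; i >= 0; i--)
        model_obj_free(&slab, slab.start + i * OBJ_SIZE);
    return 0;
}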
@@ -280,13 +280,13 @@
          *   other than frame_alloc when they are allocating,
          *   that's why we should get recursion at most 1-level deep
          */
         spinlock_unlock(&cache->slablock);
         slab = slab_space_alloc(cache, flags);
-        spinlock_lock(&cache->slablock);
         if (!slab)
-            goto err;
+            return NULL;
+        spinlock_lock(&cache->slablock);
     } else {
         slab = list_get_instance(cache->partial_slabs.next,
                      slab_t,
                      link);
         list_remove(&slab->link);
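
(The old code re-took slablock before testing the result, so a failed allocation had to leave through an unlocking err: label; testing first means the failure return holds no lock, which is what lets the next hunk drop that label entirely.)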
@@ -299,13 +299,10 @@
     else
         list_prepend(&slab->link, &cache->partial_slabs);
 
     spinlock_unlock(&cache->slablock);
     return obj;
-err:
-    spinlock_unlock(&cache->slablock);
-    return NULL;
 }
 
 /**************************************/
 /* CPU-Cache slab functions */
 
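
The locking pattern being cleaned up in the last two hunks -- release slablock around an allocation that may block or recurse, and re-acquire it only once the allocation is known to have succeeded -- can be sketched outside the kernel with ordinary POSIX mutexes. Everything below is a stand-in for illustration, not HelenOS API:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t slablock = PTHREAD_MUTEX_INITIALIZER;

typedef struct { int dummy; } slab_t;      /* stand-in for the real slab_t */

static slab_t *slab_space_alloc(void)      /* stand-in allocator */
{
    return malloc(sizeof(slab_t));
}

static slab_t *cache_grow(void)
{
    pthread_mutex_lock(&slablock);
    /* ... no partial slab available, must allocate a new one ... */

    /* Drop the lock across the allocation so it may recurse/reclaim. */
    pthread_mutex_unlock(&slablock);
    slab_t *slab = slab_space_alloc();
    if (!slab)
        return NULL;        /* failure path holds no lock: no err: label */
    pthread_mutex_lock(&slablock);

    /* ... link the new slab into the cache under the lock ... */
    pthread_mutex_unlock(&slablock);
    return slab;
}

int main(void)
{
    slab_t *slab = cache_grow();
    free(slab);
    return 0;
}

The old shape, lock-then-test, forces every failure exit to remember the unlock; testing before re-locking keeps the lock state of each return path obvious at a glance.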