Subversion Repositories HelenOS

Changes from Rev 4537 to Rev 4668
Line 44 (Rev 4537) ... Line 44 (Rev 4668)

 #include <sys/mman.h>
 #include <async.h>
 #include <ipc/ipc.h>
 #include <as.h>
 #include <assert.h>
-#include <futex.h>
+#include <fibril_sync.h>
 #include <adt/list.h>
 #include <adt/hash_table.h>
 #include <mem.h>

 /** Lock protecting the device connection list */
-static futex_t dcl_lock = FUTEX_INITIALIZER;
+static FIBRIL_MUTEX_INITIALIZE(dcl_lock);
 /** Device connection list head. */
 static LIST_INITIALIZE(dcl_head);

 #define CACHE_BUCKETS_LOG2      10
 #define CACHE_BUCKETS           (1 << CACHE_BUCKETS_LOG2)

 typedef struct {
-    futex_t lock;
+    fibril_mutex_t lock;
     size_t block_size;          /**< Block size. */
     unsigned block_count;       /**< Total number of blocks. */
+    unsigned blocks_cached;     /**< Number of cached blocks. */
     hash_table_t block_hash;
     link_t free_head;
+    enum cache_mode mode;
 } cache_t;

 typedef struct {
     link_t link;
     dev_handle_t dev_handle;
     int dev_phone;
+    fibril_mutex_t com_area_lock;
     void *com_area;
     size_t com_size;
     void *bb_buf;
     off_t bb_off;
     size_t bb_size;
     cache_t *cache;
 } devcon_t;

+static int read_block(devcon_t *devcon, bn_t boff, size_t block_size);
+static int write_block(devcon_t *devcon, bn_t boff, size_t block_size);
+
 static devcon_t *devcon_search(dev_handle_t dev_handle)
 {
     link_t *cur;

-    futex_down(&dcl_lock);
+    fibril_mutex_lock(&dcl_lock);
     for (cur = dcl_head.next; cur != &dcl_head; cur = cur->next) {
         devcon_t *devcon = list_get_instance(cur, devcon_t, link);
         if (devcon->dev_handle == dev_handle) {
-            futex_up(&dcl_lock);
+            fibril_mutex_unlock(&dcl_lock);
             return devcon;
         }
     }
-    futex_up(&dcl_lock);
+    fibril_mutex_unlock(&dcl_lock);
     return NULL;
 }

 static int devcon_add(dev_handle_t dev_handle, int dev_phone, void *com_area,
     size_t com_size)
Line 106 (Rev 4537) ... Line 112 (Rev 4668)

         return ENOMEM;

     link_initialize(&devcon->link);
     devcon->dev_handle = dev_handle;
     devcon->dev_phone = dev_phone;
+    fibril_mutex_initialize(&devcon->com_area_lock);
     devcon->com_area = com_area;
     devcon->com_size = com_size;
     devcon->bb_buf = NULL;
     devcon->bb_off = 0;
     devcon->bb_size = 0;
     devcon->cache = NULL;

-    futex_down(&dcl_lock);
+    fibril_mutex_lock(&dcl_lock);
     for (cur = dcl_head.next; cur != &dcl_head; cur = cur->next) {
         devcon_t *d = list_get_instance(cur, devcon_t, link);
         if (d->dev_handle == dev_handle) {
-            futex_up(&dcl_lock);
+            fibril_mutex_unlock(&dcl_lock);
             free(devcon);
             return EEXIST;
         }
     }
     list_append(&devcon->link, &dcl_head);
-    futex_up(&dcl_lock);
+    fibril_mutex_unlock(&dcl_lock);
     return EOK;
 }

 static void devcon_remove(devcon_t *devcon)
 {
-    futex_down(&dcl_lock);
+    fibril_mutex_lock(&dcl_lock);
     list_remove(&devcon->link);
-    futex_up(&dcl_lock);
+    fibril_mutex_unlock(&dcl_lock);
 }

 int block_init(dev_handle_t dev_handle, size_t com_size)
 {
     int rc;
Line 205 (Rev 4537) ... Line 212 (Rev 4668)

         return EEXIST;
     bb_buf = malloc(size);
     if (!bb_buf)
         return ENOMEM;

-    off_t bufpos = 0;
-    size_t buflen = 0;
-    rc = block_read(dev_handle, &bufpos, &buflen, &off,
-        bb_buf, size, size);
+    fibril_mutex_lock(&devcon->com_area_lock);
+    rc = read_block(devcon, 0, size);
     if (rc != EOK) {
+        fibril_mutex_unlock(&devcon->com_area_lock);
         free(bb_buf);
         return rc;
     }
+    memcpy(bb_buf, devcon->com_area, size);
+    fibril_mutex_unlock(&devcon->com_area_lock);
+
     devcon->bb_buf = bb_buf;
     devcon->bb_off = off;
     devcon->bb_size = size;

     return EOK;
Line 248 (Rev 4537) ... Line 257 (Rev 4668)

     .hash = cache_hash,
     .compare = cache_compare,
     .remove_callback = cache_remove_callback
 };

-int block_cache_init(dev_handle_t dev_handle, size_t size, unsigned blocks)
+int block_cache_init(dev_handle_t dev_handle, size_t size, unsigned blocks,
+    enum cache_mode mode)
 {
     devcon_t *devcon = devcon_search(dev_handle);
     cache_t *cache;
     if (!devcon)
         return ENOENT;
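The new mode argument refers to an enum cache_mode that lives in the library's public header, which is not part of this diff. A minimal sketch of what that enumeration presumably looks like; only CACHE_MODE_WB is actually visible in this revision (it is tested in block_put() further down), and CACHE_MODE_WT is a hypothetical name for the write-through case:

/* Sketch only -- the real declaration belongs to the library's header. */
enum cache_mode {
    CACHE_MODE_WT,  /* write-through: flush a dirty block as soon as its
                     * last reference is dropped in block_put() */
    CACHE_MODE_WB   /* write-back: keep dirty blocks cached */
};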
Line 260 (Rev 4537) ... Line 270 (Rev 4668)

         return EEXIST;
     cache = malloc(sizeof(cache_t));
     if (!cache)
         return ENOMEM;

-    futex_initialize(&cache->lock, 1);
+    fibril_mutex_initialize(&cache->lock);
     list_initialize(&cache->free_head);
     cache->block_size = size;
     cache->block_count = blocks;
+    cache->blocks_cached = 0;
+    cache->mode = mode;

     if (!hash_table_create(&cache->block_hash, CACHE_BUCKETS, 1,
         &cache_ops)) {
         free(cache);
         return ENOMEM;
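A hedged sketch of how a client file system might initialize a device connection and its cache after this change; the header name, the 4096-byte sizes and the dev_bcount parameter are illustrative assumptions, while block_init(), block_cache_init() and CACHE_MODE_WB follow the signatures visible in this diff:

#include <libblock.h>   /* assumed name of the library's public header */

static int setup_block_cache(dev_handle_t dev_handle, unsigned dev_bcount)
{
    int rc;

    /* Connect to the block device with a 4 KiB communication area. */
    rc = block_init(dev_handle, 4096);
    if (rc != EOK)
        return rc;

    /* Cache 4 KiB blocks in write-back mode; dev_bcount is the total
     * number of blocks on the device. */
    return block_cache_init(dev_handle, 4096, dev_bcount, CACHE_MODE_WB);
}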
Line 275 (Rev 4537) ... Line 287 (Rev 4668)

 
     devcon->cache = cache;
     return EOK;
 }

+#define CACHE_LO_WATERMARK  10
+#define CACHE_HI_WATERMARK  20
 static bool cache_can_grow(cache_t *cache)
 {
+    if (cache->blocks_cached < CACHE_LO_WATERMARK)
+        return true;
+    if (!list_empty(&cache->free_head))
+        return false;
     return true;
 }

 static void block_initialize(block_t *b)
 {
-    futex_initialize(&b->lock, 1);
+    fibril_mutex_initialize(&b->lock);
     b->refcnt = 1;
     b->dirty = false;
-    rwlock_initialize(&b->contents_lock);
+    fibril_rwlock_initialize(&b->contents_lock);
     link_initialize(&b->free_link);
     link_initialize(&b->hash_link);
 }

 /** Instantiate a block in memory and get a reference to it.
Line 307 (Rev 4537) ... Line 325 (Rev 4668)

     devcon_t *devcon;
     cache_t *cache;
     block_t *b;
     link_t *l;
     unsigned long key = boff;
+    bn_t oboff;

     devcon = devcon_search(dev_handle);

     assert(devcon);
     assert(devcon->cache);

     cache = devcon->cache;
-    futex_down(&cache->lock);
+    fibril_mutex_lock(&cache->lock);
     l = hash_table_find(&cache->block_hash, &key);
     if (l) {
         /*
          * We found the block in the cache.
          */
         b = hash_table_get_instance(l, block_t, hash_link);
-        futex_down(&b->lock);
+        fibril_mutex_lock(&b->lock);
         if (b->refcnt++ == 0)
             list_remove(&b->free_link);
-        futex_up(&b->lock);
-        futex_up(&cache->lock);
+        fibril_mutex_unlock(&b->lock);
+        fibril_mutex_unlock(&cache->lock);
     } else {
         /*
          * The block was not found in the cache.
          */
         int rc;
-        off_t bufpos = 0;
-        size_t buflen = 0;
-        off_t pos = boff * cache->block_size;
         bool sync = false;

         if (cache_can_grow(cache)) {
             /*
              * We can grow the cache by allocating new blocks.
Line 350 (Rev 4537) ... Line 366 (Rev 4668)

             b->data = malloc(cache->block_size);
             if (!b->data) {
                 free(b);
                 goto recycle;
             }
+            cache->blocks_cached++;
         } else {
             /*
              * Try to recycle a block from the free list.
              */
             unsigned long temp_key;
 recycle:
             assert(!list_empty(&cache->free_head));
             l = cache->free_head.next;
             list_remove(l);
-            b = hash_table_get_instance(l, block_t, hash_link);
+            b = list_get_instance(l, block_t, free_link);
             sync = b->dirty;
+            oboff = b->boff;
             temp_key = b->boff;
             hash_table_remove(&cache->block_hash, &temp_key, 1);
         }

         block_initialize(b);
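The grow-or-recycle decision in the hunk above is driven by the two watermarks introduced in this revision. A standalone illustration of the policy, compilable outside HelenOS; the constants mirror the diff, everything else is scaffolding:

#include <stdbool.h>
#include <stdio.h>

#define CACHE_LO_WATERMARK  10
#define CACHE_HI_WATERMARK  20

/* Mirrors cache_can_grow(): always grow below the low watermark; above it,
 * allocate a new block only when every cached block is still referenced
 * (i.e. the free list is empty). */
static bool can_grow(unsigned blocks_cached, bool free_list_empty)
{
    if (blocks_cached < CACHE_LO_WATERMARK)
        return true;
    if (!free_list_empty)
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", can_grow(5, false));   /* 1: below the low watermark */
    printf("%d\n", can_grow(15, false));  /* 0: recycle a free block instead */
    printf("%d\n", can_grow(15, true));   /* 1: all cached blocks are in use */
    /* block_put() starts freeing blocks outright once blocks_cached exceeds
     * CACHE_HI_WATERMARK; see its hunk below. */
    return 0;
}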
Line 376 (Rev 4537) ... Line 394 (Rev 4668)

         /*
          * Lock the block before releasing the cache lock. Thus we don't
          * kill concurrent operations on the cache while doing I/O on the
          * block.
          */
-        futex_down(&b->lock);
-        futex_up(&cache->lock);
+        fibril_mutex_lock(&b->lock);
+        fibril_mutex_unlock(&cache->lock);

         if (sync) {
             /*
              * The block is dirty and needs to be written back to
              * the device before we can read in the new contents.
              */
-            abort();    /* TODO: block_write() */
+            fibril_mutex_lock(&devcon->com_area_lock);
+            memcpy(devcon->com_area, b->data, b->size);
+            rc = write_block(devcon, oboff, cache->block_size);
+            assert(rc == EOK);
+            fibril_mutex_unlock(&devcon->com_area_lock);
         }
         if (!(flags & BLOCK_FLAGS_NOREAD)) {
             /*
              * The block contains old or no data. We need to read
              * the new contents from the device.
              */
-            rc = block_read(dev_handle, &bufpos, &buflen, &pos,
-                b->data, cache->block_size, cache->block_size);
+            fibril_mutex_lock(&devcon->com_area_lock);
+            rc = read_block(devcon, b->boff, cache->block_size);
             assert(rc == EOK);
+            memcpy(b->data, devcon->com_area, cache->block_size);
+            fibril_mutex_unlock(&devcon->com_area_lock);
         }

-        futex_up(&b->lock);
+        fibril_mutex_unlock(&b->lock);
     }
     return b;
 }

 /** Release a reference to a block.
Line 411 (Rev 4537) ... Line 435 (Rev 4668)

  */
 void block_put(block_t *block)
 {
     devcon_t *devcon = devcon_search(block->dev_handle);
     cache_t *cache;
+    int rc;

     assert(devcon);
     assert(devcon->cache);

     cache = devcon->cache;
-    futex_down(&cache->lock);
-    futex_down(&block->lock);
+    fibril_mutex_lock(&cache->lock);
+    fibril_mutex_lock(&block->lock);
     if (!--block->refcnt) {
         /*
-         * Last reference to the block was dropped, put the block on the
-         * free list.
+         * Last reference to the block was dropped. Either free the
+         * block or put it on the free list.
          */
+        if (cache->blocks_cached > CACHE_HI_WATERMARK) {
+            /*
+             * Currently there are too many cached blocks.
+             */
+            if (block->dirty) {
+                fibril_mutex_lock(&devcon->com_area_lock);
+                memcpy(devcon->com_area, block->data,
+                    block->size);
+                rc = write_block(devcon, block->boff,
+                    block->size);
+                assert(rc == EOK);
+                fibril_mutex_unlock(&devcon->com_area_lock);
+            }
+            /*
+             * Take the block out of the cache and free it.
+             */
+            unsigned long key = block->boff;
+            hash_table_remove(&cache->block_hash, &key, 1);
+            free(block->data);
+            free(block);
+            cache->blocks_cached--;
+            fibril_mutex_unlock(&cache->lock);
+            return;
+        }
+        /*
+         * Put the block on the free list.
+         */
         list_append(&block->free_link, &cache->free_head);
+        if (cache->mode != CACHE_MODE_WB && block->dirty) {
+            fibril_mutex_lock(&devcon->com_area_lock);
+            memcpy(devcon->com_area, block->data, block->size);
+            rc = write_block(devcon, block->boff, block->size);
+            assert(rc == EOK);
+            fibril_mutex_unlock(&devcon->com_area_lock);
+
+            block->dirty = false;
+        }
     }
-    futex_up(&block->lock);
-    futex_up(&cache->lock);
+    fibril_mutex_unlock(&block->lock);
+    fibril_mutex_unlock(&cache->lock);
 }

-/** Read data from a block device.
+/** Read sequential data from a block device.
  *
  * @param dev_handle    Device handle of the block device.
  * @param bufpos    Pointer to the first unread valid offset within the
  *          communication buffer.
  * @param buflen    Pointer to the number of unread bytes that are ready in
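With block_put() reworked as above, when a dirty block actually reaches the device now depends on the cache mode and on the high watermark. A hedged sketch of a typical client sequence; the exact block_get() prototype is not shown in this diff, so the argument order follows the identifiers used in its body (dev_handle, boff, flags), and passing 0 for flags is assumed to mean "read the block in":

static void patch_block(dev_handle_t dev_handle, bn_t boff, size_t off,
    char value)
{
    block_t *b = block_get(dev_handle, boff, 0);

    /* Serialize with other fibrils touching the same block. */
    fibril_rwlock_write_lock(&b->contents_lock);
    ((char *) b->data)[off] = value;
    b->dirty = true;
    fibril_rwlock_write_unlock(&b->contents_lock);

    /* In write-through mode (mode != CACHE_MODE_WB) the dirty block is
     * flushed here; in write-back mode it stays cached until it is recycled
     * by block_get() or freed above the high watermark. */
    block_put(b);
}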
Line 443 (Rev 4537) ... Line 504 (Rev 4668)

  * @param size      Size of the destination buffer.
  * @param block_size    Block size to be used for the transfer.
  *
  * @return      EOK on success or a negative return code on failure.
  */
-int
-block_read(dev_handle_t dev_handle, off_t *bufpos, size_t *buflen, off_t *pos,
-    void *dst, size_t size, size_t block_size)
+int block_seqread(dev_handle_t dev_handle, off_t *bufpos, size_t *buflen,
+    off_t *pos, void *dst, size_t size, size_t block_size)
 {
     off_t offset = 0;
     size_t left = size;
     devcon_t *devcon = devcon_search(dev_handle);
     assert(devcon);

+    fibril_mutex_lock(&devcon->com_area_lock);
     while (left > 0) {
         size_t rd;

         if (*bufpos + left < *buflen)
             rd = left;
Line 472 (Rev 4537) ... Line 533 (Rev 4668)

             *bufpos += rd;
             *pos += rd;
             left -= rd;
         }

-        if (*bufpos == *buflen) {
+        if (*bufpos == (off_t) *buflen) {
             /* Refill the communication buffer with a new block. */
-            ipcarg_t retval;
-            int rc = async_req_2_1(devcon->dev_phone, BD_READ_BLOCK,
-                *pos / block_size, block_size, &retval);
-            if ((rc != EOK) || (retval != EOK))
-                return (rc != EOK ? rc : retval);
+            int rc;
+
+            rc = read_block(devcon, *pos / block_size, block_size);
+            if (rc != EOK) {
+                fibril_mutex_unlock(&devcon->com_area_lock);
+                return rc;
+            }

             *bufpos = 0;
             *buflen = block_size;
         }
     }
+    fibril_mutex_unlock(&devcon->com_area_lock);

     return EOK;
 }

+/** Read block from block device.
+ *
+ * @param devcon        Device connection.
+ * @param boff          Block index.
+ * @param block_size    Block size.
+ *
+ * @return              EOK on success or negative error code on failure.
+ */
+static int read_block(devcon_t *devcon, bn_t boff, size_t block_size)
+{
+    ipcarg_t retval;
+    int rc;
+
+    assert(devcon);
+    rc = async_req_2_1(devcon->dev_phone, BD_READ_BLOCK, boff, block_size,
+        &retval);
+    if ((rc != EOK) || (retval != EOK))
+        return (rc != EOK ? rc : (int) retval);
+
+    return EOK;
+}
+
+/** Write block to block device.
+ *
+ * @param devcon        Device connection.
+ * @param boff          Block index.
+ * @param block_size    Block size.
+ *
+ * @return              EOK on success or negative error code on failure.
+ */
+static int write_block(devcon_t *devcon, bn_t boff, size_t block_size)
+{
+    ipcarg_t retval;
+    int rc;
+
+    assert(devcon);
+    rc = async_req_2_1(devcon->dev_phone, BD_WRITE_BLOCK, boff, block_size,
+        &retval);
+    if ((rc != EOK) || (retval != EOK))
+        return (rc != EOK ? rc : (int) retval);
+
+    return EOK;
+}
+
 /** @}
  */
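The renamed block_seqread() and the new read_block()/write_block() helpers complete the change: all device I/O now goes through the per-connection com_area under com_area_lock. A hedged example of pulling a small on-disk structure off the start of a device with block_seqread(); the wrapper's name and parameters are illustrative only:

/* Read sb_size bytes starting at byte offset 0, crossing block boundaries
 * as needed; bufpos/buflen/pos carry the position across successive calls. */
static int read_superblock(dev_handle_t dev_handle, void *sb, size_t sb_size,
    size_t block_size)
{
    off_t bufpos = 0;   /* nothing buffered in com_area yet */
    size_t buflen = 0;
    off_t pos = 0;      /* current byte position on the device */

    return block_seqread(dev_handle, &bufpos, &buflen, &pos, sb, sb_size,
        block_size);
}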