/branches/sparc/kernel/generic/include/udebug/udebug.h |
---|
176,7 → 176,7 |
*/ |
atomic_t int_lock; |
/** Synchronize debug ops on this thread / access to this structure */ |
/** Synchronize debug ops on this thread / access to this structure. */ |
mutex_t lock; |
waitq_t go_wq; |
183,11 → 183,11 |
call_t *go_call; |
unative_t syscall_args[6]; |
/** What type of event are we stopped in or 0 if none */ |
/** What type of event are we stopped in or 0 if none. */ |
udebug_event_t cur_event; |
bool stop; |
bool stoppable; |
bool debug_active; /**< In a debugging session */ |
bool go; /**< thread is GO */ |
bool stoppable; /**< thread is stoppable */ |
bool debug_active; /**< thread is in a debugging session */ |
} udebug_thread_t; |
struct task; |
/branches/sparc/kernel/generic/src/udebug/udebug.c |
---|
95,7 → 95,7 |
atomic_set(&ut->int_lock, 1); |
ut->go_call = NULL; |
ut->stop = true; |
ut->go = false; |
ut->stoppable = true; |
ut->debug_active = false; |
ut->cur_event = 0; /* none */ |
198,7 → 198,8 |
* Active debugging session |
*/ |
if (THREAD->udebug.debug_active && THREAD->udebug.stop) { |
if (THREAD->udebug.debug_active == true && |
THREAD->udebug.go == false) { |
/* |
* Thread was requested to stop - answer go call |
*/ |
239,7 → 240,7 |
mutex_lock(&THREAD->udebug.lock); |
if (THREAD->udebug.debug_active && |
THREAD->udebug.stop == true) { |
THREAD->udebug.go == false) { |
TASK->udebug.begin_call = NULL; |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
247,7 → 248,7 |
udebug_wait_for_go(&THREAD->udebug.go_wq); |
goto restart; |
/* must try again - have to lose stoppability atomically */ |
/* Must try again - have to lose stoppability atomically. */ |
} else { |
++TASK->udebug.not_stoppable_count; |
ASSERT(THREAD->udebug.stoppable == true); |
321,9 → 322,9 |
mutex_lock(&TASK->udebug.lock); |
mutex_lock(&THREAD->udebug.lock); |
/* Must only generate events when in debugging session and have go */ |
/* Must only generate events when in debugging session and is go. */ |
if (THREAD->udebug.debug_active != true || |
THREAD->udebug.stop == true || |
THREAD->udebug.go == false || |
(TASK->udebug.evmask & UDEBUG_EVMASK(etype)) == 0) { |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
348,11 → 349,11 |
THREAD->udebug.syscall_args[5] = a6; |
/* |
* Make sure udebug.stop is true when going to sleep |
* Make sure udebug.go is false when going to sleep |
* in case we get woken up by DEBUG_END. (At which |
* point it must be back to the initial true value). |
*/ |
THREAD->udebug.stop = true; |
THREAD->udebug.go = false; |
THREAD->udebug.cur_event = etype; |
ipc_answer(&TASK->answerbox, call); |
387,9 → 388,9 |
/* Must only generate events when in debugging session */ |
if (THREAD->udebug.debug_active != true) { |
LOG("- debug_active: %s, udebug.stop: %s\n", |
LOG("- debug_active: %s, udebug.go: %s\n", |
THREAD->udebug.debug_active ? "yes(+)" : "no(-)", |
THREAD->udebug.stop ? "yes(-)" : "no(+)"); |
THREAD->udebug.go ? "yes(-)" : "no(+)"); |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
return; |
404,11 → 405,11 |
IPC_SET_ARG2(call->data, (unative_t)t); |
/* |
* Make sure udebug.stop is true when going to sleep |
* Make sure udebug.go is false when going to sleep |
* in case we get woken up by DEBUG_END. (At which |
* point it must be back to the initial true value). |
*/ |
THREAD->udebug.stop = true; |
THREAD->udebug.go = false; |
THREAD->udebug.cur_event = UDEBUG_EVENT_THREAD_B; |
ipc_answer(&TASK->answerbox, call); |
439,11 → 440,11 |
LOG("udebug_thread_e_event\n"); |
LOG("- check state\n"); |
/* Must only generate events when in debugging session */ |
/* Must only generate events when in debugging session. */ |
if (THREAD->udebug.debug_active != true) { |
/* printf("- debug_active: %s, udebug.stop: %s\n", |
/* printf("- debug_active: %s, udebug.go: %s\n", |
THREAD->udebug.debug_active ? "yes(+)" : "no(-)", |
THREAD->udebug.stop ? "yes(-)" : "no(+)");*/ |
THREAD->udebug.go ? "yes(-)" : "no(+)");*/ |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
return; |
456,10 → 457,10 |
IPC_SET_RETVAL(call->data, 0); |
IPC_SET_ARG1(call->data, UDEBUG_EVENT_THREAD_E); |
/* Prevent any further debug activity in thread */ |
/* Prevent any further debug activity in thread. */ |
THREAD->udebug.debug_active = false; |
THREAD->udebug.cur_event = 0; /* none */ |
THREAD->udebug.stop = true; /* set to initial value */ |
THREAD->udebug.go = false; /* set to initial value */ |
ipc_answer(&TASK->answerbox, call); |
466,8 → 467,8 |
mutex_unlock(&THREAD->udebug.lock); |
mutex_unlock(&TASK->udebug.lock); |
/* Leave int_lock enabled */ |
/* This event does not sleep - debugging has finished in this thread */ |
/* Leave int_lock enabled. */ |
/* This event does not sleep - debugging has finished in this thread. */ |
} |
/** |
512,19 → 513,19 |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
/* Only process userspace threads */ |
/* Only process userspace threads. */ |
if ((flags & THREAD_FLAG_USPACE) != 0) { |
/* Prevent any further debug activity in thread */ |
/* Prevent any further debug activity in thread. */ |
t->udebug.debug_active = false; |
t->udebug.cur_event = 0; /* none */ |
/* Still has go? */ |
if (t->udebug.stop == false) { |
/* Is the thread still go? */ |
if (t->udebug.go == true) { |
/* |
* Yes, so clear go. As debug_active == false, |
* this doesn't affect anything. |
*/ |
t->udebug.stop = true; |
t->udebug.go = false; |
/* Answer GO call */ |
LOG("answer GO call with EVENT_FINISHED\n"); |
/branches/sparc/kernel/generic/src/udebug/udebug_ops.c |
---|
57,7 → 57,7 |
* |
* Specifically, verifies that thread t exists, is a userspace thread, |
* and belongs to the current task (TASK). Verifies, that the thread |
* has (or hasn't) go according to having_go (typically false). |
* is (or is not) go according to being_go (typically false). |
* It also locks t->udebug.lock, making sure that t->udebug.debug_active |
* is true - that the thread is in a valid debugging session. |
* |
70,11 → 70,11 |
* the t->lock spinlock to the t->udebug.lock mutex. |
* |
* @param t Pointer, need not at all be valid. |
* @param having_go Required thread state. |
* @param being_go Required thread state. |
* |
* Returns EOK if all went well, or an error code otherwise. |
*/ |
static int _thread_op_begin(thread_t *t, bool having_go) |
static int _thread_op_begin(thread_t *t, bool being_go) |
{ |
task_id_t taskid; |
ipl_t ipl; |
98,7 → 98,7 |
spinlock_lock(&t->lock); |
spinlock_unlock(&threads_lock); |
/* Verify that 't' is a userspace thread */ |
/* Verify that 't' is a userspace thread. */ |
if ((t->flags & THREAD_FLAG_USPACE) == 0) { |
/* It's not, deny its existence */ |
spinlock_unlock(&t->lock); |
107,7 → 107,7 |
return ENOENT; |
} |
/* Verify debugging state */ |
/* Verify debugging state. */ |
if (t->udebug.debug_active != true) { |
/* Not in debugging session or undesired GO state */ |
spinlock_unlock(&t->lock); |
124,9 → 124,9 |
spinlock_unlock(&t->lock); |
interrupts_restore(ipl); |
/* Only mutex TASK->udebug.lock left */ |
/* Only mutex TASK->udebug.lock left. */ |
/* Now verify that the thread belongs to the current task */ |
/* Now verify that the thread belongs to the current task. */ |
if (t->task != TASK) { |
/* No such thread belonging to this task. */ |
mutex_unlock(&TASK->udebug.lock); |
139,18 → 139,18 |
*/ |
mutex_lock(&t->udebug.lock); |
/* The big task mutex is no longer needed */ |
/* The big task mutex is no longer needed. */ |
mutex_unlock(&TASK->udebug.lock); |
if (!t->udebug.stop != having_go) { |
/* Not in debugging session or undesired GO state */ |
if (t->udebug.go != being_go) { |
/* Not in debugging session or undesired GO state. */ |
mutex_unlock(&t->udebug.lock); |
return EINVAL; |
} |
/* Only t->udebug.lock left */ |
/* Only t->udebug.lock left. */ |
return EOK; /* All went well */ |
return EOK; /* All went well. */ |
} |
/** End debugging operation on a thread. */ |
204,7 → 204,7 |
reply = 0; /* no reply */ |
} |
/* Set udebug.debug_active on all of the task's userspace threads */ |
/* Set udebug.debug_active on all of the task's userspace threads. */ |
for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) { |
t = list_get_instance(cur, thread_t, th_link); |
273,7 → 273,7 |
/** Give thread GO. |
* |
* Upon recieving a go message, the thread is given GO. Having GO |
* Upon receiving a go message, the thread is given GO. Being GO |
* means the thread is allowed to execute userspace code (until |
* a debugging event or STOP occurs, at which point the thread loses GO. |
* |
284,7 → 284,7 |
{ |
int rc; |
/* On success, this will lock t->udebug.lock */ |
/* On success, this will lock t->udebug.lock. */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
return rc; |
291,11 → 291,11 |
} |
t->udebug.go_call = call; |
t->udebug.stop = false; |
t->udebug.go = true; |
t->udebug.cur_event = 0; /* none */ |
/* |
* Neither t's lock nor threads_lock may be held during wakeup |
* Neither t's lock nor threads_lock may be held during wakeup. |
*/ |
waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST); |
328,21 → 328,21 |
return rc; |
} |
/* Take GO away from the thread */ |
t->udebug.stop = true; |
/* Take GO away from the thread. */ |
t->udebug.go = false; |
if (!t->udebug.stoppable) { |
/* Answer will be sent when the thread becomes stoppable */ |
if (t->udebug.stoppable != true) { |
/* Answer will be sent when the thread becomes stoppable. */ |
_thread_op_end(t); |
return 0; |
} |
/* |
* Answer GO call |
* Answer GO call. |
*/ |
LOG("udebug_stop - answering go call\n"); |
/* Make sure nobody takes this call away from us */ |
/* Make sure nobody takes this call away from us. */ |
call = t->udebug.go_call; |
t->udebug.go_call = NULL; |
422,7 → 422,7 |
flags = t->flags; |
spinlock_unlock(&t->lock); |
/* Not interested in kernel threads */ |
/* Not interested in kernel threads. */ |
if ((flags & THREAD_FLAG_USPACE) != 0) { |
/* Using thread struct pointer as identification hash */ |
tid = (unative_t) t; |
458,16 → 458,16 |
int rc; |
unative_t *arg_buffer; |
/* Prepare a buffer to hold the arguments */ |
/* Prepare a buffer to hold the arguments. */ |
arg_buffer = malloc(6 * sizeof(unative_t), 0); |
/* On success, this will lock t->udebug.lock */ |
/* On success, this will lock t->udebug.lock. */ |
rc = _thread_op_begin(t, false); |
if (rc != EOK) { |
return rc; |
} |
/* Additionally we need to verify that we are inside a syscall */ |
/* Additionally we need to verify that we are inside a syscall. */ |
if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B && |
t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) { |
_thread_op_end(t); |
474,7 → 474,7 |
return EINVAL; |
} |
/* Copy to a local buffer before releasing the lock */ |
/* Copy to a local buffer before releasing the lock. */ |
memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t)); |
_thread_op_end(t); |
/branches/sparc/kernel/arch/ppc32/src/mm/tlb.c |
---|
35,6 → 35,7 |
#include <mm/tlb.h> |
#include <arch/mm/tlb.h> |
#include <arch/interrupt.h> |
#include <interrupt.h> |
#include <mm/as.h> |
#include <arch.h> |
#include <print.h> |
116,6 → 117,10 |
s = get_symtab_entry(istate->lr); |
if (s) |
sym2 = s; |
fault_if_from_uspace(istate, |
"%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, |
istate->pc, symbol, sym2); |
panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr, |
istate->pc, symbol, sym2); |
} |
/branches/sparc/uspace/lib/libblock/libblock.c |
---|
296,10 → 296,13 |
* |
* @param dev_handle Device handle of the block device. |
* @param boff Block offset. |
* @param flags If BLOCK_FLAGS_NOREAD is specified, block_get() |
* will not read the contents of the block from the |
* device. |
* |
* @return Block structure. |
*/ |
block_t *block_get(dev_handle_t dev_handle, bn_t boff) |
block_t *block_get(dev_handle_t dev_handle, bn_t boff, int flags) |
{ |
devcon_t *devcon; |
cache_t *cache; |
385,13 → 388,15 |
*/ |
abort(); /* TODO: block_write() */ |
} |
if (!(flags & BLOCK_FLAGS_NOREAD)) { |
/* |
* The block contains old or no data. We need to read the new |
* contents from the device. |
* The block contains old or no data. We need to read |
* the new contents from the device. |
*/ |
rc = block_read(dev_handle, &bufpos, &buflen, &pos, b->data, |
cache->block_size, cache->block_size); |
rc = block_read(dev_handle, &bufpos, &buflen, &pos, |
b->data, cache->block_size, cache->block_size); |
assert(rc == EOK); |
} |
futex_up(&b->lock); |
} |
/branches/sparc/uspace/lib/libblock/libblock.h |
---|
44,6 → 44,22 |
#include <libadt/hash_table.h> |
#include <libadt/list.h> |
/* |
* Flags that can be used with block_get(). |
*/ |
/** |
* This macro is a symbolic value for situations where no special flags are |
* needed. |
*/ |
#define BLOCK_FLAGS_NONE 0 |
/** |
* When the client of block_get() intends to overwrite the current contents of |
* the block, this flag is used to avoid the unnecessary read. |
*/ |
#define BLOCK_FLAGS_NOREAD 1 |
typedef unsigned bn_t; /**< Block number type. */ |
typedef struct block { |
77,7 → 93,7 |
extern int block_cache_init(dev_handle_t, size_t, unsigned); |
extern block_t *block_get(dev_handle_t, bn_t); |
extern block_t *block_get(dev_handle_t, bn_t, int flags); |
extern void block_put(block_t *); |
extern int block_read(int, off_t *, size_t *, off_t *, void *, size_t, size_t); |
/branches/sparc/uspace/srv/console/console.c |
---|
465,7 → 465,9 |
} |
continue; |
} |
keybuffer_pop(&conn->keybuffer, (int *) &arg1); |
int ch; |
keybuffer_pop(&conn->keybuffer, &ch); |
arg1 = ch; |
break; |
} |
ipc_answer_2(callid, EOK, arg1, arg2); |
/branches/sparc/uspace/srv/fs/fat/fat_fat.c |
---|
94,7 → 94,7 |
fsec = (clst * sizeof(fat_cluster_t)) / bps; |
fidx = clst % (bps / sizeof(fat_cluster_t)); |
/* read FAT1 */ |
b = block_get(dev_handle, rscnt + fsec); |
b = block_get(dev_handle, rscnt + fsec, BLOCK_FLAGS_NONE); |
clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]); |
assert(clst != FAT_CLST_BAD); |
block_put(b); |
114,12 → 114,13 |
* @param firstc First cluster used by the file. Can be zero if the file |
* is empty. |
* @param bn Block number. |
* @param flags Flags passed to libblock. |
* |
* @return Block structure holding the requested block. |
*/ |
block_t * |
_fat_block_get(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t firstc, |
bn_t bn) |
bn_t bn, int flags) |
{ |
block_t *b; |
unsigned bps; |
143,7 → 144,7 |
if (firstc == FAT_CLST_ROOT) { |
/* root directory special case */ |
assert(bn < rds); |
b = block_get(dev_handle, rscnt + bs->fatcnt * sf + bn); |
b = block_get(dev_handle, rscnt + bs->fatcnt * sf + bn, flags); |
return b; |
} |
153,7 → 154,7 |
assert(clusters == max_clusters); |
b = block_get(dev_handle, ssa + (lastc - FAT_CLST_FIRST) * bs->spc + |
bn % bs->spc); |
bn % bs->spc, flags); |
return b; |
} |
183,7 → 184,9 |
/* zero out already allocated space */ |
for (o = nodep->size - 1; o < pos && o < boundary; |
o = ALIGN_DOWN(o + bps, bps)) { |
b = fat_block_get(bs, nodep, o / bps); |
int flags = (o % bps == 0) ? |
BLOCK_FLAGS_NOREAD : BLOCK_FLAGS_NONE; |
b = fat_block_get(bs, nodep, o / bps, flags); |
memset(b->data + o % bps, 0, bps - o % bps); |
b->dirty = true; /* need to sync node */ |
block_put(b); |
195,7 → 198,7 |
/* zero out the initial part of the new cluster chain */ |
for (o = boundary; o < pos; o += bps) { |
b = _fat_block_get(bs, nodep->idx->dev_handle, mcl, |
(o - boundary) / bps); |
(o - boundary) / bps, BLOCK_FLAGS_NOREAD); |
memset(b->data, 0, min(bps, pos - o)); |
b->dirty = true; /* need to sync node */ |
block_put(b); |
221,7 → 224,8 |
bps = uint16_t_le2host(bs->bps); |
rscnt = uint16_t_le2host(bs->rscnt); |
b = block_get(dev_handle, rscnt + (clst * sizeof(fat_cluster_t)) / bps); |
b = block_get(dev_handle, rscnt + (clst * sizeof(fat_cluster_t)) / bps, |
BLOCK_FLAGS_NONE); |
cp = (fat_cluster_t *)b->data + clst % (bps / sizeof(fat_cluster_t)); |
value = uint16_t_le2host(*cp); |
block_put(b); |
253,7 → 257,7 |
assert(fatno < bs->fatcnt); |
b = block_get(dev_handle, rscnt + sf * fatno + |
(clst * sizeof(fat_cluster_t)) / bps); |
(clst * sizeof(fat_cluster_t)) / bps, BLOCK_FLAGS_NONE); |
cp = (fat_cluster_t *)b->data + clst % (bps / sizeof(fat_cluster_t)); |
*cp = host2uint16_t_le(value); |
b->dirty = true; /* need to sync block */ |
323,7 → 327,7 |
*/ |
futex_down(&fat_alloc_lock); |
for (b = 0, cl = 0; b < sf; blk++) { |
blk = block_get(dev_handle, rscnt + b); |
blk = block_get(dev_handle, rscnt + b, BLOCK_FLAGS_NOREAD); |
for (c = 0; c < bps / sizeof(fat_cluster_t); c++, cl++) { |
fat_cluster_t *clst = (fat_cluster_t *)blk->data + c; |
if (uint16_t_le2host(*clst) == FAT_CLST_RES0) { |
/branches/sparc/uspace/srv/fs/fat/fat_fat.h |
---|
63,11 → 63,11 |
extern uint16_t fat_cluster_walk(struct fat_bs *, dev_handle_t, fat_cluster_t, |
fat_cluster_t *, uint16_t); |
#define fat_block_get(bs, np, bn) \ |
_fat_block_get((bs), (np)->idx->dev_handle, (np)->firstc, (bn)) |
#define fat_block_get(bs, np, bn, flags) \ |
_fat_block_get((bs), (np)->idx->dev_handle, (np)->firstc, (bn), (flags)) |
extern struct block *_fat_block_get(struct fat_bs *, dev_handle_t, |
fat_cluster_t, bn_t); |
fat_cluster_t, bn_t, int); |
extern void fat_append_clusters(struct fat_bs *, struct fat_node *, |
fat_cluster_t); |
/branches/sparc/uspace/srv/fs/fat/fat_ops.c |
---|
89,7 → 89,7 |
/* Read the block that contains the dentry of interest. */ |
b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc, |
(node->idx->pdi * sizeof(fat_dentry_t)) / bps); |
(node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps); |
181,7 → 181,7 |
/* Read the block that contains the dentry of interest. */ |
b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc, |
(idxp->pdi * sizeof(fat_dentry_t)) / bps); |
(idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); |
assert(b); |
d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); |
282,7 → 282,7 |
dps = bps / sizeof(fat_dentry_t); |
blocks = parentp->size / bps; |
for (i = 0; i < blocks; i++) { |
b = fat_block_get(bs, parentp, i); |
b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); |
for (j = 0; j < dps; j++) { |
d = ((fat_dentry_t *)b->data) + j; |
switch (fat_classify_dentry(d)) { |
372,7 → 372,7 |
for (i = 0; i < blocks; i++) { |
fat_dentry_t *d; |
b = fat_block_get(bs, nodep, i); |
b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE); |
for (j = 0; j < dps; j++) { |
d = ((fat_dentry_t *)b->data) + j; |
switch (fat_classify_dentry(d)) { |
574,7 → 574,8 |
} else { |
bytes = min(len, bps - pos % bps); |
bytes = min(bytes, nodep->size - pos); |
b = fat_block_get(bs, nodep, pos / bps); |
b = fat_block_get(bs, nodep, pos / bps, |
BLOCK_FLAGS_NONE); |
(void) ipc_data_read_finalize(callid, b->data + pos % bps, |
bytes); |
block_put(b); |
599,7 → 600,7 |
while (bnum < nodep->size / bps) { |
off_t o; |
b = fat_block_get(bs, nodep, bnum); |
b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE); |
for (o = pos % (bps / sizeof(fat_dentry_t)); |
o < bps / sizeof(fat_dentry_t); |
o++, pos++) { |
647,6 → 648,7 |
unsigned spc; |
unsigned bpc; /* bytes per cluster */ |
off_t boundary; |
int flags = BLOCK_FLAGS_NONE; |
if (!nodep) { |
ipc_answer_0(rid, ENOENT); |
675,6 → 677,8 |
* value signaling a smaller number of bytes written. |
*/ |
bytes = min(len, bps - pos % bps); |
if (bytes == bps) |
flags |= BLOCK_FLAGS_NOREAD; |
boundary = ROUND_UP(nodep->size, bpc); |
if (pos < boundary) { |
685,7 → 689,7 |
* next block size boundary. |
*/ |
fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos); |
b = fat_block_get(bs, nodep, pos / bps); |
b = fat_block_get(bs, nodep, pos / bps, flags); |
(void) ipc_data_write_finalize(callid, b->data + pos % bps, |
bytes); |
b->dirty = true; /* need to sync block */ |
718,7 → 722,8 |
} |
/* zero fill any gaps */ |
fat_fill_gap(bs, nodep, mcl, pos); |
b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc); |
b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc, |
flags); |
(void) ipc_data_write_finalize(callid, b->data + pos % bps, |
bytes); |
b->dirty = true; /* need to sync block */ |