//kernel/trunk/generic/src/ddi/ddi.c
---
135,12 → 135,6
		return ENOENT;
	}
	/*
	 * TODO: We are currently lacking support for task destroying.
	 * Once it is added to the kernel, we must take care to
	 * synchronize in a way that prevents race conditions here.
	 */
	/* Lock the task and release the lock protecting tasks_btree. */
	spinlock_lock(&t->lock);
	spinlock_unlock(&tasks_lock);
//kernel/trunk/generic/src/proc/task.c
---
140,10 → 140,6
 */
void task_destroy(task_t *t)
{
	spinlock_lock(&tasks_lock);
	btree_remove(&tasks_btree, t->taskid, NULL);
	spinlock_unlock(&tasks_lock);
	task_destroy_arch(t);
	btree_destroy(&t->futexes);
274,11 → 270,12
		interrupts_restore(ipl);
		return ENOENT;
	}
	spinlock_lock(&ta->lock);
	ta->refcount++;
	spinlock_unlock(&ta->lock);
	btree_remove(&tasks_btree, ta->taskid, NULL);
	spinlock_unlock(&tasks_lock);
	t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp");
//kernel/trunk/generic/src/security/cap.c
---
112,10 → 112,15
		interrupts_restore(ipl);
		return (__native) ENOENT;
	}
	spinlock_unlock(&tasks_lock);
	spinlock_lock(&t->lock);
	cap_set(t, cap_get(t) | caps);
	spinlock_unlock(&t->lock);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
	return 0;
}
149,7 → 154,6
		interrupts_restore(ipl);
		return (__native) ENOENT;
	}
	spinlock_unlock(&tasks_lock);
	/*
	 * Revoking capabilities is different from granting them in that
157,12 → 161,17
	 * doesn't have CAP_CAP.
	 */
	if (!(cap_get(TASK) & CAP_CAP) || !(t == TASK)) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return (__native) EPERM;
	}
	spinlock_lock(&t->lock);
	cap_set(t, cap_get(t) & ~caps);
	spinlock_unlock(&t->lock);
	cap_set(t, cap_get(t) & ~caps);
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
	return 0;
}