/kernel/trunk/generic/src/proc/task.c |
---|
55,19 → 55,8 |
#define LOADED_PROG_STACK_PAGES_NO 1 |
#endif |
/** Spinlock protecting the tasks_btree B+tree. */ |
SPINLOCK_INITIALIZE(tasks_lock); |
/** B+tree of active tasks. |
* |
* The task is guaranteed to exist after it was found in the tasks_btree as long as: |
* @li the tasks_lock is held, |
 * @li the task's lock is held, provided it was acquired before releasing tasks_lock, or |
 * @li the task's refcount is greater than 0 |
* |
*/ |
btree_t tasks_btree; |
static task_id_t task_counter = 0; |
static void ktaskclnp(void *arg); |
250,6 → 239,11 |
* The tasks_lock must be already held by the caller of this function |
* and interrupts must be disabled. |
* |
* The task is guaranteed to exist after it was found in the tasks_btree as long as: |
* @li the tasks_lock is held, |
 * @li the task's lock is held, provided it was acquired before releasing tasks_lock, or |
 * @li the task's refcount is greater than 0 |
* |
* @param id Task ID. |
* |
* @return Task structure address or NULL if there is no such task ID. |
/kernel/trunk/generic/src/proc/thread.c |
---|
75,16 → 75,10 |
"Undead" |
}; |
/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */ |
/** Lock protecting threads_head list. For locking rules, see declaration thereof. */ |
SPINLOCK_INITIALIZE(threads_lock); |
btree_t threads_btree; /**< B+tree of all threads. */ |
/** B+tree of all threads. |
* |
* When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long |
* as the threads_lock is held. |
*/ |
btree_t threads_btree; |
SPINLOCK_INITIALIZE(tidlock); |
__u32 last_tid = 0; |
558,6 → 552,9 |
* Note that threads_lock must be already held and |
* interrupts must be already disabled. |
* |
* When a thread is found in threads_btree, it is guaranteed to exist as long |
* as the threads_lock is held. |
* |
* @param t Pointer to thread. |
* |
* @return True if thread t is known to the system, false otherwise. |
/kernel/trunk/genarch/src/mm/asid.c |
---|
124,8 → 124,8 |
* Get the system rid of the stolen ASID. |
*/ |
tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0); |
tlb_shootdown_finalize(); |
tlb_invalidate_asid(asid); |
tlb_shootdown_finalize(); |
} else { |
/* |
135,13 → 135,6 |
asid = asid_find_free(); |
asids_allocated++; |
/* |
* Purge the allocated rid from TLBs. |
*/ |
tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0); |
tlb_invalidate_asid(asid); |
tlb_shootdown_finalize(); |
} |
spinlock_unlock(&asidlock); |