Rev 2071 | Rev 2107
Line 128 (context, identical in both revisions):

    spinlock_lock(&parea_lock);
    parea_t *parea;
    btree_node_t *nodep;
    parea = btree_search(&parea_btree, (btree_key_t) pf, &nodep);
    if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
        !parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
        parea->cacheable)) {
        /*
         * This physical memory area cannot be mapped.
         */
        spinlock_unlock(&parea_lock);
        interrupts_restore(ipl);
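The rejection test in the if statement above packs three conditions into one expression: no matching parea was found, the parea covers fewer frames than requested, or its cacheability does not match the requested AS_AREA_CACHEABLE flag. As a reading aid only (the helper name below is hypothetical, not kernel code), the same check can be restated as:

    /* Hypothetical reading aid: restates the rejection test above.
     * Mapping is allowed only when a parea exists, is large enough,
     * and its cacheability matches the requested flags. */
    static int mapping_allowed(parea_t *parea, count_t pages, int flags)
    {
        int want_cacheable = (flags & AS_AREA_CACHEABLE) != 0;

        return parea && (parea->frames >= pages) &&
            (want_cacheable == !!parea->cacheable);
    }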
Line 232 ('-' marks Rev 2071, '+' marks Rev 2107; the only change is the re-wrapping of the sys_physmem_map() argument list):

     * @param pages Number of pages
     * @param flags Flags of newly mapped pages
     *
     * @return 0 on success, otherwise it returns error code found in errno.h
     */
    -unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t
    -    pages, unative_t flags)
    +unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
    +    unative_t pages, unative_t flags)
    {
        return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
            FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
            (count_t) pages, (int) flags);
    }
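Both addresses are rounded down before the call is delegated: phys_base to a FRAME_SIZE boundary and virt_base to a PAGE_SIZE boundary. For a power-of-two alignment, an align-down macro of this kind typically reduces to masking off the low bits; the definition below is a sketch of that idea, not necessarily the kernel's actual ALIGN_DOWN:

    /* Sketch of a power-of-two align-down; the kernel's own ALIGN_DOWN
     * may be defined differently. */
    #define ALIGN_DOWN(addr, align)  ((addr) & ~((align) - 1))

    /* Example: with FRAME_SIZE == 4096, ALIGN_DOWN(0x12345, 4096) == 0x12000,
     * so a caller passing an unaligned phys_base still maps from the start
     * of the containing frame. */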
/** Wrapper for SYS_ENABLE_IOSPACE syscall.
 *
 * @param uspace_io_arg User space address of DDI argument structure.
Line 256 (context, identical in both revisions):

    rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
    if (rc != 0)
        return (unative_t) rc;

    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
        (uintptr_t) arg.ioaddr, (size_t) arg.size);
}
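sys_enable_iospace() only ever touches three members of the copied-in structure: task_id, ioaddr, and size. A user-space caller would therefore fill something along these lines; the exact field types and ordering of ddi_ioarg_t are assumptions here, since the structure's definition is not part of this hunk:

    /* Assumed shape of ddi_ioarg_t, inferred only from the accesses above. */
    typedef struct {
        task_id_t task_id;   /* task to be granted I/O space access */
        void *ioaddr;        /* first I/O port/address of the range */
        size_t size;         /* length of the range */
    } ddi_ioarg_t;

    ddi_ioarg_t arg = {
        .task_id = tid,            /* tid: ID of the driver task (hypothetical variable) */
        .ioaddr = (void *) 0x3f8,  /* e.g. the legacy COM1 port range */
        .size = 8
    };
    /* The address of arg is then passed as the argument of the
     * SYS_ENABLE_IOSPACE syscall, which lands in sys_enable_iospace() above. */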
/** Disable or enable preemption.
 *
 * @param enable If non-zero, the preemption counter will be decremented,
Line 269 (the only change is the removal of the space after '!' in the cap_get() test):

     *
     * @return Zero on success or EPERM if callers capabilities are not sufficient.
     */
    unative_t sys_preempt_control(int enable)
    {
    -    if (! cap_get(TASK) & CAP_PREEMPT_CONTROL)
    +    if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
            return EPERM;
        if (enable)
            preemption_enable();
        else
            preemption_disable();
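One detail worth flagging in the cap_get() test above, whose substance is unchanged by this revision: '!' binds more tightly than '&', so the expression is parsed as (!cap_get(TASK)) & CAP_PREEMPT_CONTROL rather than as a test of the CAP_PREEMPT_CONTROL bit. If the intent is to refuse callers that lack the capability, the check would presumably need an extra pair of parentheses:

    /* Presumed intent of the capability check (note the added parentheses): */
    if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
        return EPERM;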