Subversion Repositories HelenOS

Rev 4377 → Rev 4692 (count_t replaced by size_t)
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericddi
 * @{
 */

/**
 * @file
 * @brief Device Driver Interface functions.
 *
 * This file contains functions that comprise the Device Driver Interface.
 * These are the functions for mapping physical memory and enabling I/O
 * space for tasks.
 */

#include <ddi/ddi.h>
#include <ddi/ddi_arg.h>
#include <proc/task.h>
#include <security/cap.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <synch/spinlock.h>
#include <syscall/copy.h>
#include <adt/btree.h>
#include <arch.h>
#include <align.h>
#include <errno.h>

/** This lock protects the parea_btree. */
SPINLOCK_INITIALIZE(parea_lock);

/** B+tree with enabled physical memory areas. */
static btree_t parea_btree;

/** Initialize DDI. */
void ddi_init(void)
{
    btree_create(&parea_btree);
}

/** Enable a piece of physical memory for mapping by physmem_map().
 *
 * @param parea Pointer to the physical area structure.
 *
 */
void ddi_parea_register(parea_t *parea)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&parea_lock);
    
    /*
     * We don't check for overlaps here as the kernel is pretty sane.
     */
    btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
    
    spinlock_unlock(&parea_lock);
    interrupts_restore(ipl);
}
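
/*
 * Illustrative sketch, not part of the original file: a device driver
 * could expose a physical region (say, a 1024x768, 32-bit framebuffer at
 * a made-up base address) by registering a statically allocated parea_t.
 * Only the pbase and frames members actually used by this file are assumed:
 *
 *     static parea_t fb_parea;
 *
 *     void fb_expose(void)
 *     {
 *         fb_parea.pbase = 0xfd000000;
 *         fb_parea.frames = (1024 * 768 * 4) / FRAME_SIZE;
 *         ddi_parea_register(&fb_parea);
 *     }
 *
 * The structure must remain allocated for the lifetime of the kernel,
 * because btree_insert() stores only a pointer to it.
 */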

/** Map a piece of physical memory into the virtual address space of the
 * current task.
 *
 * @param pf    Physical address of the starting frame.
 * @param vp    Virtual address of the starting page.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 *
 * @return 0 on success, EPERM if the caller lacks the capability to use
 *         this syscall, EBADMEM if pf or vp is not page aligned, ENOENT
 *         if the physical address space is not enabled for mapping, and
 *         ENOMEM if there was a problem in creating the address space area.
 *
 */
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, int flags)
{
    ASSERT(TASK);
    ASSERT((pf % FRAME_SIZE) == 0);
    ASSERT((vp % PAGE_SIZE) == 0);
    
    /*
     * Make sure the caller is authorised to make this syscall.
     */
    cap_t caps = cap_get(TASK);
    if (!(caps & CAP_MEM_MANAGER))
        return EPERM;
    
    mem_backend_data_t backend_data;
    backend_data.base = pf;
    backend_data.frames = pages;
    
    ipl_t ipl = interrupts_disable();
    
    /* Find the zone of the physical memory. */
    spinlock_lock(&zones.lock);
    size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
    
    if (znum == (size_t) -1) {
        /* The frames were not found in any zone
         * -> assume they belong to a hardware device and allow
         *    the mapping.
         */
        spinlock_unlock(&zones.lock);
        goto map;
    }
    
    if (zones.info[znum].flags & ZONE_FIRMWARE) {
        /* The frames are part of firmware. */
        spinlock_unlock(&zones.lock);
        goto map;
    }
    
    if (zone_flags_available(zones.info[znum].flags)) {
        /* The frames are part of physical memory; check whether
         * the memory region has been enabled for mapping.
         */
        spinlock_unlock(&zones.lock);
        
        spinlock_lock(&parea_lock);
        btree_node_t *nodep;
        parea_t *parea = (parea_t *) btree_search(&parea_btree,
            (btree_key_t) pf, &nodep);
        
        if ((!parea) || (parea->frames < pages)) {
            /* The area is not registered or is smaller than
             * requested; only parea_lock is held here.
             */
            spinlock_unlock(&parea_lock);
            interrupts_restore(ipl);
            return ENOENT;
        }
        
        spinlock_unlock(&parea_lock);
        goto map;
    }
    
    /* The zone is neither firmware nor available memory. */
    spinlock_unlock(&zones.lock);
    interrupts_restore(ipl);
    return ENOENT;
    
map:
    spinlock_lock(&TASK->lock);
    
    if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
        AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
        /*
         * The address space area could not be created.
         * We report it using ENOMEM.
         */
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }
    
    /*
     * Mapping is created on-demand during page fault.
     */
    
    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);
    return 0;
}
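
/*
 * To summarize the policy implemented above: frames that belong to no zone
 * (assumed to be memory-mapped hardware) and ZONE_FIRMWARE frames may
 * always be mapped; ordinary available memory may be mapped only if it was
 * previously enabled with ddi_parea_register(); any other zone type yields
 * ENOENT.
 */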

/** Enable a range of I/O space for a task.
 *
 * @param id     Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size   Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks capabilities to use this
 *         syscall, ENOENT if there is no task matching the specified ID.
 *
 */
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
    /*
     * Make sure the caller is authorised to make this syscall.
     */
    cap_t caps = cap_get(TASK);
    if (!(caps & CAP_IO_MANAGER))
        return EPERM;
    
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    
    task_t *task = task_find_by_id(id);
    
    if ((!task) || (!context_check(CONTEXT, task->context))) {
        /*
         * There is no task with the specified ID
         * or the task belongs to a different security
         * context.
         */
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }
    
    /* Lock the task and release the lock protecting tasks_btree. */
    spinlock_lock(&task->lock);
    spinlock_unlock(&tasks_lock);
    
    int rc = ddi_iospace_enable_arch(task, ioaddr, size);
    
    spinlock_unlock(&task->lock);
    interrupts_restore(ipl);
    
    return rc;
}
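
/*
 * Note: the actual enabling mechanism is architecture-specific. On
 * ia32/amd64, for instance, ddi_iospace_enable_arch() is expected to grant
 * access through the task's I/O permission bitmap; architectures without a
 * separate I/O address space can simply report an error.
 */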

/** Wrapper for SYS_PHYSMEM_MAP syscall.
 *
 * @param phys_base Physical base address to map.
 * @param virt_base Destination virtual address.
 * @param pages     Number of pages.
 * @param flags     Flags of the newly mapped pages.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 *
 */
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
    unative_t pages, unative_t flags)
{
    return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
        FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
        (size_t) pages, (int) flags);
}
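
/*
 * Illustrative user-space counterpart (a sketch; the wrapper name and the
 * __SYSCALL4 macro are assumptions, not taken from this file): a task
 * holding CAP_MEM_MANAGER could map a device region roughly like this:
 *
 *     int physmem_map(void *pf, void *vp, unsigned long pages, int flags)
 *     {
 *         return __SYSCALL4(SYS_PHYSMEM_MAP, (unative_t) pf,
 *             (unative_t) vp, pages, flags);
 *     }
 *
 * Note that sys_physmem_map() aligns both addresses down, so passing an
 * unaligned phys_base maps from the start of its containing frame.
 */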

/** Wrapper for SYS_ENABLE_IOSPACE syscall.
 *
 * @param uspace_io_arg User space address of the DDI argument structure.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 *
 */
unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
{
    ddi_ioarg_t arg;
    int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
    if (rc != 0)
        return (unative_t) rc;
    
    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
        (uintptr_t) arg.ioaddr, (size_t) arg.size);
}
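
/*
 * Illustrative sketch (the __SYSCALL1 macro and the exact field types are
 * assumptions): enabling the legacy COM1 port range for a driver task
 * would fill in the argument structure and invoke the syscall:
 *
 *     ddi_ioarg_t arg = {
 *         .task_id = driver_task_id,
 *         .ioaddr = (void *) 0x3f8,
 *         .size = 8
 *     };
 *     __SYSCALL1(SYS_ENABLE_IOSPACE, (unative_t) &arg);
 */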

/** Disable or enable preemption.
 *
 * @param enable If non-zero, the preemption counter will be decremented,
 *               leading to potential enabling of preemption. Otherwise
 *               the preemption counter will be incremented, preventing
 *               preemption from occurring.
 *
 * @return Zero on success or EPERM if the caller's capabilities are not
 *         sufficient.
 *
 */
unative_t sys_preempt_control(int enable)
{
    if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
        return EPERM;
    
    if (enable)
        preemption_enable();
    else
        preemption_disable();
    
    return 0;
}
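
/*
 * Illustrative use, assuming a task that holds CAP_PREEMPT_CONTROL: the
 * underlying counter nests, so a time-critical stretch of code would
 * bracket its work with a disable/enable pair:
 *
 *     sys_preempt_control(0);    (disable: counter is incremented)
 *     ...timing-sensitive work...
 *     sys_preempt_control(1);    (enable: counter is decremented)
 */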

/** @}
 */