Subversion Repositories HelenOS

Rev 3908 vs. Rev 3940
/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericddi
 * @{
 */

/**
 * @file
 * @brief   Device Driver Interface functions.
 *
 * This file contains functions that comprise the Device Driver Interface.
 * These are the functions for mapping physical memory and enabling I/O
 * space to tasks.
 */

#include <ddi/ddi.h>
#include <ddi/ddi_arg.h>
#include <proc/task.h>
#include <security/cap.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <synch/spinlock.h>
#include <syscall/copy.h>
#include <adt/list.h>
#include <arch.h>
#include <align.h>
#include <errno.h>

/** This lock protects the list of enabled physical memory areas. */
SPINLOCK_INITIALIZE(parea_lock);

/** List of enabled physical memory areas. */
static LIST_INITIALIZE(parea_head);

/* Rev 3908: */

/** Physical memory area for devices. */
static parea_t dev_area;

/** Initialize DDI. */
void ddi_init(void)
{
    hw_area(&dev_area.pbase, &dev_area.frames);
    ddi_parea_register(&dev_area);
}

/* Rev 3940: */

/** Initialize DDI. */
void ddi_init(void)
{
    hw_area();
}

/** Enable a piece of physical memory for mapping by physmem_map().
 *
 * @param parea Pointer to the physical area structure.
 *
 * @todo This function doesn't check for overlaps. It depends on the kernel to
 * create disjoint physical memory areas.
 */
void ddi_parea_register(parea_t *parea)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&parea_lock);

    /*
     * TODO: we should really check for overlaps here.
     * However, we should be safe because the kernel is pretty sane.
     */
    link_initialize(&parea->link);
    list_append(&parea->link, &parea_head);

    spinlock_unlock(&parea_lock);
    interrupts_restore(ipl);
}
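
/*
 * Illustrative sketch, not part of either revision: a platform layer could
 * expose a hypothetical memory-mapped frame buffer through
 * ddi_parea_register() so that a privileged task may later map it with
 * physmem_map(). The fb_* names, the physical address and the frame count
 * are assumptions made up for the example; only the pbase and frames
 * fields actually used by this file are relied upon.
 *
 *     static parea_t fb_parea;
 *
 *     void fb_parea_init(void)
 *     {
 *         fb_parea.pbase = 0xfd000000;    // physical base of the device
 *         fb_parea.frames = 1200;         // frames covered by the area
 *         ddi_parea_register(&fb_parea);
 *     }
 */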

/** Map a piece of physical memory into the virtual address space of the
 * current task.
 *
 * @param pf Physical address of the starting frame.
 * @param vp Virtual address of the starting page.
 * @param pages Number of pages to map.
 * @param flags Address space area flags for the mapping.
 *
 * @return 0 on success, EPERM if the caller lacks the capability to use this
 *  syscall, ENOENT if the physical memory area is not enabled for mapping,
 *  and ENOMEM if there was a problem creating the address space area.
 */
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, pfn_t pages, int flags)
{
    ipl_t ipl;
    cap_t caps;
    mem_backend_data_t backend_data;

    backend_data.base = pf;
    backend_data.frames = pages;

    /*
     * Make sure the caller is authorised to make this syscall.
     */
    caps = cap_get(TASK);
    if (!(caps & CAP_MEM_MANAGER))
        return EPERM;

    ipl = interrupts_disable();

    /*
     * Check if the physical memory area is enabled for mapping.
     */
    spinlock_lock(&parea_lock);

    bool fnd = false;
    link_t *cur;

    for (cur = parea_head.next; cur != &parea_head; cur = cur->next) {
        parea_t *parea = list_get_instance(cur, parea_t, link);
        if ((parea->pbase <= pf) &&
            (ADDR2PFN(pf - parea->pbase) + pages <= parea->frames)) {
            fnd = true;
            break;
        }
    }

    spinlock_unlock(&parea_lock);

    if (!fnd) {
        /*
         * The physical memory area cannot be mapped.
         */
        interrupts_restore(ipl);
        return ENOENT;
    }

    spinlock_lock(&TASK->lock);

    if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
        AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
        /*
         * The address space area could not be created.
         * We report it using ENOMEM.
         */
        spinlock_unlock(&TASK->lock);
        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * The mapping is created on demand during page fault.
     */

    spinlock_unlock(&TASK->lock);
    interrupts_restore(ipl);
    return 0;
}
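
/*
 * Worked example of the containment check in the loop above (an
 * illustration only, assuming 4 KiB frames): for a registered area with
 * pbase = 0x80000000 and frames = 16, a request with pf = 0x80002000 and
 * pages = 4 yields ADDR2PFN(pf - pbase) = 2 and 2 + 4 = 6 <= 16, so the
 * request lies inside the area and fnd becomes true. With pf = 0x8000e000
 * and pages = 4 the sum is 14 + 4 = 18 > 16, so this area does not satisfy
 * the request and, unless another registered area covers it, the function
 * returns ENOENT.
 */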

/** Enable a range of I/O space for a task.
 *
 * @param id Task ID of the destination task.
 * @param ioaddr Starting I/O address.
 * @param size Size of the enabled I/O space.
 *
 * @return 0 on success, EPERM if the caller lacks the capability to use this
 *  syscall, ENOENT if there is no task matching the specified ID.
 */
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
    ipl_t ipl;
    cap_t caps;
    task_t *t;
    int rc;

    /*
     * Make sure the caller is authorised to make this syscall.
     */
    caps = cap_get(TASK);
    if (!(caps & CAP_IO_MANAGER))
        return EPERM;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);

    t = task_find_by_id(id);

    if ((!t) || (!context_check(CONTEXT, t->context))) {
        /*
         * There is no task with the specified ID
         * or the task belongs to a different security
         * context.
         */
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    /* Lock the task and release the lock protecting tasks_btree. */
    spinlock_lock(&t->lock);
    spinlock_unlock(&tasks_lock);

    rc = ddi_iospace_enable_arch(t, ioaddr, size);

    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);
    return rc;
}

/** Wrapper for SYS_PHYSMEM_MAP syscall.
 *
 * @param phys_base Physical base address to map.
 * @param virt_base Destination virtual address.
 * @param pages Number of pages.
 * @param flags Flags of the newly mapped pages.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 */
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
    unative_t pages, unative_t flags)
{
    return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
        FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
        (pfn_t) pages, (int) flags);
}
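
/*
 * Illustrative sketch of the user space side, not part of either revision:
 * a task holding CAP_MEM_MANAGER would typically reach this wrapper through
 * a libc helper along the lines of
 *
 *     physmem_map((void *) dev_phys, (void *) dev_virt, dev_pages,
 *         AS_AREA_READ | AS_AREA_WRITE);
 *
 * The helper name, its exact signature and the flag constants are
 * assumptions made for the example; the kernel side above only guarantees
 * that phys_base is aligned down to FRAME_SIZE and virt_base to PAGE_SIZE
 * before delegating to ddi_physmem_map().
 */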

/** Wrapper for SYS_ENABLE_IOSPACE syscall.
 *
 * @param uspace_io_arg User space address of the DDI argument structure.
 *
 * @return 0 on success, otherwise an error code from errno.h.
 */
unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
{
    ddi_ioarg_t arg;
    int rc;

    rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
    if (rc != 0)
        return (unative_t) rc;

    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
        (uintptr_t) arg.ioaddr, (size_t) arg.size);
}
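
/*
 * Illustrative sketch, not part of either revision: the caller fills in a
 * ddi_ioarg_t and passes its user space address to the syscall, e.g. to let
 * a hypothetical serial driver task access the legacy COM1 port range:
 *
 *     ddi_ioarg_t arg;
 *
 *     arg.task_id = driver_task_id;   // destination task (assumed variable)
 *     arg.ioaddr = 0x3f8;             // first I/O port to enable
 *     arg.size = 8;                   // number of ports
 *
 * The structure is then copied in by copy_from_uspace() above and handed to
 * ddi_iospace_enable(), which checks CAP_IO_MANAGER and the security
 * context before calling ddi_iospace_enable_arch().
 */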

/** Disable or enable preemption.
 *
 * @param enable If non-zero, the preemption counter will be decremented,
 *  leading to potential enabling of preemption. Otherwise the preemption
 *  counter will be incremented, preventing preemption from occurring.
 *
 * @return Zero on success or EPERM if the caller's capabilities are not
 *  sufficient.
 */
unative_t sys_preempt_control(int enable)
{
    if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
        return EPERM;
    if (enable)
        preemption_enable();
    else
        preemption_disable();
    return 0;
}
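
/*
 * Illustrative usage sketch, not part of either revision: a task holding
 * CAP_PREEMPT_CONTROL could bracket a short timing-critical sequence with
 * this syscall, first disabling and then re-enabling preemption:
 *
 *     sys_preempt_control(0);    // counter incremented, preemption off
 *     // ... short, bounded critical work ...
 *     sys_preempt_control(1);    // counter decremented, may re-enable
 *
 * In practice a user space task would invoke this through its syscall stub
 * rather than calling the kernel function directly; the stub is not shown
 * here.
 */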

/** @}
 */