Subversion Repositories HelenOS-historic

Rev 687 → Rev 689
The two revisions differ only at the two frame_alloc() calls, which gain a third argument (NULL) in Rev 689. In the listing below, a line prefixed with '-' shows the Rev 687 form and the following line prefixed with '+' shows the Rev 689 form; all other lines are identical in both revisions.

/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <mm/vm.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/tlb.h>
#include <mm/heap.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/asid.h>
#include <arch/mm/vm.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <arch.h>

#define KAS_START_INDEX     PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)
#define KAS_END_INDEX       PTL0_INDEX(KERNEL_ADDRESS_SPACE_END)
#define KAS_INDICES         (1+(KAS_END_INDEX-KAS_START_INDEX))

vm_t *vm_create(pte_t *ptl0)
{
    vm_t *m;

    m = (vm_t *) malloc(sizeof(vm_t));
    if (m) {
        spinlock_initialize(&m->lock, "vm_lock");
        list_initialize(&m->vm_area_head);

        m->asid = asid_get();

        /*
         * Each vm_t is supposed to have its own page table.
         * It is either passed one or it has to allocate and set one up.
         */
        m->ptl0 = ptl0;
        if (!m->ptl0) {
            pte_t *src_ptl0, *dst_ptl0;

            src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
-           dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME);
+           dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL);

//          memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
//          memcpy((void *) &dst_ptl0[KAS_START_INDEX], (void *) &src_ptl0[KAS_START_INDEX], KAS_INDICES);

            memcpy((void *) dst_ptl0, (void *) src_ptl0, PAGE_SIZE);

            m->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
        }
    }

    return m;
}

void vm_destroy(vm_t *m)
{
}

vm_area_t *vm_area_create(vm_t *m, vm_type_t type, size_t size, __address addr)
{
    ipl_t ipl;
    vm_area_t *a;

    if (addr % PAGE_SIZE)
        panic("addr not aligned to a page boundary");

    ipl = interrupts_disable();
    spinlock_lock(&m->lock);

    /*
     * TODO: test vm_area which is to be created doesn't overlap with an existing one.
     */

    a = (vm_area_t *) malloc(sizeof(vm_area_t));
    if (a) {
        int i;

        a->mapping = (__address *) malloc(size * sizeof(__address));
        if (!a->mapping) {
            free(a);
            spinlock_unlock(&m->lock);
            interrupts_restore(ipl);
            return NULL;
        }

        for (i=0; i<size; i++)
-           a->mapping[i] = frame_alloc(0, ONE_FRAME);
+           a->mapping[i] = frame_alloc(0, ONE_FRAME, NULL);

        spinlock_initialize(&a->lock, "vm_area_lock");

        link_initialize(&a->link);
        a->type = type;
        a->size = size;
        a->address = addr;

        list_append(&a->link, &m->vm_area_head);
    }

    spinlock_unlock(&m->lock);
    interrupts_restore(ipl);

    return a;
}

void vm_area_destroy(vm_area_t *a)
{
}

void vm_area_map(vm_area_t *a, vm_t *m)
{
    int i, flags;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&m->lock);
    spinlock_lock(&a->lock);

    switch (a->type) {
        case VMA_TEXT:
            flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        case VMA_DATA:
        case VMA_STACK:
            flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
            break;
        default:
            panic("unexpected vm_type_t %d", a->type);
    }

    ASSERT(m->ptl0);
    for (i=0; i<a->size; i++)
        page_mapping_insert(a->address + i*PAGE_SIZE, m->asid, a->mapping[i], flags, (__address) m->ptl0);

    spinlock_unlock(&a->lock);
    spinlock_unlock(&m->lock);
    interrupts_restore(ipl);
}

void vm_area_unmap(vm_area_t *a, vm_t *m)
{
    int i;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&m->lock);
    spinlock_lock(&a->lock);

    ASSERT(m->ptl0);
    for (i=0; i<a->size; i++)
        page_mapping_insert(a->address + i*PAGE_SIZE, 0, 0, PAGE_NOT_PRESENT, (__address) m->ptl0);

    spinlock_unlock(&a->lock);
    spinlock_unlock(&m->lock);
    interrupts_restore(ipl);
}

void vm_install(vm_t *m)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    tlb_shootdown_start();
    spinlock_lock(&m->lock);

    ASSERT(m->ptl0);
    SET_PTL0_ADDRESS(m->ptl0);

    spinlock_unlock(&m->lock);
    tlb_shootdown_finalize();

    interrupts_restore(ipl);

    vm_install_arch(m);

    VM = m;
}
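
Read together, the functions above cover the whole lifecycle of an address space in this revision: vm_create() clones the kernel PTL0 when it is not handed one, vm_area_create() backs a region with freshly allocated frames, vm_area_map() enters the mappings into the page table, and vm_install() switches to it. A minimal caller-side sketch of that sequence, assuming kernel context; USER_TEXT_VA and TEXT_PAGES are hypothetical example values, and the address must be page-aligned or vm_area_create() panics:

#include <mm/vm.h>
#include <panic.h>

#define USER_TEXT_VA    0x1000      /* hypothetical, page-aligned user-space address */
#define TEXT_PAGES      4           /* hypothetical area size, in pages */

/* Hypothetical helper: build and activate a one-area user address space. */
static void example_create_and_install_vm(void)
{
    vm_t *m;
    vm_area_t *a;

    /* Passing NULL makes vm_create() allocate a fresh PTL0 copied from the kernel's. */
    m = vm_create(NULL);
    if (!m)
        panic("vm_create failed");

    /* Allocates TEXT_PAGES frames and records them in the area's mapping array. */
    a = vm_area_create(m, VMA_TEXT, TEXT_PAGES, USER_TEXT_VA);
    if (!a)
        panic("vm_area_create failed");

    /* Enter the area's pages into m's page table, then make m the active VM. */
    vm_area_map(a, m);
    vm_install(m);
}

Note that vm_area_map() and vm_install() disable interrupts and take the relevant spinlocks themselves, so the caller does not need to.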