/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    list_initialize(&as->as_area_head);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}
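
/*
 * Usage sketch (editor's addition, not in the original file):
 *
 *  as_t *as;
 *
 *  as = as_create(0);
 *
 * The new space keeps asid == ASID_INVALID until it is first installed
 * by as_switch(), which allocates an ASID via asid_get(). FLAG_AS_KERNEL
 * is used only once, by as_init(), to create AS_KERNEL.
 */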

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of the area.
 * @param base Base address of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    link_initialize(&a->link);
    a->flags = flags;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    list_append(&a->link, &as->as_area_head);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}
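
/*
 * Usage sketch (editor's addition; the base and size values are
 * hypothetical, not constants from this codebase):
 *
 *  as_area_t *a;
 *
 *  a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
 *      16 * PAGE_SIZE, 0x10000000);
 *  if (!a)
 *      panic("as_area_create failed\n");
 *
 * The base must be page-aligned and AS_AREA_EXEC | AS_AREA_WRITE is
 * rejected. No frames are allocated here; they are provided on demand
 * by as_page_fault() or explicitly via as_set_mapping().
 */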

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}
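
/*
 * Usage sketch (editor's addition): eagerly backing the first page of
 * an area with a zeroed frame, using the same allocation calls as
 * as_page_fault() below:
 *
 *  __address frame;
 *
 *  frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
 *  memsetb(PA2KA(frame), FRAME_SIZE, 0);
 *  as_set_mapping(as, a->base, frame);
 */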

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be resolved, 1 on success.
 */
int as_page_fault(__address page)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    page_table_lock(AS, false);

    /*
     * To avoid a race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not already been inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, there are several reasons that
     * could have caused this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return 1;
}
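
/*
 * Caller sketch (editor's addition): a hypothetical architecture's
 * low-level fault handler would pass the page-aligned faulting address
 * and fall back to its own error path on failure; fault_page() and
 * page_fault_fail() are made-up names:
 *
 *  if (!as_page_fault(fault_page()))
 *      page_fault_fail();
 */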

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}
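
/*
 * Caller sketch (editor's addition): a context switch to a thread of
 * another task would switch address spaces roughly like this; the
 * actual scheduler call site may differ:
 *
 *  if (TASK->as != AS)
 *      as_switch(AS, TASK->as);
 */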

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

    if (a->flags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (a->flags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (a->flags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    return flags;
}
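
/*
 * Worked example (editor's addition): for an area created with
 * AS_AREA_READ | AS_AREA_WRITE, get_area_flags() yields
 * PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE | PAGE_READ | PAGE_WRITE,
 * with PAGE_EXEC left out.
 */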

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}
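
/*
 * Locking-discipline sketch (editor's addition), as exercised elsewhere
 * in this file: as_page_fault() already holds AS->lock and therefore
 * passes false, while as_set_mapping() does not hold it and passes true:
 *
 *  page_table_lock(as, false);     (as->lock already held)
 *  pte = page_mapping_find(as, page);
 *  ...
 *  page_table_unlock(as, false);
 */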

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}
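
/*
 * Usage sketch (editor's addition; the address is hypothetical):
 *
 *  if (as_remap(as, 0x10000000, PAGE_SIZE, 0) == (__address) -1)
 *      printf("as_remap failed\n");
 *
 * When shrinking, frames backing the truncated pages are returned via
 * frame_free() and the affected TLB entries are shot down.
 */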

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    link_t *cur;
    as_area_t *a;

    for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
        a = list_get_instance(cur, as_area_t, link);
        spinlock_lock(&a->lock);

        if ((va >= a->base) && (va < a->base + a->pages * PAGE_SIZE))
            return a;

        spinlock_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    link_t *cur;
    as_area_t *a;

    /*
     * We don't want any area to conflict with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
        __address a_start;
        size_t a_size;

        a = list_get_instance(cur, as_area_t, link);
        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);

        a_start = a->base;
        a_size = a->pages * PAGE_SIZE;

        spinlock_unlock(&a->lock);

        if (overlaps(va, size, a_start, a_size))
            return false;
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space either.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}