Subversion Repositories HelenOS-historic

--- Rev 1044
+++ Rev 1048
 /*
  * Copyright (C) 2001-2006 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * - Redistributions of source code must retain the above copyright
  *   notice, this list of conditions and the following disclaimer.
  * - Redistributions in binary form must reproduce the above copyright
  *   notice, this list of conditions and the following disclaimer in the
  *   documentation and/or other materials provided with the distribution.
  * - The name of the author may not be used to endorse or promote products
  *   derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 /*
  * This file contains address space manipulation functions.
  * Roughly speaking, this is a higher-level client of the
  * Virtual Address Translation (VAT) subsystem.
  */
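
Note: a minimal sketch of how a client is expected to drive this layer,
using only functions and flags that appear in this file; the base address
and size below are hypothetical:

    as_t *as = as_create(0);            /* user AS; ASID assigned lazily */
    as_area_t *a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
        16 * PAGE_SIZE, 0x10000000);    /* hypothetical page-aligned base */
    if (!a) {
        /* unaligned base, W+X flags, or (as of Rev 1048) an area conflict */
    }
    as_switch(NULL, as);                /* activate; backing frames then
                                           arrive lazily via as_page_fault() */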

 #include <mm/as.h>
 #include <arch/mm/as.h>
 #include <mm/page.h>
 #include <mm/frame.h>
 #include <mm/slab.h>
 #include <mm/tlb.h>
 #include <arch/mm/page.h>
 #include <genarch/mm/page_pt.h>
 #include <mm/asid.h>
 #include <arch/mm/asid.h>
 #include <arch/types.h>
 #include <typedefs.h>
 #include <synch/spinlock.h>
 #include <config.h>
 #include <adt/list.h>
 #include <panic.h>
 #include <arch/asm.h>
 #include <debug.h>
 #include <memstr.h>
 #include <arch.h>
 #include <print.h>

 as_operations_t *as_operations = NULL;

 /** Address space lock. It protects inactive_as_with_asid_head. */
 SPINLOCK_INITIALIZE(as_lock);

 /**
  * This list contains address spaces that are not active on any
  * processor and that have a valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);

 /** Kernel address space. */
 as_t *AS_KERNEL = NULL;

 static int get_area_flags(as_area_t *a);
 static as_area_t *find_area_and_lock(as_t *as, __address va);
+static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

 /** Initialize address space subsystem. */
 void as_init(void)
 {
     as_arch_init();
     AS_KERNEL = as_create(FLAG_AS_KERNEL);
     if (!AS_KERNEL)
         panic("can't create kernel address space\n");
 }

 /** Create address space.
  *
  * @param flags Flags that influence the way in which the address space is created.
  */
 as_t *as_create(int flags)
 {
     as_t *as;

     as = (as_t *) malloc(sizeof(as_t), 0);
     link_initialize(&as->inactive_as_with_asid_link);
     spinlock_initialize(&as->lock, "as_lock");
     list_initialize(&as->as_area_head);

     if (flags & FLAG_AS_KERNEL)
         as->asid = ASID_KERNEL;
     else
         as->asid = ASID_INVALID;

     as->refcount = 0;
     as->page_table = page_table_create(flags);

     return as;
 }

 /** Free address space. */
 void as_free(as_t *as)
 {
     ASSERT(as->refcount == 0);

     /* TODO: free as_areas and other resources held by as */
     /* TODO: free page table */
     free(as);
 }

 /** Create address space area of common attributes.
  *
  * The created address space area is added to the target address space.
  *
  * @param as Target address space.
  * @param flags Flags of the area.
- * @param size Size of area in multiples of PAGE_SIZE.
+ * @param size Size of area.
  * @param base Base address of area.
  *
  * @return Address space area on success or NULL on failure.
  */
 as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
 {
     ipl_t ipl;
     as_area_t *a;

     if (base % PAGE_SIZE)
-        panic("addr not aligned to a page boundary");
+        return NULL;
+
+    /* Writeable executable areas are not supported. */
+    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
+        return NULL;

     ipl = interrupts_disable();
     spinlock_lock(&as->lock);

-    /*
-     * TODO: test as_area which is to be created doesn't overlap with an existing one.
-     */
+    if (!check_area_conflicts(as, base, size, NULL)) {
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return NULL;
+    }

     a = (as_area_t *) malloc(sizeof(as_area_t), 0);

     spinlock_initialize(&a->lock, "as_area_lock");

     link_initialize(&a->link);
     a->flags = flags;
-    a->size = size;
+    a->pages = SIZE2FRAMES(size);
     a->base = base;

     list_append(&a->link, &as->as_area_head);

     spinlock_unlock(&as->lock);
     interrupts_restore(ipl);

     return a;
 }
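
Note: Rev 1048 switches the area from a byte count (a->size) to a page
count (a->pages), converting at the boundary with SIZE2FRAMES(). The macro
is defined in the frame allocator headers; a plausible shape, shown for
illustration only:

    /* Hypothetical sketch, not the repository's definition:
     * round a byte count up to whole frames. */
    #define SIZE2FRAMES(size)  (((size) + FRAME_SIZE - 1) / FRAME_SIZE)

The new W^X test above likewise means that a request for
AS_AREA_WRITE | AS_AREA_EXEC now yields NULL instead of a writable,
executable area.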

 /** Initialize mapping for one page of address space.
  *
  * This function maps 'page' to 'frame' according
  * to the attributes of the address space area to
  * which 'page' belongs.
  *
  * @param as Target address space.
  * @param page Virtual page within the area.
  * @param frame Physical frame to which page will be mapped.
  */
 void as_set_mapping(as_t *as, __address page, __address frame)
 {
     as_area_t *area;
     ipl_t ipl;

     ipl = interrupts_disable();
     page_table_lock(as, true);

     area = find_area_and_lock(as, page);
     if (!area) {
         panic("page not part of any as_area\n");
     }

     page_mapping_insert(as, page, frame, get_area_flags(area));

     spinlock_unlock(&area->lock);
     page_table_unlock(as, true);
     interrupts_restore(ipl);
 }
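
Note: as_set_mapping() eagerly wires a frame the caller already owns into
an existing area; the page must lie inside one, or the kernel panics. A
sketch, with a hypothetical target page:

    __address frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    as_set_mapping(as, a->base + 2 * PAGE_SIZE, frame);  /* third page of 'a' */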

 /** Handle page fault within the current address space.
  *
  * This is the high-level page fault handler.
  * Interrupts are assumed disabled.
  *
  * @param page Faulting page.
  *
  * @return 0 if the fault was not handled, 1 on success.
  */
 int as_page_fault(__address page)
 {
     pte_t *pte;
     as_area_t *area;
     __address frame;

     ASSERT(AS);

     spinlock_lock(&AS->lock);
     area = find_area_and_lock(AS, page);
     if (!area) {
         /*
          * No area contained mapping for 'page'.
          * Signal page fault to low-level handler.
          */
         spinlock_unlock(&AS->lock);
         return 0;
     }

     page_table_lock(AS, false);

     /*
      * To avoid a race condition between two page faults
      * on the same address, we need to make sure
      * the mapping has not already been inserted.
      */
     if ((pte = page_mapping_find(AS, page))) {
         if (PTE_PRESENT(pte)) {
             page_table_unlock(AS, false);
             spinlock_unlock(&area->lock);
             spinlock_unlock(&AS->lock);
             return 1;
         }
     }

     /*
      * In general, there are several reasons that
      * could have caused this fault.
      *
      * - non-existent mapping: the area is a scratch
      *   area (e.g. stack) and so far has not been
      *   allocated a frame for the faulting page
      *
      * - non-present mapping: another possibility,
      *   currently not implemented, would be frame
      *   reuse; when this becomes a possibility,
      *   do not forget to distinguish between
      *   the different causes
      */
     frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
     memsetb(PA2KA(frame), FRAME_SIZE, 0);

     /*
      * Map 'page' to 'frame'.
      * Note that TLB shootdown is not attempted as only new information is being
      * inserted into page tables.
      */
     page_mapping_insert(AS, page, frame, get_area_flags(area));
     page_table_unlock(AS, false);

     spinlock_unlock(&area->lock);
     spinlock_unlock(&AS->lock);
     return 1;
 }
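
Note: the 0/1 return lets the architecture's low-level fault handler decide
whether the fault was resolved or must be escalated. A hedged sketch of such
a dispatch site (fault_va is hypothetical; PAGE_SIZE is assumed to be a
power of two):

    if (!as_page_fault(fault_va & ~(PAGE_SIZE - 1)))
        panic("unhandled page fault\n");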

 /** Switch address spaces.
  *
  * @param old Old address space or NULL.
  * @param new New address space.
  */
 void as_switch(as_t *old, as_t *new)
 {
     ipl_t ipl;
     bool needs_asid = false;

     ipl = interrupts_disable();
     spinlock_lock(&as_lock);

     /*
      * First, take care of the old address space.
      */
     if (old) {
         spinlock_lock(&old->lock);
         ASSERT(old->refcount);
         if ((--old->refcount == 0) && (old != AS_KERNEL)) {
             /*
              * The old address space is no longer active on
              * any processor. It can be appended to the
              * list of inactive address spaces with assigned
              * ASID.
              */
             ASSERT(old->asid != ASID_INVALID);
             list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
         }
         spinlock_unlock(&old->lock);
     }

     /*
      * Second, prepare the new address space.
      */
     spinlock_lock(&new->lock);
     if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
         if (new->asid != ASID_INVALID)
             list_remove(&new->inactive_as_with_asid_link);
         else
             needs_asid = true;  /* defer call to asid_get() until new->lock is released */
     }
     SET_PTL0_ADDRESS(new->page_table);
     spinlock_unlock(&new->lock);

     if (needs_asid) {
         /*
          * Allocation of new ASID was deferred
          * until now in order to avoid deadlock.
          */
         asid_t asid;

         asid = asid_get();
         spinlock_lock(&new->lock);
         new->asid = asid;
         spinlock_unlock(&new->lock);
     }
     spinlock_unlock(&as_lock);
     interrupts_restore(ipl);

     /*
      * Perform architecture-specific steps.
      * (e.g. write ASID to hardware register etc.)
      */
     as_install_arch(new);

     AS = new;
 }
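
Note: callers pass the currently installed address space as 'old' so the
reference counts stay balanced. A hypothetical call site in a context-switch
path (TASK stands for the incoming task; identifiers illustrative):

    if (TASK->as != AS)
        as_switch(AS, TASK->as);    /* AS is updated inside as_switch() */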

 /** Compute flags for virtual address translation subsystem.
  *
  * The address space area must be locked.
  * Interrupts must be disabled.
  *
  * @param a Address space area.
  *
  * @return Flags to be used in page_mapping_insert().
  */
 int get_area_flags(as_area_t *a)
 {
     int flags;

     flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

     if (a->flags & AS_AREA_READ)
         flags |= PAGE_READ;

     if (a->flags & AS_AREA_WRITE)
         flags |= PAGE_WRITE;

     if (a->flags & AS_AREA_EXEC)
         flags |= PAGE_EXEC;

     return flags;
 }
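
Note: the translation is purely additive on top of the fixed
PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE base; a read/execute text area,
for instance, comes out as:

    /* get_area_flags(a) with a->flags == (AS_AREA_READ | AS_AREA_EXEC): */
    PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE | PAGE_READ | PAGE_EXEC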

 /** Create page table.
  *
  * Depending on architecture, create either address space
  * private or global page table.
  *
  * @param flags Flags saying whether the page table is for kernel address space.
  *
  * @return First entry of the page table.
  */
 pte_t *page_table_create(int flags)
 {
     ASSERT(as_operations);
     ASSERT(as_operations->page_table_create);

     return as_operations->page_table_create(flags);
 }
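
Note: as_operations is the indirection that keeps this file agnostic of the
page table format; an architecture installs its implementation once, before
as_init(). A hedged sketch (the instance name below is hypothetical; the
hierarchical page table code comes from genarch/mm/page_pt.h, included
above):

    extern as_operations_t as_pt_operations;    /* hypothetical name */
    as_operations = &as_pt_operations;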

 /** Lock page table.
  *
  * This function should be called before any page_mapping_insert(),
  * page_mapping_remove() and page_mapping_find().
  *
  * Locking order is such that address space areas must be locked
  * prior to this call. The address space can be locked prior to this
  * call, in which case the lock argument is false.
  *
  * @param as Address space.
  * @param lock If false, do not attempt to lock as->lock.
  */
 void page_table_lock(as_t *as, bool lock)
 {
     ASSERT(as_operations);
     ASSERT(as_operations->page_table_lock);

     as_operations->page_table_lock(as, lock);
 }

 /** Unlock page table.
  *
  * @param as Address space.
  * @param unlock If false, do not attempt to unlock as->lock.
  */
 void page_table_unlock(as_t *as, bool unlock)
 {
     ASSERT(as_operations);
     ASSERT(as_operations->page_table_unlock);

     as_operations->page_table_unlock(as, unlock);
 }
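
Note: the documented locking order is exactly the sequence as_page_fault()
uses; in sketch form (interrupts disabled, as->lock already held, hence the
false argument):

    area = find_area_and_lock(as, va);      /* takes area->lock */
    page_table_lock(as, false);             /* as->lock is already held */
    page_mapping_insert(as, va, frame, get_area_flags(area));
    page_table_unlock(as, false);
    spinlock_unlock(&area->lock);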

 /** Find address space area and change it.
  *
  * @param as Address space.
  * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
  * @param size New size of the virtual memory block starting at address.
  * @param flags Flags influencing the remap operation. Currently unused.
  *
  * @return address on success, (__address) -1 otherwise.
  */
 __address as_remap(as_t *as, __address address, size_t size, int flags)
 {
     as_area_t *area = NULL;
     ipl_t ipl;
     size_t pages;

     ipl = interrupts_disable();
     spinlock_lock(&as->lock);

     /*
      * Locate the area.
      */
     area = find_area_and_lock(as, address);
     if (!area) {
         spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
         return (__address) -1;
     }

     pages = SIZE2FRAMES((address - area->base) + size);
+    if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return (__address) -1;
+    }
+
-    if (pages < area->size) {
+    if (pages < area->pages) {
         int i;

         /*
          * Shrinking the area.
          */
-        for (i = pages; i < area->size; i++) {
+        for (i = pages; i < area->pages; i++) {
             pte_t *pte;

             /*
              * Releasing physical memory.
              * This depends on the fact that the memory was allocated using frame_alloc().
              */
             page_table_lock(as, false);
             pte = page_mapping_find(as, area->base + i * PAGE_SIZE);
             if (pte && PTE_VALID(pte)) {
                 __address frame;

                 ASSERT(PTE_PRESENT(pte));
                 frame = PTE_GET_FRAME(pte);
                 page_mapping_remove(as, area->base + i * PAGE_SIZE);
                 page_table_unlock(as, false);

                 frame_free(ADDR2PFN(frame));
             } else {
                 page_table_unlock(as, false);
             }
         }
         /*
          * Invalidate TLBs.
          */
-        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages * PAGE_SIZE, area->size - pages);
-        tlb_invalidate_pages(AS->asid, area->base + pages * PAGE_SIZE, area->size - pages);
+        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages * PAGE_SIZE, area->pages - pages);
+        tlb_invalidate_pages(AS->asid, area->base + pages * PAGE_SIZE, area->pages - pages);
         tlb_shootdown_finalize();
     }

-    area->size = pages;
+    area->pages = pages;

     spinlock_unlock(&area->lock);
     spinlock_unlock(&as->lock);
     interrupts_restore(ipl);

     return address;
 }
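
Note: only shrinking releases frames and shoots down TLB entries; growing
merely records the larger page count and, as of Rev 1048, is checked for
conflicts first. A hypothetical shrink of an area to 8 pages:

    if (as_remap(as, a->base, 8 * PAGE_SIZE, 0) == (__address) -1) {
        /* area not found, or the resized block would conflict */
    }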

 /** Find address space area and lock it.
  *
  * The address space must be locked and interrupts must be disabled.
  *
  * @param as Address space.
  * @param va Virtual address.
  *
  * @return Locked address space area containing va on success or NULL on failure.
  */
 as_area_t *find_area_and_lock(as_t *as, __address va)
 {
     link_t *cur;
     as_area_t *a;

     for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
         a = list_get_instance(cur, as_area_t, link);
         spinlock_lock(&a->lock);

-        if ((va >= a->base) && (va < a->base + a->size * PAGE_SIZE))
+        if ((va >= a->base) && (va < a->base + a->pages * PAGE_SIZE))
             return a;

         spinlock_unlock(&a->lock);
     }

     return NULL;
 }
+
+/** Check area conflicts with other areas.
+ *
+ * The address space must be locked and interrupts must be disabled.
+ *
+ * @param as Address space.
+ * @param va Starting virtual address of the area being tested.
+ * @param size Size of the area being tested.
+ * @param avoid_area Do not touch this area.
+ *
+ * @return True if there is no conflict, false otherwise.
+ */
+bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
+{
+    link_t *cur;
+    as_area_t *a;
+
+    for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
+        __address start;
+        __address end;
+
+        a = list_get_instance(cur, as_area_t, link);
+        if (a == avoid_area)
+            continue;
+
+        spinlock_lock(&a->lock);
+
+        start = a->base;
+        end = a->base + a->pages * PAGE_SIZE - 1;
+
+        spinlock_unlock(&a->lock);
+
+        if ((va >= start) && (va <= end)) {
+            /*
+             * Tested area is inside another area.
+             */
+            return false;
+        }
+
+        if ((start >= va) && (start < va + size)) {
+            /*
+             * Another area starts in tested area.
+             */
+            return false;
+        }
+
+        if ((end >= va) && (end < va + size)) {
+            /*
+             * Another area ends in tested area.
+             */
+            return false;
+        }
+    }
+
+    return true;
+}
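
Note: the three tests add up to a closed-interval overlap check against
[start, end]. Worked example with hypothetical numbers: against an existing
4-page area spanning [0x1000, 0x4FFF], va = 0x3000 with size = 0x3000 fails
the first test (va lies inside the area), while va = 0x5000 with the same
size passes all three and reports no conflict.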