/* HelenOS-historic Subversion repository, Rev 1108: address space management. */
/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <adt/list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <print.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
    as_arch_init();
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
    if (!AS_KERNEL)
        panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
    as_t *as;

    as = (as_t *) malloc(sizeof(as_t), 0);
    link_initialize(&as->inactive_as_with_asid_link);
    spinlock_initialize(&as->lock, "as_lock");
    list_initialize(&as->as_area_head);

    if (flags & FLAG_AS_KERNEL)
        as->asid = ASID_KERNEL;
    else
        as->asid = ASID_INVALID;

    as->refcount = 0;
    as->page_table = page_table_create(flags);

    return as;
}
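
/*
 * Usage sketch (illustrative, not part of the original file): creating the
 * kernel address space versus an ordinary userspace address space. A
 * userspace address space starts out with ASID_INVALID; a real ASID is only
 * assigned by as_switch() the first time the address space becomes active.
 */
static void example_as_create(void)
{
    as_t *kernel_as, *user_as;

    kernel_as = as_create(FLAG_AS_KERNEL);  /* asid == ASID_KERNEL */
    user_as = as_create(0);                 /* asid == ASID_INVALID for now */

    (void) kernel_as;
    (void) user_as;
}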

/** Free address space. */
void as_free(as_t *as)
{
    ASSERT(as->refcount == 0);

    /* TODO: free as_areas and other resources held by as */
    /* TODO: free page table */
    free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of area.
 * @param base Base address of area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
    ipl_t ipl;
    as_area_t *a;

    if (base % PAGE_SIZE)
        return NULL;

    /* Writeable executable areas are not supported. */
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
        return NULL;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    if (!check_area_conflicts(as, base, size, NULL)) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return NULL;
    }

    a = (as_area_t *) malloc(sizeof(as_area_t), 0);

    spinlock_initialize(&a->lock, "as_area_lock");

    link_initialize(&a->link);
    a->flags = flags;
    a->pages = SIZE2FRAMES(size);
    a->base = base;

    list_append(&a->link, &as->as_area_head);

    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return a;
}
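
/*
 * Usage sketch (illustrative only; the base address is an arbitrary
 * page-aligned value chosen for the example): creating a four-page
 * read/write area. NULL is returned for an unaligned base, for a
 * write+execute combination, or when the area would conflict with an
 * existing area, the NULL page or the kernel address space.
 */
static void example_area_create(as_t *as)
{
    as_area_t *a;

    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, 4 * PAGE_SIZE, 0x10000000);
    if (!a)
        panic("as_area_create failed\n");
}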

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
    as_area_t *area;
    ipl_t ipl;

    ipl = interrupts_disable();
    page_table_lock(as, true);

    area = find_area_and_lock(as, page);
    if (!area) {
        panic("page not part of any as_area\n");
    }

    page_mapping_insert(as, page, frame, get_area_flags(area));

    spinlock_unlock(&area->lock);
    page_table_unlock(as, true);
    interrupts_restore(ipl);
}
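
/*
 * Usage sketch (illustrative): eagerly mapping a page of an area to a newly
 * allocated, zeroed frame, e.g. when preloading a task image, instead of
 * letting as_page_fault() allocate the frame lazily.
 */
static void example_set_mapping(as_t *as, __address page)
{
    __address frame;

    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);
    as_set_mapping(as, page, frame);
}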

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be resolved here, 1 on success.
 */
int as_page_fault(__address page)
{
    pte_t *pte;
    as_area_t *area;
    __address frame;

    ASSERT(AS);

    spinlock_lock(&AS->lock);
    area = find_area_and_lock(AS, page);
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        spinlock_unlock(&AS->lock);
        return 0;
    }

    page_table_lock(AS, false);

    /*
     * To avoid a race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not already been inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            page_table_unlock(AS, false);
            spinlock_unlock(&area->lock);
            spinlock_unlock(&AS->lock);
            return 1;
        }
    }

    /*
     * In general, there can be several reasons
     * that could have caused this fault.
     *
     * - non-existent mapping: the area is a scratch
     *   area (e.g. stack) and so far has not been
     *   allocated a frame for the faulting page
     *
     * - non-present mapping: another possibility,
     *   currently not implemented, would be frame
     *   reuse; when this becomes a possibility,
     *   do not forget to distinguish between
     *   the different causes
     */
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    memsetb(PA2KA(frame), FRAME_SIZE, 0);

    /*
     * Map 'page' to 'frame'.
     * Note that TLB shootdown is not attempted as only new information is being
     * inserted into page tables.
     */
    page_mapping_insert(AS, page, frame, get_area_flags(area));
    page_table_unlock(AS, false);

    spinlock_unlock(&area->lock);
    spinlock_unlock(&AS->lock);
    return 1;
}
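
/*
 * Sketch (illustrative; the surrounding exception handler is architecture
 * specific and hypothetical here): how a low-level fault handler would use
 * as_page_fault(). Interrupts are already disabled when the fault is taken.
 */
static void example_low_level_handler(__address faulting_page)
{
    if (!as_page_fault(faulting_page)) {
        /* Unresolvable fault; a real handler would kill the offending task. */
        panic("page fault\n");
    }
}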

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&as_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        spinlock_lock(&old->lock);
        ASSERT(old->refcount);
        if ((--old->refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        spinlock_unlock(&old->lock);
    }

    /*
     * Second, prepare the new address space.
     */
    spinlock_lock(&new->lock);
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    spinlock_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        spinlock_lock(&new->lock);
        new->asid = asid;
        spinlock_unlock(&new->lock);
    }
    spinlock_unlock(&as_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}
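
/*
 * Usage sketch (illustrative): installing another address space on the
 * current processor, e.g. during a context switch to a thread of a
 * different task. AS always designates the currently installed address
 * space and is updated by as_switch() itself.
 */
static void example_switch(as_t *new_as)
{
    if (new_as != AS)
        as_switch(AS, new_as);
}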

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;

    if (a->flags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (a->flags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (a->flags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    return flags;
}
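
/*
 * Worked example: for an area with AS_AREA_READ | AS_AREA_EXEC set,
 * get_area_flags() yields
 *
 *     PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE | PAGE_READ | PAGE_EXEC
 *
 * i.e. the three base flags plus one page-level flag per area-level flag.
 */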

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}
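
/*
 * Sketch (illustrative) of the locking protocol described above, assuming
 * the caller already holds as->lock and the area lock with interrupts
 * disabled, as as_page_fault() does: false tells page_table_lock() not to
 * take as->lock again.
 */
static void example_locked_insert(as_t *as, as_area_t *a, __address page, __address frame)
{
    page_table_lock(as, false);
    page_mapping_insert(as, page, frame, get_area_flags(a));
    page_table_unlock(as, false);
}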

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_remap(as_t *as, __address address, size_t size, int flags)
{
    as_area_t *area = NULL;
    ipl_t ipl;
    size_t pages;

    ipl = interrupts_disable();
    spinlock_lock(&as->lock);

    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    pages = SIZE2FRAMES((address - area->base) + size);
    if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
        spinlock_unlock(&area->lock);
        spinlock_unlock(&as->lock);
        interrupts_restore(ipl);
        return (__address) -1;
    }

    if (pages < area->pages) {
        int i;

        /*
         * Shrinking the area.
         */
        for (i = pages; i < area->pages; i++) {
            pte_t *pte;

            /*
             * Releasing physical memory.
             * This depends on the fact that the memory was allocated using frame_alloc().
             */
            page_table_lock(as, false);
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
            if (pte && PTE_VALID(pte)) {
                __address frame;

                ASSERT(PTE_PRESENT(pte));
                frame = PTE_GET_FRAME(pte);
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
                page_table_unlock(as, false);

                frame_free(ADDR2PFN(frame));
            } else {
                page_table_unlock(as, false);
            }
        }
        /*
         * Invalidate TLBs.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize();
    }

    area->pages = pages;

    spinlock_unlock(&area->lock);
    spinlock_unlock(&as->lock);
    interrupts_restore(ipl);

    return address;
}
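
/*
 * Usage sketch (illustrative): shrinking an existing area to two pages.
 * The frames backing the truncated pages are returned to the frame
 * allocator and the corresponding TLB entries are shot down.
 */
static void example_shrink_area(as_t *as, __address base)
{
    if (as_remap(as, base, 2 * PAGE_SIZE, 0) == (__address) -1)
        panic("as_remap failed\n");
}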

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
    link_t *cur;
    as_area_t *a;

    for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
        a = list_get_instance(cur, as_area_t, link);
        spinlock_lock(&a->lock);

        if ((va >= a->base) && (va < a->base + a->pages * PAGE_SIZE))
            return a;

        spinlock_unlock(&a->lock);
    }

    return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
    link_t *cur;
    as_area_t *a;

    /*
     * We don't want any area to conflict with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    for (cur = as->as_area_head.next; cur != &as->as_area_head; cur = cur->next) {
        __address a_start;
        size_t a_size;

        a = list_get_instance(cur, as_area_t, link);
        if (a == avoid_area)
            continue;

        spinlock_lock(&a->lock);

        a_start = a->base;
        a_size = a->pages * PAGE_SIZE;

        spinlock_unlock(&a->lock);

        if (overlaps(va, size, a_start, a_size))
            return false;
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it does not conflict with the kernel address space either.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}
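
/*
 * Consequence of the NULL page check above (illustrative): an attempt to
 * create an area overlapping page zero fails, because check_area_conflicts()
 * treats the first page as permanently occupied.
 */
static void example_null_page(as_t *as)
{
    as_area_t *a;

    a = as_area_create(as, AS_AREA_READ, PAGE_SIZE, 0);
    ASSERT(a == NULL);
}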