Subversion Repositories HelenOS-historic

Rev 1233 Rev 1235
1
/*
1
/*
2
 * Copyright (C) 2001-2006 Jakub Jermar
2
 * Copyright (C) 2001-2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/*
29
/*
30
 * This file contains address space manipulation functions.
30
 * This file contains address space manipulation functions.
31
 * Roughly speaking, this is a higher-level client of the
31
 * Roughly speaking, this is a higher-level client of the
32
 * Virtual Address Translation (VAT) subsystem.
32
 * Virtual Address Translation (VAT) subsystem.
33
 */
33
 */
34
 
34
 
35
#include <mm/as.h>
35
#include <mm/as.h>
36
#include <arch/mm/as.h>
36
#include <arch/mm/as.h>
37
#include <mm/page.h>
37
#include <mm/page.h>
38
#include <mm/frame.h>
38
#include <mm/frame.h>
39
#include <mm/slab.h>
39
#include <mm/slab.h>
40
#include <mm/tlb.h>
40
#include <mm/tlb.h>
41
#include <arch/mm/page.h>
41
#include <arch/mm/page.h>
42
#include <genarch/mm/page_pt.h>
42
#include <genarch/mm/page_pt.h>
43
#include <genarch/mm/page_ht.h>
43
#include <genarch/mm/page_ht.h>
44
#include <mm/asid.h>
44
#include <mm/asid.h>
45
#include <arch/mm/asid.h>
45
#include <arch/mm/asid.h>
46
#include <arch/types.h>
-
 
47
#include <typedefs.h>
-
 
48
#include <synch/spinlock.h>
46
#include <synch/spinlock.h>
49
#include <config.h>
-
 
50
#include <adt/list.h>
47
#include <adt/list.h>
51
#include <adt/btree.h>
48
#include <adt/btree.h>
52
#include <panic.h>
49
#include <proc/task.h>
53
#include <arch/asm.h>
50
#include <arch/asm.h>
-
 
51
#include <panic.h>
54
#include <debug.h>
52
#include <debug.h>
-
 
53
#include <print.h>
55
#include <memstr.h>
54
#include <memstr.h>
56
#include <macros.h>
55
#include <macros.h>
57
#include <arch.h>
56
#include <arch.h>
58
#include <print.h>
57
#include <errno.h>
-
 
58
#include <config.h>
-
 
59
#include <arch/types.h>
-
 
60
#include <typedefs.h>
59
 
61
 
60
as_operations_t *as_operations = NULL;
62
as_operations_t *as_operations = NULL;
61
 
63
 
62
/** Address space lock. It protects inactive_as_with_asid_head. */
64
/** Address space lock. It protects inactive_as_with_asid_head. */
63
SPINLOCK_INITIALIZE(as_lock);
65
SPINLOCK_INITIALIZE(as_lock);
64
 
66
 
65
/**
67
/**
66
 * This list contains address spaces that are not active on any
68
 * This list contains address spaces that are not active on any
67
 * processor and that have a valid ASID.
69
 * processor and that have a valid ASID.
68
 */
70
 */
69
LIST_INITIALIZE(inactive_as_with_asid_head);
71
LIST_INITIALIZE(inactive_as_with_asid_head);
70
 
72
 
71
/** Kernel address space. */
73
/** Kernel address space. */
72
as_t *AS_KERNEL = NULL;
74
as_t *AS_KERNEL = NULL;
73
 
75
 
-
 
76
static int area_flags_to_page_flags(int aflags);
74
static int get_area_flags(as_area_t *a);
77
static int get_area_flags(as_area_t *a);
75
static as_area_t *find_area_and_lock(as_t *as, __address va);
78
static as_area_t *find_area_and_lock(as_t *as, __address va);
76
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
79
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
77
 
80
 
78
/** Initialize address space subsystem. */
81
/** Initialize address space subsystem. */
79
void as_init(void)
82
void as_init(void)
80
{
83
{
81
    as_arch_init();
84
    as_arch_init();
82
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
85
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
83
        if (!AS_KERNEL)
86
        if (!AS_KERNEL)
84
                panic("can't create kernel address space\n");
87
                panic("can't create kernel address space\n");
85
}
88
}
86
 
89
 
87
/** Create address space.
90
/** Create address space.
88
 *
91
 *
89
 * @param flags Flags that influence the way in which the address space is created.
92
 * @param flags Flags that influence the way in which the address space is created.
90
 */
93
 */
91
as_t *as_create(int flags)
94
as_t *as_create(int flags)
92
{
95
{
93
    as_t *as;
96
    as_t *as;
94
 
97
 
95
    as = (as_t *) malloc(sizeof(as_t), 0);
98
    as = (as_t *) malloc(sizeof(as_t), 0);
96
    link_initialize(&as->inactive_as_with_asid_link);
99
    link_initialize(&as->inactive_as_with_asid_link);
97
    spinlock_initialize(&as->lock, "as_lock");
100
    spinlock_initialize(&as->lock, "as_lock");
98
    btree_create(&as->as_area_btree);
101
    btree_create(&as->as_area_btree);
99
   
102
   
100
    if (flags & FLAG_AS_KERNEL)
103
    if (flags & FLAG_AS_KERNEL)
101
        as->asid = ASID_KERNEL;
104
        as->asid = ASID_KERNEL;
102
    else
105
    else
103
        as->asid = ASID_INVALID;
106
        as->asid = ASID_INVALID;
104
   
107
   
105
    as->refcount = 0;
108
    as->refcount = 0;
106
    as->page_table = page_table_create(flags);
109
    as->page_table = page_table_create(flags);
107
 
110
 
108
    return as;
111
    return as;
109
}
112
}
110
 
113
 
111
/** Free address space. */
114
/** Free address space. */
112
void as_free(as_t *as)
115
void as_free(as_t *as)
113
{
116
{
114
    ASSERT(as->refcount == 0);
117
    ASSERT(as->refcount == 0);
115
 
118
 
116
    /* TODO: free as_areas and other resources held by as */
119
    /* TODO: free as_areas and other resources held by as */
117
    /* TODO: free page table */
120
    /* TODO: free page table */
118
    free(as);
121
    free(as);
119
}
122
}
120
 
123
 
121
/** Create address space area of common attributes.
124
/** Create address space area of common attributes.
122
 *
125
 *
123
 * The created address space area is added to the target address space.
126
 * The created address space area is added to the target address space.
124
 *
127
 *
125
 * @param as Target address space.
128
 * @param as Target address space.
126
 * @param flags Flags of the area.
129
 * @param flags Flags of the area.
127
 * @param size Size of area.
130
 * @param size Size of area.
128
 * @param base Base address of area.
131
 * @param base Base address of area.
129
 *
132
 *
130
 * @return Address space area on success or NULL on failure.
133
 * @return Address space area on success or NULL on failure.
131
 */
134
 */
132
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
135
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
133
{
136
{
134
    ipl_t ipl;
137
    ipl_t ipl;
135
    as_area_t *a;
138
    as_area_t *a;
136
   
139
   
137
    if (base % PAGE_SIZE)
140
    if (base % PAGE_SIZE)
138
        return NULL;
141
        return NULL;
139
 
142
 
140
    if (!size)
143
    if (!size)
141
        return NULL;
144
        return NULL;
142
 
145
 
143
    /* Writeable executable areas are not supported. */
146
    /* Writeable executable areas are not supported. */
144
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
147
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
145
        return NULL;
148
        return NULL;
146
   
149
   
147
    ipl = interrupts_disable();
150
    ipl = interrupts_disable();
148
    spinlock_lock(&as->lock);
151
    spinlock_lock(&as->lock);
149
   
152
   
150
    if (!check_area_conflicts(as, base, size, NULL)) {
153
    if (!check_area_conflicts(as, base, size, NULL)) {
151
        spinlock_unlock(&as->lock);
154
        spinlock_unlock(&as->lock);
152
        interrupts_restore(ipl);
155
        interrupts_restore(ipl);
153
        return NULL;
156
        return NULL;
154
    }
157
    }
155
   
158
   
156
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
159
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
157
 
160
 
158
    spinlock_initialize(&a->lock, "as_area_lock");
161
    spinlock_initialize(&a->lock, "as_area_lock");
159
   
162
   
160
    a->flags = flags;
163
    a->flags = flags;
161
    a->pages = SIZE2FRAMES(size);
164
    a->pages = SIZE2FRAMES(size);
162
    a->base = base;
165
    a->base = base;
163
   
166
   
164
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
167
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
165
 
168
 
166
    spinlock_unlock(&as->lock);
169
    spinlock_unlock(&as->lock);
167
    interrupts_restore(ipl);
170
    interrupts_restore(ipl);
168
 
171
 
169
    return a;
172
    return a;
170
}
173
}
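A minimal usage sketch (illustrative, not part of the original file): it creates a lazily populated read/write area in the current address space. The wrapper function and the base address are assumptions; the as_area_create() signature, flags and failure conditions come from the code above.

/* Illustrative sketch only; the function and the base address are hypothetical. */
static void as_area_create_example(void)
{
    as_area_t *a;

    /*
     * No frames are allocated here; they are supplied on demand by
     * as_page_fault() when the area is first touched.
     */
    a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, 16 * PAGE_SIZE, (__address) 0x40000000);
    if (!a) {
        /* Unaligned base, zero size or a conflict with an existing area. */
    }
}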
171
 
174
 
-
 
175
/** Find address space area and change it.
-
 
176
 *
-
 
177
 * @param as Address space.
-
 
178
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
-
 
179
 * @param size New size of the virtual memory block starting at address.
-
 
180
 * @param flags Flags influencing the remap operation. Currently unused.
-
 
181
 *
-
 
182
 * @return address on success, (__address) -1 otherwise.
-
 
183
 */
-
 
184
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
-
 
185
{
-
 
186
    as_area_t *area = NULL;
-
 
187
    ipl_t ipl;
-
 
188
    size_t pages;
-
 
189
   
-
 
190
    ipl = interrupts_disable();
-
 
191
    spinlock_lock(&as->lock);
-
 
192
   
-
 
193
    /*
-
 
194
     * Locate the area.
-
 
195
     */
-
 
196
    area = find_area_and_lock(as, address);
-
 
197
    if (!area) {
-
 
198
        spinlock_unlock(&as->lock);
-
 
199
        interrupts_restore(ipl);
-
 
200
        return (__address) -1;
-
 
201
    }
-
 
202
 
-
 
203
    if (area->flags & AS_AREA_DEVICE) {
-
 
204
        /*
-
 
205
         * Remapping of address space areas associated
-
 
206
         * with memory mapped devices is not supported.
-
 
207
         */
-
 
208
        spinlock_unlock(&area->lock);
-
 
209
        spinlock_unlock(&as->lock);
-
 
210
        interrupts_restore(ipl);
-
 
211
        return (__address) -1;
-
 
212
    }
-
 
213
 
-
 
214
    pages = SIZE2FRAMES((address - area->base) + size);
-
 
215
    if (!pages) {
-
 
216
        /*
-
 
217
         * Zero size address space areas are not allowed.
-
 
218
         */
-
 
219
        spinlock_unlock(&area->lock);
-
 
220
        spinlock_unlock(&as->lock);
-
 
221
        interrupts_restore(ipl);
-
 
222
        return (__address) -1;
-
 
223
    }
-
 
224
   
-
 
225
    if (pages < area->pages) {
-
 
226
        int i;
-
 
227
 
-
 
228
        /*
-
 
229
         * Shrinking the area.
-
 
230
         * No need to check for overlaps.
-
 
231
         */
-
 
232
        for (i = pages; i < area->pages; i++) {
-
 
233
            pte_t *pte;
-
 
234
           
-
 
235
            /*
-
 
236
             * Releasing physical memory.
-
 
237
             * This depends on the fact that the memory was allocated using frame_alloc().
-
 
238
             */
-
 
239
            page_table_lock(as, false);
-
 
240
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
-
 
241
            if (pte && PTE_VALID(pte)) {
-
 
242
                __address frame;
-
 
243
 
-
 
244
                ASSERT(PTE_PRESENT(pte));
-
 
245
                frame = PTE_GET_FRAME(pte);
-
 
246
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
-
 
247
                page_table_unlock(as, false);
-
 
248
 
-
 
249
                frame_free(ADDR2PFN(frame));
-
 
250
            } else {
-
 
251
                page_table_unlock(as, false);
-
 
252
            }
-
 
253
        }
-
 
254
        /*
-
 
255
         * Invalidate TLBs.
-
 
256
         */
-
 
257
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
-
 
258
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
-
 
259
        tlb_shootdown_finalize();
-
 
260
    } else {
-
 
261
        /*
-
 
262
         * Growing the area.
-
 
263
         * Check for overlaps with other address space areas.
-
 
264
         */
-
 
265
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
-
 
266
            spinlock_unlock(&area->lock);
-
 
267
            spinlock_unlock(&as->lock);    
-
 
268
            interrupts_restore(ipl);
-
 
269
            return (__address) -1;
-
 
270
        }
-
 
271
    }
-
 
272
 
-
 
273
    area->pages = pages;
-
 
274
   
-
 
275
    spinlock_unlock(&area->lock);
-
 
276
    spinlock_unlock(&as->lock);
-
 
277
    interrupts_restore(ipl);
-
 
278
 
-
 
279
    return address;
-
 
280
}
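An illustrative sketch of the resize interface (not part of the original file): the wrapper function and the sizes are assumptions; the (__address) -1 failure convention and the currently unused flags argument come from the function above.

/* Illustrative sketch only; the function is hypothetical and base must be the
 * page-aligned base of an existing, non-device area in the current address space. */
static void as_area_resize_example(__address base)
{
    if (as_area_resize(AS, base, 32 * PAGE_SIZE, 0) == (__address) -1) {
        /* Growing failed: unknown base, device area or overlap with a neighbour. */
    }

    if (as_area_resize(AS, base, 8 * PAGE_SIZE, 0) == (__address) -1) {
        /* Shrinking failed; on success the frames of the trimmed pages are freed. */
    }
}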
-
 
281
 
-
 
282
/** Send address space area to another task.
-
 
283
 *
-
 
284
 * Address space area is sent to the specified task.
-
 
285
 * If the destination task is willing to accept the
-
 
286
 * area, a new area is created according to the
-
 
287
 * source area. Moreover, any existing mapping
-
 
288
 * is copied as well, thus providing a mechanism
-
 
289
 * for sharing a group of pages. The source address
-
 
290
 * space area and any associated mapping is preserved.
-
 
291
 *
-
 
292
 * @param id Task ID of the accepting task.
-
 
293
 * @param base Base address of the source address space area.
-
 
294
 * @param size Size of the source address space area.
-
 
295
 * @param flags Flags of the source address space area.
-
 
296
 *
-
 
297
 * @return 0 on success or ENOENT if there is no such task or
-
 
298
 *     if there is no such address space area,
-
 
299
 *     EPERM if there was a problem in accepting the area or
-
 
300
 *     ENOMEM if there was a problem in allocating destination
-
 
301
 *     address space area.
-
 
302
 */
-
 
303
int as_area_send(task_id_t id, __address base, size_t size, int flags)
-
 
304
{
-
 
305
    ipl_t ipl;
-
 
306
    task_t *t;
-
 
307
    count_t i;
-
 
308
    as_t *as;
-
 
309
    __address dst_base;
-
 
310
   
-
 
311
    ipl = interrupts_disable();
-
 
312
    spinlock_lock(&tasks_lock);
-
 
313
   
-
 
314
    t = task_find_by_id(id);
-
 
315
    if (!t) {
-
 
316
        spinlock_unlock(&tasks_lock);
-
 
317
        interrupts_restore(ipl);
-
 
318
        return ENOENT;
-
 
319
    }
-
 
320
 
-
 
321
    spinlock_lock(&t->lock);
-
 
322
    spinlock_unlock(&tasks_lock);
-
 
323
 
-
 
324
    as = t->as;
-
 
325
    dst_base = (__address) t->accept_arg.base;
-
 
326
   
-
 
327
    if (as == AS) {
-
 
328
        /*
-
 
329
         * The two tasks share the entire address space.
-
 
330
         * Return error since there is no point in continuing.
-
 
331
         */
-
 
332
        spinlock_unlock(&t->lock);
-
 
333
        interrupts_restore(ipl);
-
 
334
        return EPERM;
-
 
335
    }
-
 
336
 
-
 
337
    if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != size) ||
-
 
338
        (t->accept_arg.flags != flags)) {
-
 
339
        /*
-
 
340
         * Discrepancy in either task ID, size or flags.
-
 
341
         */
-
 
342
        spinlock_unlock(&t->lock);
-
 
343
        interrupts_restore(ipl);
-
 
344
        return EPERM;
-
 
345
    }
-
 
346
   
-
 
347
    /*
-
 
348
     * Create copy of the address space area.
-
 
349
     */
-
 
350
    if (!as_area_create(as, flags, size, dst_base)) {
-
 
351
        /*
-
 
352
         * Destination address space area could not be created.
-
 
353
         */
-
 
354
        spinlock_unlock(&t->lock);
-
 
355
        interrupts_restore(ipl);
-
 
356
        return ENOMEM;
-
 
357
    }
-
 
358
   
-
 
359
    /*
-
 
360
     * NOTE: we have just introduced a race condition.
-
 
361
     * The destination task can try to access the newly
-
 
362
     * created area before its mapping is copied from
-
 
363
     * the source address space area. As a result, frames
-
 
364
     * can get lost.
-
 
365
     *
-
 
366
     * Currently, this race is not solved, but one of the
-
 
367
     * possible solutions would be to sleep in as_page_fault()
-
 
368
     * when this situation is detected.
-
 
369
     */
-
 
370
 
-
 
371
    memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
-
 
372
    spinlock_unlock(&t->lock);
-
 
373
   
-
 
374
    /*
-
 
375
     * Avoid deadlock by first locking the address space with lower address.
-
 
376
     */
-
 
377
    if (as < AS) {
-
 
378
        spinlock_lock(&as->lock);
-
 
379
        spinlock_lock(&AS->lock);
-
 
380
    } else {
-
 
381
        spinlock_lock(&AS->lock);
-
 
382
        spinlock_lock(&as->lock);
-
 
383
    }
-
 
384
   
-
 
385
    for (i = 0; i < SIZE2FRAMES(size); i++) {
-
 
386
        pte_t *pte;
-
 
387
        __address frame;
-
 
388
           
-
 
389
        page_table_lock(AS, false);
-
 
390
        pte = page_mapping_find(AS, base + i*PAGE_SIZE);
-
 
391
        if (pte && PTE_VALID(pte)) {
-
 
392
            ASSERT(PTE_PRESENT(pte));
-
 
393
            frame = PTE_GET_FRAME(pte);
-
 
394
            if (!(flags & AS_AREA_DEVICE)) {
-
 
395
                /* TODO: increment frame reference count */
-
 
396
            }
-
 
397
            page_table_unlock(AS, false);
-
 
398
        } else {
-
 
399
            page_table_unlock(AS, false);
-
 
400
            continue;
-
 
401
        }
-
 
402
       
-
 
403
        page_table_lock(as, false);
-
 
404
        page_mapping_insert(as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(flags));
-
 
405
        page_table_unlock(as, false);
-
 
406
    }
-
 
407
   
-
 
408
    spinlock_unlock(&AS->lock);
-
 
409
    spinlock_unlock(&as->lock);
-
 
410
    interrupts_restore(ipl);
-
 
411
   
-
 
412
    return 0;
-
 
413
}
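To make the accept/send protocol concrete, an illustrative sketch of both sides follows; it is not part of the original file. The wrapper functions, task IDs, addresses and the type of the base member are assumptions, and the kernel wrappers are called directly only for brevity (real userspace goes through the syscall path); the matching rules mirror the checks in as_area_send() above.

/* Illustrative sketch only; both functions, the IDs and the addresses are hypothetical. */
static void receiver_side_example(task_id_t sender_id)
{
    as_area_acptsnd_arg_t accept;

    /* Announce what the task is willing to accept and from whom. */
    accept.task_id = sender_id;
    accept.base = (void *) 0x40000000;          /* where to map the area locally */
    accept.size = 4 * PAGE_SIZE;
    accept.flags = AS_AREA_READ | AS_AREA_WRITE;
    sys_as_area_accept(&accept);
}

static void sender_side_example(task_id_t receiver_id, __address src_base)
{
    as_area_acptsnd_arg_t send;

    /* Task ID, size and flags must match the receiver's accept record,
     * otherwise as_area_send() returns EPERM. */
    send.task_id = receiver_id;
    send.base = (void *) src_base;
    send.size = 4 * PAGE_SIZE;
    send.flags = AS_AREA_READ | AS_AREA_WRITE;
    sys_as_area_send(&send);
}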
-
 
414
 
172
/** Initialize mapping for one page of address space.
415
/** Initialize mapping for one page of address space.
173
 *
416
 *
174
 * This function maps 'page' to 'frame' according
417
 * This function maps 'page' to 'frame' according
175
 * to attributes of the address space area to
418
 * to attributes of the address space area to
176
 * which 'page' belongs.
419
 * which 'page' belongs.
177
 *
420
 *
178
 * @param as Target address space.
421
 * @param as Target address space.
179
 * @param page Virtual page within the area.
422
 * @param page Virtual page within the area.
180
 * @param frame Physical frame to which page will be mapped.
423
 * @param frame Physical frame to which page will be mapped.
181
 */
424
 */
182
void as_set_mapping(as_t *as, __address page, __address frame)
425
void as_set_mapping(as_t *as, __address page, __address frame)
183
{
426
{
184
    as_area_t *area;
427
    as_area_t *area;
185
    ipl_t ipl;
428
    ipl_t ipl;
186
   
429
   
187
    ipl = interrupts_disable();
430
    ipl = interrupts_disable();
188
    page_table_lock(as, true);
431
    page_table_lock(as, true);
189
   
432
   
190
    area = find_area_and_lock(as, page);
433
    area = find_area_and_lock(as, page);
191
    if (!area) {
434
    if (!area) {
192
        panic("page not part of any as_area\n");
435
        panic("page not part of any as_area\n");
193
    }
436
    }
194
 
437
 
195
    page_mapping_insert(as, page, frame, get_area_flags(area));
438
    page_mapping_insert(as, page, frame, get_area_flags(area));
196
   
439
   
197
    spinlock_unlock(&area->lock);
440
    spinlock_unlock(&area->lock);
198
    page_table_unlock(as, true);
441
    page_table_unlock(as, true);
199
    interrupts_restore(ipl);
442
    interrupts_restore(ipl);
200
}
443
}
201
 
444
 
202
/** Handle page fault within the current address space.
445
/** Handle page fault within the current address space.
203
 *
446
 *
204
 * This is the high-level page fault handler.
447
 * This is the high-level page fault handler.
205
 * Interrupts are assumed disabled.
448
 * Interrupts are assumed disabled.
206
 *
449
 *
207
 * @param page Faulting page.
450
 * @param page Faulting page.
208
 *
451
 *
209
 * @return 0 if the fault could not be resolved, 1 on success.
452
 * @return 0 if the fault could not be resolved, 1 on success.
210
 */
453
 */
211
int as_page_fault(__address page)
454
int as_page_fault(__address page)
212
{
455
{
213
    pte_t *pte;
456
    pte_t *pte;
214
    as_area_t *area;
457
    as_area_t *area;
215
    __address frame;
458
    __address frame;
216
   
459
   
217
    ASSERT(AS);
460
    ASSERT(AS);
218
 
461
 
219
    spinlock_lock(&AS->lock);
462
    spinlock_lock(&AS->lock);
220
    area = find_area_and_lock(AS, page);   
463
    area = find_area_and_lock(AS, page);   
221
    if (!area) {
464
    if (!area) {
222
        /*
465
        /*
223
         * No area contained mapping for 'page'.
466
         * No area contained mapping for 'page'.
224
         * Signal page fault to low-level handler.
467
         * Signal page fault to low-level handler.
225
         */
468
         */
226
        spinlock_unlock(&AS->lock);
469
        spinlock_unlock(&AS->lock);
227
        return 0;
470
        return 0;
228
    }
471
    }
229
 
472
 
230
    ASSERT(!(area->flags & AS_AREA_DEVICE));
473
    ASSERT(!(area->flags & AS_AREA_DEVICE));
231
 
474
 
232
    page_table_lock(AS, false);
475
    page_table_lock(AS, false);
233
   
476
   
234
    /*
477
    /*
235
     * To avoid race condition between two page faults
478
     * To avoid race condition between two page faults
236
     * on the same address, we need to make sure
479
     * on the same address, we need to make sure
237
     * the mapping has not been already inserted.
480
     * the mapping has not been already inserted.
238
     */
481
     */
239
    if ((pte = page_mapping_find(AS, page))) {
482
    if ((pte = page_mapping_find(AS, page))) {
240
        if (PTE_PRESENT(pte)) {
483
        if (PTE_PRESENT(pte)) {
241
            page_table_unlock(AS, false);
484
            page_table_unlock(AS, false);
242
            spinlock_unlock(&area->lock);
485
            spinlock_unlock(&area->lock);
243
            spinlock_unlock(&AS->lock);
486
            spinlock_unlock(&AS->lock);
244
            return 1;
487
            return 1;
245
        }
488
        }
246
    }
489
    }
247
 
490
 
248
    /*
491
    /*
249
     * In general, there can be several reasons that
492
     * In general, there can be several reasons that
250
     * can have caused this fault.
493
     * can have caused this fault.
251
     *
494
     *
252
     * - non-existent mapping: the area is a scratch
495
     * - non-existent mapping: the area is a scratch
253
     *   area (e.g. stack) and so far has not been
496
     *   area (e.g. stack) and so far has not been
254
     *   allocated a frame for the faulting page
497
     *   allocated a frame for the faulting page
255
     *
498
     *
256
     * - non-present mapping: another possibility,
499
     * - non-present mapping: another possibility,
257
     *   currently not implemented, would be frame
500
     *   currently not implemented, would be frame
258
     *   reuse; when this becomes a possibility,
501
     *   reuse; when this becomes a possibility,
259
     *   do not forget to distinguish between
502
     *   do not forget to distinguish between
260
     *   the different causes
503
     *   the different causes
261
     */
504
     */
262
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
505
    frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
263
    memsetb(PA2KA(frame), FRAME_SIZE, 0);
506
    memsetb(PA2KA(frame), FRAME_SIZE, 0);
264
   
507
   
265
    /*
508
    /*
266
     * Map 'page' to 'frame'.
509
     * Map 'page' to 'frame'.
267
     * Note that TLB shootdown is not attempted as only new information is being
510
     * Note that TLB shootdown is not attempted as only new information is being
268
     * inserted into page tables.
511
     * inserted into page tables.
269
     */
512
     */
270
    page_mapping_insert(AS, page, frame, get_area_flags(area));
513
    page_mapping_insert(AS, page, frame, get_area_flags(area));
271
    page_table_unlock(AS, false);
514
    page_table_unlock(AS, false);
272
   
515
   
273
    spinlock_unlock(&area->lock);
516
    spinlock_unlock(&area->lock);
274
    spinlock_unlock(&AS->lock);
517
    spinlock_unlock(&AS->lock);
275
    return 1;
518
    return 1;
276
}
519
}
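An illustrative sketch of the expected caller (not part of the original file): an architecture-level fault handler consults as_page_fault() and escalates when it returns 0. The handler name and the panic fallback are assumptions; the 0/1 return convention and the interrupts-disabled requirement are taken from the function above.

/* Illustrative sketch only; the handler name is hypothetical. Interrupts are
 * already disabled when a fault handler runs, as as_page_fault() requires. */
static void page_fault_handler_example(__address faulting_page)
{
    if (!as_page_fault(faulting_page)) {
        /* No area covers the page; the fault cannot be resolved at this level. */
        panic("unhandled page fault\n");
    }

    /* Otherwise the mapping is installed and the faulting instruction can be restarted. */
}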
277
 
520
 
278
/** Switch address spaces.
521
/** Switch address spaces.
279
 *
522
 *
280
 * @param old Old address space or NULL.
523
 * @param old Old address space or NULL.
281
 * @param new New address space.
524
 * @param new New address space.
282
 */
525
 */
283
void as_switch(as_t *old, as_t *new)
526
void as_switch(as_t *old, as_t *new)
284
{
527
{
285
    ipl_t ipl;
528
    ipl_t ipl;
286
    bool needs_asid = false;
529
    bool needs_asid = false;
287
   
530
   
288
    ipl = interrupts_disable();
531
    ipl = interrupts_disable();
289
    spinlock_lock(&as_lock);
532
    spinlock_lock(&as_lock);
290
 
533
 
291
    /*
534
    /*
292
     * First, take care of the old address space.
535
     * First, take care of the old address space.
293
     */
536
     */
294
    if (old) {
537
    if (old) {
295
        spinlock_lock(&old->lock);
538
        spinlock_lock(&old->lock);
296
        ASSERT(old->refcount);
539
        ASSERT(old->refcount);
297
        if((--old->refcount == 0) && (old != AS_KERNEL)) {
540
        if((--old->refcount == 0) && (old != AS_KERNEL)) {
298
            /*
541
            /*
299
             * The old address space is no longer active on
542
             * The old address space is no longer active on
300
             * any processor. It can be appended to the
543
             * any processor. It can be appended to the
301
             * list of inactive address spaces with assigned
544
             * list of inactive address spaces with assigned
302
             * ASID.
545
             * ASID.
303
             */
546
             */
304
             ASSERT(old->asid != ASID_INVALID);
547
             ASSERT(old->asid != ASID_INVALID);
305
             list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
548
             list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
306
        }
549
        }
307
        spinlock_unlock(&old->lock);
550
        spinlock_unlock(&old->lock);
308
    }
551
    }
309
 
552
 
310
    /*
553
    /*
311
     * Second, prepare the new address space.
554
     * Second, prepare the new address space.
312
     */
555
     */
313
    spinlock_lock(&new->lock);
556
    spinlock_lock(&new->lock);
314
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
557
    if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
315
        if (new->asid != ASID_INVALID)
558
        if (new->asid != ASID_INVALID)
316
            list_remove(&new->inactive_as_with_asid_link);
559
            list_remove(&new->inactive_as_with_asid_link);
317
        else
560
        else
318
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
561
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
319
    }
562
    }
320
    SET_PTL0_ADDRESS(new->page_table);
563
    SET_PTL0_ADDRESS(new->page_table);
321
    spinlock_unlock(&new->lock);
564
    spinlock_unlock(&new->lock);
322
 
565
 
323
    if (needs_asid) {
566
    if (needs_asid) {
324
        /*
567
        /*
325
         * Allocation of new ASID was deferred
568
         * Allocation of new ASID was deferred
326
         * until now in order to avoid deadlock.
569
         * until now in order to avoid deadlock.
327
         */
570
         */
328
        asid_t asid;
571
        asid_t asid;
329
       
572
       
330
        asid = asid_get();
573
        asid = asid_get();
331
        spinlock_lock(&new->lock);
574
        spinlock_lock(&new->lock);
332
        new->asid = asid;
575
        new->asid = asid;
333
        spinlock_unlock(&new->lock);
576
        spinlock_unlock(&new->lock);
334
    }
577
    }
335
    spinlock_unlock(&as_lock);
578
    spinlock_unlock(&as_lock);
336
    interrupts_restore(ipl);
579
    interrupts_restore(ipl);
337
   
580
   
338
    /*
581
    /*
339
     * Perform architecture-specific steps.
582
     * Perform architecture-specific steps.
340
     * (e.g. write ASID to hardware register etc.)
583
     * (e.g. write ASID to hardware register etc.)
341
     */
584
     */
342
    as_install_arch(new);
585
    as_install_arch(new);
343
   
586
   
344
    AS = new;
587
    AS = new;
345
}
588
}
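An illustrative sketch of the intended use (not part of the original file): the code that switches between tasks hands the CPU's address space over through as_switch(). The wrapper function is an assumption; the old/new argument convention is from above.

/* Illustrative sketch only; the function is hypothetical. */
static void switch_to_task_example(task_t *new_task)
{
    if (new_task->as != AS) {
        /* Reference counting, the inactive-ASID list and PTL0 installation
         * are all handled inside as_switch(). */
        as_switch(AS, new_task->as);
    }
}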
346
 
589
 
347
/** Compute flags for virtual address translation subsystem.
590
/** Convert address space area flags to page flags.
348
 *
591
 *
349
 * The address space area must be locked.
-
 
350
 * Interrupts must be disabled.
-
 
351
 *
-
 
352
 * @param a Address space area.
592
 * @param aflags Flags of some address space area.
353
 *
593
 *
354
 * @return Flags to be used in page_mapping_insert().
594
 * @return Flags to be passed to page_mapping_insert().
355
 */
595
 */
356
int get_area_flags(as_area_t *a)
596
int area_flags_to_page_flags(int aflags)
357
{
597
{
358
    int flags;
598
    int flags;
359
 
599
 
360
    flags = PAGE_USER | PAGE_PRESENT;
600
    flags = PAGE_USER | PAGE_PRESENT;
361
   
601
   
362
    if (a->flags & AS_AREA_READ)
602
    if (aflags & AS_AREA_READ)
363
        flags |= PAGE_READ;
603
        flags |= PAGE_READ;
364
       
604
       
365
    if (a->flags & AS_AREA_WRITE)
605
    if (aflags & AS_AREA_WRITE)
366
        flags |= PAGE_WRITE;
606
        flags |= PAGE_WRITE;
367
   
607
   
368
    if (a->flags & AS_AREA_EXEC)
608
    if (aflags & AS_AREA_EXEC)
369
        flags |= PAGE_EXEC;
609
        flags |= PAGE_EXEC;
370
   
610
   
371
    if (!(a->flags & AS_AREA_DEVICE))
611
    if (!(aflags & AS_AREA_DEVICE))
372
        flags |= PAGE_CACHEABLE;
612
        flags |= PAGE_CACHEABLE;
373
       
613
       
374
    return flags;
614
    return flags;
375
}
615
}
376
 
616
 
-
 
617
/** Compute flags for virtual address translation subsystem.
-
 
618
 *
-
 
619
 * The address space area must be locked.
-
 
620
 * Interrupts must be disabled.
-
 
621
 *
-
 
622
 * @param a Address space area.
-
 
623
 *
-
 
624
 * @return Flags to be used in page_mapping_insert().
-
 
625
 */
-
 
626
int get_area_flags(as_area_t *a)
-
 
627
{
-
 
628
    return area_flags_to_page_flags(a->flags);
-
 
629
}
-
 
630
 
377
/** Create page table.
631
/** Create page table.
378
 *
632
 *
379
 * Depending on architecture, create either address space
633
 * Depending on architecture, create either address space
380
 * private or global page table.
634
 * private or global page table.
381
 *
635
 *
382
 * @param flags Flags saying whether the page table is for kernel address space.
636
 * @param flags Flags saying whether the page table is for kernel address space.
383
 *
637
 *
384
 * @return First entry of the page table.
638
 * @return First entry of the page table.
385
 */
639
 */
386
pte_t *page_table_create(int flags)
640
pte_t *page_table_create(int flags)
387
{
641
{
388
        ASSERT(as_operations);
642
        ASSERT(as_operations);
389
        ASSERT(as_operations->page_table_create);
643
        ASSERT(as_operations->page_table_create);
390
 
644
 
391
        return as_operations->page_table_create(flags);
645
        return as_operations->page_table_create(flags);
392
}
646
}
393
 
647
 
394
/** Lock page table.
648
/** Lock page table.
395
 *
649
 *
396
 * This function should be called before any page_mapping_insert(),
650
 * This function should be called before any page_mapping_insert(),
397
 * page_mapping_remove() and page_mapping_find().
651
 * page_mapping_remove() and page_mapping_find().
398
 *
652
 *
399
 * Locking order is such that address space areas must be locked
653
 * Locking order is such that address space areas must be locked
400
 * prior to this call. Address space can be locked prior to this
654
 * prior to this call. Address space can be locked prior to this
401
 * call in which case the lock argument is false.
655
 * call in which case the lock argument is false.
402
 *
656
 *
403
 * @param as Address space.
657
 * @param as Address space.
404
 * @param as_locked If false, do not attempt to lock as->lock.
658
 * @param as_locked If false, do not attempt to lock as->lock.
405
 */
659
 */
406
void page_table_lock(as_t *as, bool lock)
660
void page_table_lock(as_t *as, bool lock)
407
{
661
{
408
    ASSERT(as_operations);
662
    ASSERT(as_operations);
409
    ASSERT(as_operations->page_table_lock);
663
    ASSERT(as_operations->page_table_lock);
410
 
664
 
411
    as_operations->page_table_lock(as, lock);
665
    as_operations->page_table_lock(as, lock);
412
}
666
}
413
 
667
 
414
/** Unlock page table.
668
/** Unlock page table.
415
 *
669
 *
416
 * @param as Address space.
670
 * @param as Address space.
417
 * @param as_locked If false, do not attempt to unlock as->lock.
671
 * @param as_locked If false, do not attempt to unlock as->lock.
418
 */
672
 */
419
void page_table_unlock(as_t *as, bool unlock)
673
void page_table_unlock(as_t *as, bool unlock)
420
{
674
{
421
    ASSERT(as_operations);
675
    ASSERT(as_operations);
422
    ASSERT(as_operations->page_table_unlock);
676
    ASSERT(as_operations->page_table_unlock);
423
 
677
 
424
    as_operations->page_table_unlock(as, unlock);
678
    as_operations->page_table_unlock(as, unlock);
425
}
679
}
426
 
680
 
427
/** Find address space area and change it.
-
 
428
 *
-
 
429
 * @param as Address space.
-
 
430
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
-
 
431
 * @param size New size of the virtual memory block starting at address.
-
 
432
 * @param flags Flags influencing the remap operation. Currently unused.
-
 
433
 *
-
 
434
 * @return address on success, (__address) -1 otherwise.
-
 
435
 */
-
 
436
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
-
 
437
{
-
 
438
    as_area_t *area = NULL;
-
 
439
    ipl_t ipl;
-
 
440
    size_t pages;
-
 
441
   
-
 
442
    ipl = interrupts_disable();
-
 
443
    spinlock_lock(&as->lock);
-
 
444
   
-
 
445
    /*
-
 
446
     * Locate the area.
-
 
447
     */
-
 
448
    area = find_area_and_lock(as, address);
-
 
449
    if (!area) {
-
 
450
        spinlock_unlock(&as->lock);
-
 
451
        interrupts_restore(ipl);
-
 
452
        return (__address) -1;
-
 
453
    }
-
 
454
 
-
 
455
    if (area->flags & AS_AREA_DEVICE) {
-
 
456
        /*
-
 
457
         * Remapping of address space areas associated
-
 
458
         * with memory mapped devices is not supported.
-
 
459
         */
-
 
460
        spinlock_unlock(&area->lock);
-
 
461
        spinlock_unlock(&as->lock);
-
 
462
        interrupts_restore(ipl);
-
 
463
        return (__address) -1;
-
 
464
    }
-
 
465
 
-
 
466
    pages = SIZE2FRAMES((address - area->base) + size);
-
 
467
    if (!pages) {
-
 
468
        /*
-
 
469
         * Zero size address space areas are not allowed.
-
 
470
         */
-
 
471
        spinlock_unlock(&area->lock);
-
 
472
        spinlock_unlock(&as->lock);
-
 
473
        interrupts_restore(ipl);
-
 
474
        return (__address) -1;
-
 
475
    }
-
 
476
   
-
 
477
    if (pages < area->pages) {
-
 
478
        int i;
-
 
479
 
-
 
480
        /*
-
 
481
         * Shrinking the area.
-
 
482
         * No need to check for overlaps.
-
 
483
         */
-
 
484
        for (i = pages; i < area->pages; i++) {
-
 
485
            pte_t *pte;
-
 
486
           
-
 
487
            /*
-
 
488
             * Releasing physical memory.
-
 
489
             * This depends on the fact that the memory was allocated using frame_alloc().
-
 
490
             */
-
 
491
            page_table_lock(as, false);
-
 
492
            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
-
 
493
            if (pte && PTE_VALID(pte)) {
-
 
494
                __address frame;
-
 
495
 
-
 
496
                ASSERT(PTE_PRESENT(pte));
-
 
497
                frame = PTE_GET_FRAME(pte);
-
 
498
                page_mapping_remove(as, area->base + i*PAGE_SIZE);
-
 
499
                page_table_unlock(as, false);
-
 
500
 
-
 
501
                frame_free(ADDR2PFN(frame));
-
 
502
            } else {
-
 
503
                page_table_unlock(as, false);
-
 
504
            }
-
 
505
        }
-
 
506
        /*
-
 
507
         * Invalidate TLBs.
-
 
508
         */
-
 
509
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
-
 
510
        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
-
 
511
        tlb_shootdown_finalize();
-
 
512
    } else {
-
 
513
        /*
-
 
514
         * Growing the area.
-
 
515
         * Check for overlaps with other address space areas.
-
 
516
         */
-
 
517
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
-
 
518
            spinlock_unlock(&area->lock);
-
 
519
            spinlock_unlock(&as->lock);    
-
 
520
            interrupts_restore(ipl);
-
 
521
            return (__address) -1;
-
 
522
        }
-
 
523
    }
-
 
524
 
-
 
525
    area->pages = pages;
-
 
526
   
-
 
527
    spinlock_unlock(&area->lock);
-
 
528
    spinlock_unlock(&as->lock);
-
 
529
    interrupts_restore(ipl);
-
 
530
 
-
 
531
    return address;
-
 
532
}
-
 
533
 
681
 
534
/** Find address space area and lock it.
682
/** Find address space area and lock it.
535
 *
683
 *
536
 * The address space must be locked and interrupts must be disabled.
684
 * The address space must be locked and interrupts must be disabled.
537
 *
685
 *
538
 * @param as Address space.
686
 * @param as Address space.
539
 * @param va Virtual address.
687
 * @param va Virtual address.
540
 *
688
 *
541
 * @return Locked address space area containing va on success or NULL on failure.
689
 * @return Locked address space area containing va on success or NULL on failure.
542
 */
690
 */
543
as_area_t *find_area_and_lock(as_t *as, __address va)
691
as_area_t *find_area_and_lock(as_t *as, __address va)
544
{
692
{
545
    as_area_t *a;
693
    as_area_t *a;
546
    btree_node_t *leaf, *lnode;
694
    btree_node_t *leaf, *lnode;
547
    int i;
695
    int i;
548
   
696
   
549
    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
697
    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
550
    if (a) {
698
    if (a) {
551
        /* va is the base address of an address space area */
699
        /* va is the base address of an address space area */
552
        spinlock_lock(&a->lock);
700
        spinlock_lock(&a->lock);
553
        return a;
701
        return a;
554
    }
702
    }
555
   
703
   
556
    /*
704
    /*
557
     * Search the leaf node and the rightmost record of its left neighbour
705
     * Search the leaf node and the rightmost record of its left neighbour
558
     * to find out whether this is a miss or va belongs to an address
706
     * to find out whether this is a miss or va belongs to an address
559
     * space area found there.
707
     * space area found there.
560
     */
708
     */
561
   
709
   
562
    /* First, search the leaf node itself. */
710
    /* First, search the leaf node itself. */
563
    for (i = 0; i < leaf->keys; i++) {
711
    for (i = 0; i < leaf->keys; i++) {
564
        a = (as_area_t *) leaf->value[i];
712
        a = (as_area_t *) leaf->value[i];
565
        spinlock_lock(&a->lock);
713
        spinlock_lock(&a->lock);
566
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
714
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
567
            return a;
715
            return a;
568
        }
716
        }
569
        spinlock_unlock(&a->lock);
717
        spinlock_unlock(&a->lock);
570
    }
718
    }
571
 
719
 
572
    /*
720
    /*
573
     * Second, locate the left neighbour and test its last record.
721
     * Second, locate the left neighbour and test its last record.
574
     * Because of its position in the B+tree, it must have base < va.
722
     * Because of its position in the B+tree, it must have base < va.
575
     */
723
     */
576
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
724
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
577
        a = (as_area_t *) lnode->value[lnode->keys - 1];
725
        a = (as_area_t *) lnode->value[lnode->keys - 1];
578
        spinlock_lock(&a->lock);
726
        spinlock_lock(&a->lock);
579
        if (va < a->base + a->pages * PAGE_SIZE) {
727
        if (va < a->base + a->pages * PAGE_SIZE) {
580
            return a;
728
            return a;
581
        }
729
        }
582
        spinlock_unlock(&a->lock);
730
        spinlock_unlock(&a->lock);
583
    }
731
    }
584
 
732
 
585
    return NULL;
733
    return NULL;
586
}
734
}
587
 
735
 
588
/** Check area conflicts with other areas.
736
/** Check area conflicts with other areas.
589
 *
737
 *
590
 * The address space must be locked and interrupts must be disabled.
738
 * The address space must be locked and interrupts must be disabled.
591
 *
739
 *
592
 * @param as Address space.
740
 * @param as Address space.
593
 * @param va Starting virtual address of the area being tested.
741
 * @param va Starting virtual address of the area being tested.
594
 * @param size Size of the area being tested.
742
 * @param size Size of the area being tested.
595
 * @param avoid_area Do not touch this area.
743
 * @param avoid_area Do not touch this area.
596
 *
744
 *
597
 * @return True if there is no conflict, false otherwise.
745
 * @return True if there is no conflict, false otherwise.
598
 */
746
 */
599
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
747
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
600
{
748
{
601
    as_area_t *a;
749
    as_area_t *a;
602
    btree_node_t *leaf, *node;
750
    btree_node_t *leaf, *node;
603
    int i;
751
    int i;
604
   
752
   
605
    /*
753
    /*
606
     * We don't want any area to have conflicts with NULL page.
754
     * We don't want any area to have conflicts with NULL page.
607
     */
755
     */
608
    if (overlaps(va, size, NULL, PAGE_SIZE))
756
    if (overlaps(va, size, NULL, PAGE_SIZE))
609
        return false;
757
        return false;
610
   
758
   
611
    /*
759
    /*
612
     * The leaf node is found in O(log n), where n is proportional to
760
     * The leaf node is found in O(log n), where n is proportional to
613
     * the number of address space areas belonging to as.
761
     * the number of address space areas belonging to as.
614
     * The check for conflicts is then attempted on the rightmost
762
     * The check for conflicts is then attempted on the rightmost
615
     * record in the left neighbour, the leftmost record in the right
763
     * record in the left neighbour, the leftmost record in the right
616
     * neighbour and all records in the leaf node itself.
764
     * neighbour and all records in the leaf node itself.
617
     */
765
     */
618
   
766
   
619
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
767
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
620
        if (a != avoid_area)
768
        if (a != avoid_area)
621
            return false;
769
            return false;
622
    }
770
    }
623
   
771
   
624
    /* First, check the two border cases. */
772
    /* First, check the two border cases. */
625
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
773
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
626
        a = (as_area_t *) node->value[node->keys - 1];
774
        a = (as_area_t *) node->value[node->keys - 1];
627
        spinlock_lock(&a->lock);
775
        spinlock_lock(&a->lock);
628
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
776
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
629
            spinlock_unlock(&a->lock);
777
            spinlock_unlock(&a->lock);
630
            return false;
778
            return false;
631
        }
779
        }
632
        spinlock_unlock(&a->lock);
780
        spinlock_unlock(&a->lock);
633
    }
781
    }
634
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
782
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
635
        a = (as_area_t *) node->value[0];
783
        a = (as_area_t *) node->value[0];
636
        spinlock_lock(&a->lock);
784
        spinlock_lock(&a->lock);
637
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
785
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
638
            spinlock_unlock(&a->lock);
786
            spinlock_unlock(&a->lock);
639
            return false;
787
            return false;
640
        }
788
        }
641
        spinlock_unlock(&a->lock);
789
        spinlock_unlock(&a->lock);
642
    }
790
    }
643
   
791
   
644
    /* Second, check the leaf node. */
792
    /* Second, check the leaf node. */
645
    for (i = 0; i < leaf->keys; i++) {
793
    for (i = 0; i < leaf->keys; i++) {
646
        a = (as_area_t *) leaf->value[i];
794
        a = (as_area_t *) leaf->value[i];
647
   
795
   
648
        if (a == avoid_area)
796
        if (a == avoid_area)
649
            continue;
797
            continue;
650
   
798
   
651
        spinlock_lock(&a->lock);
799
        spinlock_lock(&a->lock);
652
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
800
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
653
            spinlock_unlock(&a->lock);
801
            spinlock_unlock(&a->lock);
654
            return false;
802
            return false;
655
        }
803
        }
656
        spinlock_unlock(&a->lock);
804
        spinlock_unlock(&a->lock);
657
    }
805
    }
658
 
806
 
659
    /*
807
    /*
660
     * So far, the area does not conflict with other areas.
808
     * So far, the area does not conflict with other areas.
661
     * Check that it does not conflict with the kernel address space.
809
     * Check that it does not conflict with the kernel address space.
662
     */  
810
     */  
663
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
811
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
664
        return !overlaps(va, size,
812
        return !overlaps(va, size,
665
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
813
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
666
    }
814
    }
667
 
815
 
668
    return true;
816
    return true;
669
}
817
}
-
 
818
 
-
 
819
/*
-
 
820
 * Address space related syscalls.
-
 
821
 */
-
 
822
 
-
 
823
/** Wrapper for as_area_create(). */
-
 
824
__native sys_as_area_create(__address address, size_t size, int flags)
-
 
825
{
-
 
826
    if (as_area_create(AS, flags, size, address))
-
 
827
        return (__native) address;
-
 
828
    else
-
 
829
        return (__native) -1;
-
 
830
}
-
 
831
 
-
 
832
/** Wrapper for as_area_resize. */
-
 
833
__native sys_as_area_resize(__address address, size_t size, int flags)
-
 
834
{
-
 
835
    return as_area_resize(AS, address, size, 0);
-
 
836
}
-
 
837
 
-
 
838
/** Prepare task for accepting address space area from another task.
-
 
839
 *
-
 
840
 * @param uspace_accept_arg Accept structure passed from userspace.
-
 
841
 *
-
 
842
 * @return EPERM if the task ID encapsulated in @uspace_accept_arg references
-
 
843
 *     TASK. Otherwise zero is returned.
-
 
844
 */
-
 
845
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
-
 
846
{
-
 
847
    as_area_acptsnd_arg_t arg;
-
 
848
   
-
 
849
    copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));
-
 
850
   
-
 
851
    if (!arg.size)
-
 
852
        return (__native) EPERM;
-
 
853
   
-
 
854
    if (arg.task_id == TASK->taskid) {
-
 
855
        /*
-
 
856
         * Accepting from itself is not allowed.
-
 
857
         */
-
 
858
        return (__native) EPERM;
-
 
859
    }
-
 
860
   
-
 
861
    memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));
-
 
862
   
-
 
863
        return 0;
-
 
864
}
-
 
865
 
-
 
866
/** Wrapper for as_area_send. */
-
 
867
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
-
 
868
{
-
 
869
    as_area_acptsnd_arg_t arg;
-
 
870
   
-
 
871
    copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));
-
 
872
 
-
 
873
    if (!arg.size)
-
 
874
        return (__native) EPERM;
-
 
875
   
-
 
876
    if (arg.task_id == TASK->taskid) {
-
 
877
        /*
-
 
878
         * Sending to itself is not allowed.
-
 
879
         */
-
 
880
        return (__native) EPERM;
-
 
881
    }
-
 
882
 
-
 
883
    return (__native) as_area_send(arg.task_id, (__address) arg.base, arg.size, arg.flags);
-
 
884
}
670
 
885