Subversion Repositories HelenOS

Rev

Rev 2009 | Rev 2069 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2009 Rev 2015
1
/*
1
/*
2
 * Copyright (C) 2001-2006 Jakub Jermar
2
 * Copyright (C) 2001-2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup genericmm
29
/** @addtogroup genericmm
30
 * @{
30
 * @{
31
 */
31
 */
32
 
32
 
33
/**
33
/**
34
 * @file
34
 * @file
35
 * @brief   Address space related functions.
35
 * @brief   Address space related functions.
36
 *
36
 *
37
 * This file contains address space manipulation functions.
37
 * This file contains address space manipulation functions.
38
 * Roughly speaking, this is a higher-level client of
38
 * Roughly speaking, this is a higher-level client of
39
 * Virtual Address Translation (VAT) subsystem.
39
 * Virtual Address Translation (VAT) subsystem.
40
 *
40
 *
41
 * Functionality provided by this file allows one to
41
 * Functionality provided by this file allows one to
42
 * create address spaces and create, resize and share
42
 * create address spaces and create, resize and share
43
 * address space areas.
43
 * address space areas.
44
 *
44
 *
45
 * @see page.c
45
 * @see page.c
46
 *
46
 *
47
 */
47
 */
48
 
48
 
49
#include <mm/as.h>
49
#include <mm/as.h>
50
#include <arch/mm/as.h>
50
#include <arch/mm/as.h>
51
#include <mm/page.h>
51
#include <mm/page.h>
52
#include <mm/frame.h>
52
#include <mm/frame.h>
53
#include <mm/slab.h>
53
#include <mm/slab.h>
54
#include <mm/tlb.h>
54
#include <mm/tlb.h>
55
#include <arch/mm/page.h>
55
#include <arch/mm/page.h>
56
#include <genarch/mm/page_pt.h>
56
#include <genarch/mm/page_pt.h>
57
#include <genarch/mm/page_ht.h>
57
#include <genarch/mm/page_ht.h>
58
#include <mm/asid.h>
58
#include <mm/asid.h>
59
#include <arch/mm/asid.h>
59
#include <arch/mm/asid.h>
60
#include <synch/spinlock.h>
60
#include <synch/spinlock.h>
61
#include <synch/mutex.h>
61
#include <synch/mutex.h>
62
#include <adt/list.h>
62
#include <adt/list.h>
63
#include <adt/btree.h>
63
#include <adt/btree.h>
64
#include <proc/task.h>
64
#include <proc/task.h>
65
#include <proc/thread.h>
65
#include <proc/thread.h>
66
#include <arch/asm.h>
66
#include <arch/asm.h>
67
#include <panic.h>
67
#include <panic.h>
68
#include <debug.h>
68
#include <debug.h>
69
#include <print.h>
69
#include <print.h>
70
#include <memstr.h>
70
#include <memstr.h>
71
#include <macros.h>
71
#include <macros.h>
72
#include <arch.h>
72
#include <arch.h>
73
#include <errno.h>
73
#include <errno.h>
74
#include <config.h>
74
#include <config.h>
75
#include <align.h>
75
#include <align.h>
76
#include <arch/types.h>
76
#include <arch/types.h>
77
#include <typedefs.h>
77
#include <typedefs.h>
78
#include <syscall/copy.h>
78
#include <syscall/copy.h>
79
#include <arch/interrupt.h>
79
#include <arch/interrupt.h>
80
 
80
 
81
#ifdef CONFIG_VIRT_IDX_DCACHE
81
#ifdef CONFIG_VIRT_IDX_DCACHE
82
#include <arch/mm/cache.h>
82
#include <arch/mm/cache.h>
83
#endif /* CONFIG_VIRT_IDX_DCACHE */
83
#endif /* CONFIG_VIRT_IDX_DCACHE */
84
 
84
 
85
/**
85
/**
86
 * Each architecture decides what functions will be used to carry out
86
 * Each architecture decides what functions will be used to carry out
87
 * address space operations such as creating or locking page tables.
87
 * address space operations such as creating or locking page tables.
88
 */
88
 */
89
as_operations_t *as_operations = NULL;
89
as_operations_t *as_operations = NULL;
90
 
90
 
91
/**
91
/**
92
 * Slab for as_t objects.
92
 * Slab for as_t objects.
93
 */
93
 */
94
static slab_cache_t *as_slab;
94
static slab_cache_t *as_slab;
95
 
95
 
96
/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
96
/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
97
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
97
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
98
 
98
 
99
/**
99
/**
100
 * This list contains address spaces that are not active on any
100
 * This list contains address spaces that are not active on any
101
 * processor and that have valid ASID.
101
 * processor and that have valid ASID.
102
 */
102
 */
103
LIST_INITIALIZE(inactive_as_with_asid_head);
103
LIST_INITIALIZE(inactive_as_with_asid_head);
104
 
104
 
105
/** Kernel address space. */
105
/** Kernel address space. */
106
as_t *AS_KERNEL = NULL;
106
as_t *AS_KERNEL = NULL;
107
 
107
 
108
static int area_flags_to_page_flags(int aflags);
108
static int area_flags_to_page_flags(int aflags);
109
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
109
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
110
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
110
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
111
static void sh_info_remove_reference(share_info_t *sh_info);
111
static void sh_info_remove_reference(share_info_t *sh_info);
112
 
112
 
113
static int as_constructor(void *obj, int flags)
113
static int as_constructor(void *obj, int flags)
114
{
114
{
115
    as_t *as = (as_t *) obj;
115
    as_t *as = (as_t *) obj;
116
    int rc;
116
    int rc;
117
 
117
 
118
    link_initialize(&as->inactive_as_with_asid_link);
118
    link_initialize(&as->inactive_as_with_asid_link);
119
    mutex_initialize(&as->lock);   
119
    mutex_initialize(&as->lock);   
120
   
120
   
121
    rc = as_constructor_arch(as, flags);
121
    rc = as_constructor_arch(as, flags);
122
   
122
   
123
    return rc;
123
    return rc;
124
}
124
}
125
 
125
 
126
static int as_destructor(void *obj)
126
static int as_destructor(void *obj)
127
{
127
{
128
    as_t *as = (as_t *) obj;
128
    as_t *as = (as_t *) obj;
129
 
129
 
130
    return as_destructor_arch(as);
130
    return as_destructor_arch(as);
131
}
131
}
132
 
132
 
133
/** Initialize address space subsystem. */
133
/** Initialize address space subsystem. */
134
void as_init(void)
134
void as_init(void)
135
{
135
{
136
    as_arch_init();
136
    as_arch_init();
137
   
137
   
138
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
138
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
139
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
139
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
140
   
140
   
141
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
141
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
142
    if (!AS_KERNEL)
142
    if (!AS_KERNEL)
143
        panic("can't create kernel address space\n");
143
        panic("can't create kernel address space\n");
144
   
144
   
145
}
145
}
146
 
146
 
147
/** Create address space.
147
/** Create address space.
148
 *
148
 *
149
 * @param flags Flags that influence way in wich the address space is created.
149
 * @param flags Flags that influence way in wich the address space is created.
150
 */
150
 */
151
as_t *as_create(int flags)
151
as_t *as_create(int flags)
152
{
152
{
153
    as_t *as;
153
    as_t *as;
154
 
154
 
155
    as = (as_t *) slab_alloc(as_slab, 0);
155
    as = (as_t *) slab_alloc(as_slab, 0);
156
    (void) as_create_arch(as, 0);
156
    (void) as_create_arch(as, 0);
157
   
157
   
158
    btree_create(&as->as_area_btree);
158
    btree_create(&as->as_area_btree);
159
   
159
   
160
    if (flags & FLAG_AS_KERNEL)
160
    if (flags & FLAG_AS_KERNEL)
161
        as->asid = ASID_KERNEL;
161
        as->asid = ASID_KERNEL;
162
    else
162
    else
163
        as->asid = ASID_INVALID;
163
        as->asid = ASID_INVALID;
164
   
164
   
165
    as->refcount = 0;
165
    as->refcount = 0;
166
    as->cpu_refcount = 0;
166
    as->cpu_refcount = 0;
167
    as->page_table = page_table_create(flags);
167
    as->page_table = page_table_create(flags);
168
 
168
 
169
#ifdef CONFIG_VIRT_IDX_DCACHE
-
 
170
    as->dcache_flush_on_install = false;
-
 
171
    as->dcache_flush_on_deinstall = false;
-
 
172
#endif  /* CONFIG_VIRT_IDX_DCACHE */
-
 
173
 
-
 
174
    return as;
169
    return as;
175
}
170
}
176
 
171
 
177
/** Destroy adress space.
172
/** Destroy adress space.
178
 *
173
 *
179
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
174
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
180
 * the address space can be destroyed.
175
 * the address space can be destroyed.
181
 */
176
 */
182
void as_destroy(as_t *as)
177
void as_destroy(as_t *as)
183
{
178
{
184
    ipl_t ipl;
179
    ipl_t ipl;
185
    bool cond;
180
    bool cond;
186
 
181
 
187
    ASSERT(as->refcount == 0);
182
    ASSERT(as->refcount == 0);
188
   
183
   
189
    /*
184
    /*
190
     * Since there is no reference to this area,
185
     * Since there is no reference to this area,
191
     * it is safe not to lock its mutex.
186
     * it is safe not to lock its mutex.
192
     */
187
     */
193
    ipl = interrupts_disable();
188
    ipl = interrupts_disable();
194
    spinlock_lock(&inactive_as_with_asid_lock);
189
    spinlock_lock(&inactive_as_with_asid_lock);
195
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
190
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
196
        if (as != AS && as->cpu_refcount == 0)
191
        if (as != AS && as->cpu_refcount == 0)
197
            list_remove(&as->inactive_as_with_asid_link);
192
            list_remove(&as->inactive_as_with_asid_link);
198
        asid_put(as->asid);
193
        asid_put(as->asid);
199
    }
194
    }
200
    spinlock_unlock(&inactive_as_with_asid_lock);
195
    spinlock_unlock(&inactive_as_with_asid_lock);
201
 
196
 
202
    /*
197
    /*
203
     * Destroy address space areas of the address space.
198
     * Destroy address space areas of the address space.
204
     * The B+tree must be walked carefully because it is
199
     * The B+tree must be walked carefully because it is
205
     * also being destroyed.
200
     * also being destroyed.
206
     */
201
     */
207
    for (cond = true; cond; ) {
202
    for (cond = true; cond; ) {
208
        btree_node_t *node;
203
        btree_node_t *node;
209
 
204
 
210
        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
205
        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
211
        node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
206
        node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
212
 
207
 
213
        if ((cond = node->keys)) {
208
        if ((cond = node->keys)) {
214
            as_area_destroy(as, node->key[0]);
209
            as_area_destroy(as, node->key[0]);
215
        }
210
        }
216
    }
211
    }
217
 
212
 
218
    btree_destroy(&as->as_area_btree);
213
    btree_destroy(&as->as_area_btree);
219
    page_table_destroy(as->page_table);
214
    page_table_destroy(as->page_table);
220
 
215
 
221
    interrupts_restore(ipl);
216
    interrupts_restore(ipl);
222
   
217
   
223
    slab_free(as_slab, as);
218
    slab_free(as_slab, as);
224
}
219
}
225
 
220
 
226
/** Create address space area of common attributes.
221
/** Create address space area of common attributes.
227
 *
222
 *
228
 * The created address space area is added to the target address space.
223
 * The created address space area is added to the target address space.
229
 *
224
 *
230
 * @param as Target address space.
225
 * @param as Target address space.
231
 * @param flags Flags of the area memory.
226
 * @param flags Flags of the area memory.
232
 * @param size Size of area.
227
 * @param size Size of area.
233
 * @param base Base address of area.
228
 * @param base Base address of area.
234
 * @param attrs Attributes of the area.
229
 * @param attrs Attributes of the area.
235
 * @param backend Address space area backend. NULL if no backend is used.
230
 * @param backend Address space area backend. NULL if no backend is used.
236
 * @param backend_data NULL or a pointer to an array holding two void *.
231
 * @param backend_data NULL or a pointer to an array holding two void *.
237
 *
232
 *
238
 * @return Address space area on success or NULL on failure.
233
 * @return Address space area on success or NULL on failure.
239
 */
234
 */
240
as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
235
as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
241
           mem_backend_t *backend, mem_backend_data_t *backend_data)
236
           mem_backend_t *backend, mem_backend_data_t *backend_data)
242
{
237
{
243
    ipl_t ipl;
238
    ipl_t ipl;
244
    as_area_t *a;
239
    as_area_t *a;
245
   
240
   
246
    if (base % PAGE_SIZE)
241
    if (base % PAGE_SIZE)
247
        return NULL;
242
        return NULL;
248
 
243
 
249
    if (!size)
244
    if (!size)
250
        return NULL;
245
        return NULL;
251
 
246
 
252
    /* Writeable executable areas are not supported. */
247
    /* Writeable executable areas are not supported. */
253
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
248
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
254
        return NULL;
249
        return NULL;
255
   
250
   
256
    ipl = interrupts_disable();
251
    ipl = interrupts_disable();
257
    mutex_lock(&as->lock);
252
    mutex_lock(&as->lock);
258
   
253
   
259
    if (!check_area_conflicts(as, base, size, NULL)) {
254
    if (!check_area_conflicts(as, base, size, NULL)) {
260
        mutex_unlock(&as->lock);
255
        mutex_unlock(&as->lock);
261
        interrupts_restore(ipl);
256
        interrupts_restore(ipl);
262
        return NULL;
257
        return NULL;
263
    }
258
    }
264
   
259
   
265
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
260
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
266
 
261
 
267
    mutex_initialize(&a->lock);
262
    mutex_initialize(&a->lock);
268
   
263
   
269
    a->as = as;
264
    a->as = as;
270
    a->flags = flags;
265
    a->flags = flags;
271
    a->attributes = attrs;
266
    a->attributes = attrs;
272
    a->pages = SIZE2FRAMES(size);
267
    a->pages = SIZE2FRAMES(size);
273
    a->base = base;
268
    a->base = base;
274
    a->sh_info = NULL;
269
    a->sh_info = NULL;
275
    a->backend = backend;
270
    a->backend = backend;
276
    if (backend_data)
271
    if (backend_data)
277
        a->backend_data = *backend_data;
272
        a->backend_data = *backend_data;
278
    else
273
    else
279
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
274
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
280
 
275
 
281
#ifdef CONFIG_VIRT_IDX_DCACHE
-
 
282
    /*
-
 
283
     * When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the
-
 
284
     * orig_color is probably wrong until the flag is reset. In other words, it is
-
 
285
     * initialized with the color of the area being created and not with the color
-
 
286
     * of the original address space area at the beginning of the share chain. Of
-
 
287
     * course, the correct color is set by as_area_share() before the flag is
-
 
288
     * reset.
-
 
289
     */
-
 
290
    a->orig_color = PAGE_COLOR(base);
-
 
291
#endif /* CONFIG_VIRT_IDX_DCACHE */
-
 
292
 
-
 
293
    btree_create(&a->used_space);
276
    btree_create(&a->used_space);
294
   
277
   
295
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
278
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
296
 
279
 
297
    mutex_unlock(&as->lock);
280
    mutex_unlock(&as->lock);
298
    interrupts_restore(ipl);
281
    interrupts_restore(ipl);
299
 
282
 
300
    return a;
283
    return a;
301
}
284
}
302
 
285
 
303
/** Find address space area and change it.
286
/** Find address space area and change it.
304
 *
287
 *
305
 * @param as Address space.
288
 * @param as Address space.
306
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
289
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
307
 * @param size New size of the virtual memory block starting at address.
290
 * @param size New size of the virtual memory block starting at address.
308
 * @param flags Flags influencing the remap operation. Currently unused.
291
 * @param flags Flags influencing the remap operation. Currently unused.
309
 *
292
 *
310
 * @return Zero on success or a value from @ref errno.h otherwise.
293
 * @return Zero on success or a value from @ref errno.h otherwise.
311
 */
294
 */
312
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
295
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
313
{
296
{
314
    as_area_t *area;
297
    as_area_t *area;
315
    ipl_t ipl;
298
    ipl_t ipl;
316
    size_t pages;
299
    size_t pages;
317
   
300
   
318
    ipl = interrupts_disable();
301
    ipl = interrupts_disable();
319
    mutex_lock(&as->lock);
302
    mutex_lock(&as->lock);
320
   
303
   
321
    /*
304
    /*
322
     * Locate the area.
305
     * Locate the area.
323
     */
306
     */
324
    area = find_area_and_lock(as, address);
307
    area = find_area_and_lock(as, address);
325
    if (!area) {
308
    if (!area) {
326
        mutex_unlock(&as->lock);
309
        mutex_unlock(&as->lock);
327
        interrupts_restore(ipl);
310
        interrupts_restore(ipl);
328
        return ENOENT;
311
        return ENOENT;
329
    }
312
    }
330
 
313
 
331
    if (area->backend == &phys_backend) {
314
    if (area->backend == &phys_backend) {
332
        /*
315
        /*
333
         * Remapping of address space areas associated
316
         * Remapping of address space areas associated
334
         * with memory mapped devices is not supported.
317
         * with memory mapped devices is not supported.
335
         */
318
         */
336
        mutex_unlock(&area->lock);
319
        mutex_unlock(&area->lock);
337
        mutex_unlock(&as->lock);
320
        mutex_unlock(&as->lock);
338
        interrupts_restore(ipl);
321
        interrupts_restore(ipl);
339
        return ENOTSUP;
322
        return ENOTSUP;
340
    }
323
    }
341
    if (area->sh_info) {
324
    if (area->sh_info) {
342
        /*
325
        /*
343
         * Remapping of shared address space areas
326
         * Remapping of shared address space areas
344
         * is not supported.
327
         * is not supported.
345
         */
328
         */
346
        mutex_unlock(&area->lock);
329
        mutex_unlock(&area->lock);
347
        mutex_unlock(&as->lock);
330
        mutex_unlock(&as->lock);
348
        interrupts_restore(ipl);
331
        interrupts_restore(ipl);
349
        return ENOTSUP;
332
        return ENOTSUP;
350
    }
333
    }
351
 
334
 
352
    pages = SIZE2FRAMES((address - area->base) + size);
335
    pages = SIZE2FRAMES((address - area->base) + size);
353
    if (!pages) {
336
    if (!pages) {
354
        /*
337
        /*
355
         * Zero size address space areas are not allowed.
338
         * Zero size address space areas are not allowed.
356
         */
339
         */
357
        mutex_unlock(&area->lock);
340
        mutex_unlock(&area->lock);
358
        mutex_unlock(&as->lock);
341
        mutex_unlock(&as->lock);
359
        interrupts_restore(ipl);
342
        interrupts_restore(ipl);
360
        return EPERM;
343
        return EPERM;
361
    }
344
    }
362
   
345
   
363
    if (pages < area->pages) {
346
    if (pages < area->pages) {
364
        bool cond;
347
        bool cond;
365
        uintptr_t start_free = area->base + pages*PAGE_SIZE;
348
        uintptr_t start_free = area->base + pages*PAGE_SIZE;
366
 
349
 
367
        /*
350
        /*
368
         * Shrinking the area.
351
         * Shrinking the area.
369
         * No need to check for overlaps.
352
         * No need to check for overlaps.
370
         */
353
         */
371
 
354
 
372
        /*
355
        /*
373
         * Start TLB shootdown sequence.
356
         * Start TLB shootdown sequence.
374
         */
357
         */
375
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
358
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
376
 
359
 
377
        /*
360
        /*
378
         * Remove frames belonging to used space starting from
361
         * Remove frames belonging to used space starting from
379
         * the highest addresses downwards until an overlap with
362
         * the highest addresses downwards until an overlap with
380
         * the resized address space area is found. Note that this
363
         * the resized address space area is found. Note that this
381
         * is also the right way to remove part of the used_space
364
         * is also the right way to remove part of the used_space
382
         * B+tree leaf list.
365
         * B+tree leaf list.
383
         */    
366
         */    
384
        for (cond = true; cond;) {
367
        for (cond = true; cond;) {
385
            btree_node_t *node;
368
            btree_node_t *node;
386
       
369
       
387
            ASSERT(!list_empty(&area->used_space.leaf_head));
370
            ASSERT(!list_empty(&area->used_space.leaf_head));
388
            node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
371
            node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
389
            if ((cond = (bool) node->keys)) {
372
            if ((cond = (bool) node->keys)) {
390
                uintptr_t b = node->key[node->keys - 1];
373
                uintptr_t b = node->key[node->keys - 1];
391
                count_t c = (count_t) node->value[node->keys - 1];
374
                count_t c = (count_t) node->value[node->keys - 1];
392
                int i = 0;
375
                int i = 0;
393
           
376
           
394
                if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
377
                if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
395
                   
378
                   
396
                    if (b + c*PAGE_SIZE <= start_free) {
379
                    if (b + c*PAGE_SIZE <= start_free) {
397
                        /*
380
                        /*
398
                         * The whole interval fits completely
381
                         * The whole interval fits completely
399
                         * in the resized address space area.
382
                         * in the resized address space area.
400
                         */
383
                         */
401
                        break;
384
                        break;
402
                    }
385
                    }
403
       
386
       
404
                    /*
387
                    /*
405
                     * Part of the interval corresponding to b and c
388
                     * Part of the interval corresponding to b and c
406
                     * overlaps with the resized address space area.
389
                     * overlaps with the resized address space area.
407
                     */
390
                     */
408
       
391
       
409
                    cond = false;   /* we are almost done */
392
                    cond = false;   /* we are almost done */
410
                    i = (start_free - b) >> PAGE_WIDTH;
393
                    i = (start_free - b) >> PAGE_WIDTH;
411
                    if (!used_space_remove(area, start_free, c - i))
394
                    if (!used_space_remove(area, start_free, c - i))
412
                        panic("Could not remove used space.\n");
395
                        panic("Could not remove used space.\n");
413
                } else {
396
                } else {
414
                    /*
397
                    /*
415
                     * The interval of used space can be completely removed.
398
                     * The interval of used space can be completely removed.
416
                     */
399
                     */
417
                    if (!used_space_remove(area, b, c))
400
                    if (!used_space_remove(area, b, c))
418
                        panic("Could not remove used space.\n");
401
                        panic("Could not remove used space.\n");
419
                }
402
                }
420
           
403
           
421
                for (; i < c; i++) {
404
                for (; i < c; i++) {
422
                    pte_t *pte;
405
                    pte_t *pte;
423
           
406
           
424
                    page_table_lock(as, false);
407
                    page_table_lock(as, false);
425
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
408
                    pte = page_mapping_find(as, b + i*PAGE_SIZE);
426
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
409
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
427
                    if (area->backend && area->backend->frame_free) {
410
                    if (area->backend && area->backend->frame_free) {
428
                        area->backend->frame_free(area,
411
                        area->backend->frame_free(area,
429
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
412
                            b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
430
                    }
413
                    }
431
                    page_mapping_remove(as, b + i*PAGE_SIZE);
414
                    page_mapping_remove(as, b + i*PAGE_SIZE);
432
                    page_table_unlock(as, false);
415
                    page_table_unlock(as, false);
433
                }
416
                }
434
            }
417
            }
435
        }
418
        }
436
 
419
 
437
        /*
420
        /*
438
         * Finish TLB shootdown sequence.
421
         * Finish TLB shootdown sequence.
439
         */
422
         */
440
        tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
423
        tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
441
        tlb_shootdown_finalize();
424
        tlb_shootdown_finalize();
442
       
425
       
443
        /*
426
        /*
444
         * Invalidate software translation caches (e.g. TSB on sparc64).
427
         * Invalidate software translation caches (e.g. TSB on sparc64).
445
         */
428
         */
446
        as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
429
        as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
447
    } else {
430
    } else {
448
        /*
431
        /*
449
         * Growing the area.
432
         * Growing the area.
450
         * Check for overlaps with other address space areas.
433
         * Check for overlaps with other address space areas.
451
         */
434
         */
452
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
435
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
453
            mutex_unlock(&area->lock);
436
            mutex_unlock(&area->lock);
454
            mutex_unlock(&as->lock);       
437
            mutex_unlock(&as->lock);       
455
            interrupts_restore(ipl);
438
            interrupts_restore(ipl);
456
            return EADDRNOTAVAIL;
439
            return EADDRNOTAVAIL;
457
        }
440
        }
458
    }
441
    }
459
 
442
 
460
    area->pages = pages;
443
    area->pages = pages;
461
   
444
   
462
    mutex_unlock(&area->lock);
445
    mutex_unlock(&area->lock);
463
    mutex_unlock(&as->lock);
446
    mutex_unlock(&as->lock);
464
    interrupts_restore(ipl);
447
    interrupts_restore(ipl);
465
 
448
 
466
    return 0;
449
    return 0;
467
}
450
}
468
 
451
 
469
/** Destroy address space area.
452
/** Destroy address space area.
470
 *
453
 *
471
 * @param as Address space.
454
 * @param as Address space.
472
 * @param address Address withing the area to be deleted.
455
 * @param address Address withing the area to be deleted.
473
 *
456
 *
474
 * @return Zero on success or a value from @ref errno.h on failure.
457
 * @return Zero on success or a value from @ref errno.h on failure.
475
 */
458
 */
476
int as_area_destroy(as_t *as, uintptr_t address)
459
int as_area_destroy(as_t *as, uintptr_t address)
477
{
460
{
478
    as_area_t *area;
461
    as_area_t *area;
479
    uintptr_t base;
462
    uintptr_t base;
480
    link_t *cur;
463
    link_t *cur;
481
    ipl_t ipl;
464
    ipl_t ipl;
482
 
465
 
483
    ipl = interrupts_disable();
466
    ipl = interrupts_disable();
484
    mutex_lock(&as->lock);
467
    mutex_lock(&as->lock);
485
 
468
 
486
    area = find_area_and_lock(as, address);
469
    area = find_area_and_lock(as, address);
487
    if (!area) {
470
    if (!area) {
488
        mutex_unlock(&as->lock);
471
        mutex_unlock(&as->lock);
489
        interrupts_restore(ipl);
472
        interrupts_restore(ipl);
490
        return ENOENT;
473
        return ENOENT;
491
    }
474
    }
492
 
475
 
493
    base = area->base;
476
    base = area->base;
494
 
477
 
495
    /*
478
    /*
496
     * Start TLB shootdown sequence.
479
     * Start TLB shootdown sequence.
497
     */
480
     */
498
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
481
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
499
 
482
 
500
    /*
483
    /*
501
     * Visit only the pages mapped by used_space B+tree.
484
     * Visit only the pages mapped by used_space B+tree.
502
     */
485
     */
503
    for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
486
    for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
504
        btree_node_t *node;
487
        btree_node_t *node;
505
        int i;
488
        int i;
506
       
489
       
507
        node = list_get_instance(cur, btree_node_t, leaf_link);
490
        node = list_get_instance(cur, btree_node_t, leaf_link);
508
        for (i = 0; i < node->keys; i++) {
491
        for (i = 0; i < node->keys; i++) {
509
            uintptr_t b = node->key[i];
492
            uintptr_t b = node->key[i];
510
            count_t j;
493
            count_t j;
511
            pte_t *pte;
494
            pte_t *pte;
512
           
495
           
513
            for (j = 0; j < (count_t) node->value[i]; j++) {
496
            for (j = 0; j < (count_t) node->value[i]; j++) {
514
                page_table_lock(as, false);
497
                page_table_lock(as, false);
515
                pte = page_mapping_find(as, b + j*PAGE_SIZE);
498
                pte = page_mapping_find(as, b + j*PAGE_SIZE);
516
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
499
                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
517
                if (area->backend && area->backend->frame_free) {
500
                if (area->backend && area->backend->frame_free) {
518
                    area->backend->frame_free(area,
501
                    area->backend->frame_free(area,
519
                        b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
502
                        b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
520
                }
503
                }
521
                page_mapping_remove(as, b + j*PAGE_SIZE);              
504
                page_mapping_remove(as, b + j*PAGE_SIZE);              
522
                page_table_unlock(as, false);
505
                page_table_unlock(as, false);
523
            }
506
            }
524
        }
507
        }
525
    }
508
    }
526
 
509
 
527
    /*
510
    /*
528
     * Finish TLB shootdown sequence.
511
     * Finish TLB shootdown sequence.
529
     */
512
     */
530
    tlb_invalidate_pages(as->asid, area->base, area->pages);
513
    tlb_invalidate_pages(as->asid, area->base, area->pages);
531
    tlb_shootdown_finalize();
514
    tlb_shootdown_finalize();
532
   
515
   
533
    /*
516
    /*
534
     * Invalidate potential software translation caches (e.g. TSB on sparc64).
517
     * Invalidate potential software translation caches (e.g. TSB on sparc64).
535
     */
518
     */
536
    as_invalidate_translation_cache(as, area->base, area->pages);
519
    as_invalidate_translation_cache(as, area->base, area->pages);
537
   
520
   
538
    btree_destroy(&area->used_space);
521
    btree_destroy(&area->used_space);
539
 
522
 
540
    area->attributes |= AS_AREA_ATTR_PARTIAL;
523
    area->attributes |= AS_AREA_ATTR_PARTIAL;
541
   
524
   
542
    if (area->sh_info)
525
    if (area->sh_info)
543
        sh_info_remove_reference(area->sh_info);
526
        sh_info_remove_reference(area->sh_info);
544
       
527
       
545
    mutex_unlock(&area->lock);
528
    mutex_unlock(&area->lock);
546
 
529
 
547
    /*
530
    /*
548
     * Remove the empty area from address space.
531
     * Remove the empty area from address space.
549
     */
532
     */
550
    btree_remove(&as->as_area_btree, base, NULL);
533
    btree_remove(&as->as_area_btree, base, NULL);
551
   
534
   
552
    free(area);
535
    free(area);
553
   
536
   
554
    mutex_unlock(&as->lock);
537
    mutex_unlock(&as->lock);
555
    interrupts_restore(ipl);
538
    interrupts_restore(ipl);
556
    return 0;
539
    return 0;
557
}
540
}
558
 
541
 
559
/** Share address space area with another or the same address space.
542
/** Share address space area with another or the same address space.
560
 *
543
 *
561
 * Address space area mapping is shared with a new address space area.
544
 * Address space area mapping is shared with a new address space area.
562
 * If the source address space area has not been shared so far,
545
 * If the source address space area has not been shared so far,
563
 * a new sh_info is created. The new address space area simply gets the
546
 * a new sh_info is created. The new address space area simply gets the
564
 * sh_info of the source area. The process of duplicating the
547
 * sh_info of the source area. The process of duplicating the
565
 * mapping is done through the backend share function.
548
 * mapping is done through the backend share function.
566
 *
549
 *
567
 * @param src_as Pointer to source address space.
550
 * @param src_as Pointer to source address space.
568
 * @param src_base Base address of the source address space area.
551
 * @param src_base Base address of the source address space area.
569
 * @param acc_size Expected size of the source area.
552
 * @param acc_size Expected size of the source area.
570
 * @param dst_as Pointer to destination address space.
553
 * @param dst_as Pointer to destination address space.
571
 * @param dst_base Target base address.
554
 * @param dst_base Target base address.
572
 * @param dst_flags_mask Destination address space area flags mask.
555
 * @param dst_flags_mask Destination address space area flags mask.
573
 *
556
 *
574
 * @return Zero on success or ENOENT if there is no such task or if there is no
557
 * @return Zero on success or ENOENT if there is no such task or if there is no
575
 * such address space area, EPERM if there was a problem in accepting the area
558
 * such address space area, EPERM if there was a problem in accepting the area
576
 * or ENOMEM if there was a problem in allocating destination address space
559
 * or ENOMEM if there was a problem in allocating destination address space
577
 * area. ENOTSUP is returned if the address space area backend does not support
560
 * area. ENOTSUP is returned if the address space area backend does not support
-
 
561
 * sharing or if the kernel detects an attempt to create an illegal address
578
 * sharing.
562
 * alias.
579
 */
563
 */
580
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
564
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
581
          as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
565
          as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
582
{
566
{
583
    ipl_t ipl;
567
    ipl_t ipl;
584
    int src_flags;
568
    int src_flags;
585
    size_t src_size;
569
    size_t src_size;
586
    int src_orig_color;
-
 
587
    as_area_t *src_area, *dst_area;
570
    as_area_t *src_area, *dst_area;
588
    share_info_t *sh_info;
571
    share_info_t *sh_info;
589
    mem_backend_t *src_backend;
572
    mem_backend_t *src_backend;
590
    mem_backend_data_t src_backend_data;
573
    mem_backend_data_t src_backend_data;
591
   
574
   
592
    ipl = interrupts_disable();
575
    ipl = interrupts_disable();
593
    mutex_lock(&src_as->lock);
576
    mutex_lock(&src_as->lock);
594
    src_area = find_area_and_lock(src_as, src_base);
577
    src_area = find_area_and_lock(src_as, src_base);
595
    if (!src_area) {
578
    if (!src_area) {
596
        /*
579
        /*
597
         * Could not find the source address space area.
580
         * Could not find the source address space area.
598
         */
581
         */
599
        mutex_unlock(&src_as->lock);
582
        mutex_unlock(&src_as->lock);
600
        interrupts_restore(ipl);
583
        interrupts_restore(ipl);
601
        return ENOENT;
584
        return ENOENT;
602
    }
585
    }
603
   
-
 
604
 
586
 
605
    if (!src_area->backend || !src_area->backend->share) {
587
    if (!src_area->backend || !src_area->backend->share) {
606
        /*
588
        /*
607
         * There is no backend or the backend does not
589
         * There is no backend or the backend does not
608
         * know how to share the area.
590
         * know how to share the area.
609
         */
591
         */
610
        mutex_unlock(&src_area->lock);
592
        mutex_unlock(&src_area->lock);
611
        mutex_unlock(&src_as->lock);
593
        mutex_unlock(&src_as->lock);
612
        interrupts_restore(ipl);
594
        interrupts_restore(ipl);
613
        return ENOTSUP;
595
        return ENOTSUP;
614
    }
596
    }
615
   
597
   
616
    src_size = src_area->pages * PAGE_SIZE;
598
    src_size = src_area->pages * PAGE_SIZE;
617
    src_flags = src_area->flags;
599
    src_flags = src_area->flags;
618
    src_backend = src_area->backend;
600
    src_backend = src_area->backend;
619
    src_backend_data = src_area->backend_data;
601
    src_backend_data = src_area->backend_data;
620
    src_orig_color = src_area->orig_color;
-
 
621
 
602
 
622
    /* Share the cacheable flag from the original mapping */
603
    /* Share the cacheable flag from the original mapping */
623
    if (src_flags & AS_AREA_CACHEABLE)
604
    if (src_flags & AS_AREA_CACHEABLE)
624
        dst_flags_mask |= AS_AREA_CACHEABLE;
605
        dst_flags_mask |= AS_AREA_CACHEABLE;
625
 
606
 
626
    if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
607
    if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
627
        mutex_unlock(&src_area->lock);
608
        mutex_unlock(&src_area->lock);
628
        mutex_unlock(&src_as->lock);
609
        mutex_unlock(&src_as->lock);
629
        interrupts_restore(ipl);
610
        interrupts_restore(ipl);
630
        return EPERM;
611
        return EPERM;
631
    }
612
    }
632
 
613
 
-
 
614
#ifdef CONFIG_VIRT_IDX_DCACHE
-
 
615
    if (!(dst_flags_mask & AS_AREA_EXEC)) {
-
 
616
        if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
-
 
617
            /*
-
 
618
             * Refuse to create an illegal address alias.
-
 
619
             */
-
 
620
            mutex_unlock(&src_area->lock);
-
 
621
            mutex_unlock(&src_as->lock);
-
 
622
            interrupts_restore(ipl);
-
 
623
            return ENOTSUP;
-
 
624
        }
-
 
625
    }
-
 
626
#endif /* CONFIG_VIRT_IDX_DCACHE */
-
 
627
 
633
    /*
628
    /*
634
     * Now we are committed to sharing the area.
629
     * Now we are committed to sharing the area.
635
     * First, prepare the area for sharing.
630
     * First, prepare the area for sharing.
636
     * Then it will be safe to unlock it.
631
     * Then it will be safe to unlock it.
637
     */
632
     */
638
    sh_info = src_area->sh_info;
633
    sh_info = src_area->sh_info;
639
    if (!sh_info) {
634
    if (!sh_info) {
640
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
635
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
641
        mutex_initialize(&sh_info->lock);
636
        mutex_initialize(&sh_info->lock);
642
        sh_info->refcount = 2;
637
        sh_info->refcount = 2;
643
        btree_create(&sh_info->pagemap);
638
        btree_create(&sh_info->pagemap);
644
        src_area->sh_info = sh_info;
639
        src_area->sh_info = sh_info;
645
    } else {
640
    } else {
646
        mutex_lock(&sh_info->lock);
641
        mutex_lock(&sh_info->lock);
647
        sh_info->refcount++;
642
        sh_info->refcount++;
648
        mutex_unlock(&sh_info->lock);
643
        mutex_unlock(&sh_info->lock);
649
    }
644
    }
650
 
645
 
651
    src_area->backend->share(src_area);
646
    src_area->backend->share(src_area);
652
 
647
 
653
    mutex_unlock(&src_area->lock);
648
    mutex_unlock(&src_area->lock);
654
    mutex_unlock(&src_as->lock);
649
    mutex_unlock(&src_as->lock);
655
 
650
 
656
    /*
651
    /*
657
     * Create copy of the source address space area.
652
     * Create copy of the source address space area.
658
     * The destination area is created with AS_AREA_ATTR_PARTIAL
653
     * The destination area is created with AS_AREA_ATTR_PARTIAL
659
     * attribute set which prevents race condition with
654
     * attribute set which prevents race condition with
660
     * preliminary as_page_fault() calls.
655
     * preliminary as_page_fault() calls.
661
     * The flags of the source area are masked against dst_flags_mask
656
     * The flags of the source area are masked against dst_flags_mask
662
     * to support sharing in less privileged mode.
657
     * to support sharing in less privileged mode.
663
     */
658
     */
664
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
659
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
665
                  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
660
                  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
666
    if (!dst_area) {
661
    if (!dst_area) {
667
        /*
662
        /*
668
         * Destination address space area could not be created.
663
         * Destination address space area could not be created.
669
         */
664
         */
670
        sh_info_remove_reference(sh_info);
665
        sh_info_remove_reference(sh_info);
671
       
666
       
672
        interrupts_restore(ipl);
667
        interrupts_restore(ipl);
673
        return ENOMEM;
668
        return ENOMEM;
674
    }
669
    }
675
 
670
 
676
    /*
671
    /*
677
     * Now the destination address space area has been
672
     * Now the destination address space area has been
678
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
673
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
679
     * attribute and set the sh_info.
674
     * attribute and set the sh_info.
680
     */
675
     */
681
    mutex_lock(&dst_as->lock); 
676
    mutex_lock(&dst_as->lock); 
682
    mutex_lock(&dst_area->lock);
677
    mutex_lock(&dst_area->lock);
683
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
678
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
684
    dst_area->sh_info = sh_info;
679
    dst_area->sh_info = sh_info;
685
    dst_area->orig_color = src_orig_color;
-
 
686
#ifdef CONFIG_VIRT_IDX_DCACHE
-
 
687
    if (src_orig_color != PAGE_COLOR(dst_base)) {
-
 
688
        /*
-
 
689
         * We have just detected an attempt to create an invalid address
-
 
690
         * alias. We allow this and set a special flag that tells the
-
 
691
         * architecture specific code to flush the D-cache when the
-
 
692
         * offending address space is installed and deinstalled
-
 
693
         * (cleanup).
-
 
694
         *
-
 
695
         * In order for the flags to take effect immediately, we also
-
 
696
         * perform a global D-cache shootdown.
-
 
697
         */
-
 
698
        dcache_shootdown_start();
-
 
699
        dst_as->dcache_flush_on_install = true;
-
 
700
        dst_as->dcache_flush_on_deinstall = true;
-
 
701
        dcache_flush();
-
 
702
        dcache_shootdown_finalize();
-
 
703
    }
-
 
704
#endif /* CONFIG_VIRT_IDX_DCACHE */
-
 
705
    mutex_unlock(&dst_area->lock);
680
    mutex_unlock(&dst_area->lock);
706
    mutex_unlock(&dst_as->lock);   
681
    mutex_unlock(&dst_as->lock);   
707
 
682
 
708
    interrupts_restore(ipl);
683
    interrupts_restore(ipl);
709
   
684
   
710
    return 0;
685
    return 0;
711
}
686
}
712
 
687
 
713
/** Check access mode for address space area.
688
/** Check access mode for address space area.
714
 *
689
 *
715
 * The address space area must be locked prior to this call.
690
 * The address space area must be locked prior to this call.
716
 *
691
 *
717
 * @param area Address space area.
692
 * @param area Address space area.
718
 * @param access Access mode.
693
 * @param access Access mode.
719
 *
694
 *
720
 * @return False if access violates area's permissions, true otherwise.
695
 * @return False if access violates area's permissions, true otherwise.
721
 */
696
 */
722
bool as_area_check_access(as_area_t *area, pf_access_t access)
697
bool as_area_check_access(as_area_t *area, pf_access_t access)
723
{
698
{
724
    int flagmap[] = {
699
    int flagmap[] = {
725
        [PF_ACCESS_READ] = AS_AREA_READ,
700
        [PF_ACCESS_READ] = AS_AREA_READ,
726
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
701
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
727
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
702
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
728
    };
703
    };
729
 
704
 
730
    if (!(area->flags & flagmap[access]))
705
    if (!(area->flags & flagmap[access]))
731
        return false;
706
        return false;
732
   
707
   
733
    return true;
708
    return true;
734
}
709
}
735
 
710
 
736
/** Handle page fault within the current address space.
711
/** Handle page fault within the current address space.
737
 *
712
 *
738
 * This is the high-level page fault handler. It decides
713
 * This is the high-level page fault handler. It decides
739
 * whether the page fault can be resolved by any backend
714
 * whether the page fault can be resolved by any backend
740
 * and if so, it invokes the backend to resolve the page
715
 * and if so, it invokes the backend to resolve the page
741
 * fault.
716
 * fault.
742
 *
717
 *
743
 * Interrupts are assumed disabled.
718
 * Interrupts are assumed disabled.
744
 *
719
 *
745
 * @param page Faulting page.
720
 * @param page Faulting page.
746
 * @param access Access mode that caused the fault (i.e. read/write/exec).
721
 * @param access Access mode that caused the fault (i.e. read/write/exec).
747
 * @param istate Pointer to interrupted state.
722
 * @param istate Pointer to interrupted state.
748
 *
723
 *
749
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
724
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
750
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
725
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
751
 */
726
 */
752
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
727
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
753
{
728
{
754
    pte_t *pte;
729
    pte_t *pte;
755
    as_area_t *area;
730
    as_area_t *area;
756
   
731
   
757
    if (!THREAD)
732
    if (!THREAD)
758
        return AS_PF_FAULT;
733
        return AS_PF_FAULT;
759
       
734
       
760
    ASSERT(AS);
735
    ASSERT(AS);
761
 
736
 
762
    mutex_lock(&AS->lock);
737
    mutex_lock(&AS->lock);
763
    area = find_area_and_lock(AS, page);   
738
    area = find_area_and_lock(AS, page);   
764
    if (!area) {
739
    if (!area) {
765
        /*
740
        /*
766
         * No area contained mapping for 'page'.
741
         * No area contained mapping for 'page'.
767
         * Signal page fault to low-level handler.
742
         * Signal page fault to low-level handler.
768
         */
743
         */
769
        mutex_unlock(&AS->lock);
744
        mutex_unlock(&AS->lock);
770
        goto page_fault;
745
        goto page_fault;
771
    }
746
    }
772
 
747
 
773
    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
748
    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
774
        /*
749
        /*
775
         * The address space area is not fully initialized.
750
         * The address space area is not fully initialized.
776
         * Avoid possible race by returning error.
751
         * Avoid possible race by returning error.
777
         */
752
         */
778
        mutex_unlock(&area->lock);
753
        mutex_unlock(&area->lock);
779
        mutex_unlock(&AS->lock);
754
        mutex_unlock(&AS->lock);
780
        goto page_fault;       
755
        goto page_fault;       
781
    }
756
    }
782
 
757
 
783
    if (!area->backend || !area->backend->page_fault) {
758
    if (!area->backend || !area->backend->page_fault) {
784
        /*
759
        /*
785
         * The address space area is not backed by any backend
760
         * The address space area is not backed by any backend
786
         * or the backend cannot handle page faults.
761
         * or the backend cannot handle page faults.
787
         */
762
         */
788
        mutex_unlock(&area->lock);
763
        mutex_unlock(&area->lock);
789
        mutex_unlock(&AS->lock);
764
        mutex_unlock(&AS->lock);
790
        goto page_fault;       
765
        goto page_fault;       
791
    }
766
    }
792
 
767
 
793
    page_table_lock(AS, false);
768
    page_table_lock(AS, false);
794
   
769
   
795
    /*
770
    /*
796
     * To avoid race condition between two page faults
771
     * To avoid race condition between two page faults
797
     * on the same address, we need to make sure
772
     * on the same address, we need to make sure
798
     * the mapping has not been already inserted.
773
     * the mapping has not been already inserted.
799
     */
774
     */
800
    if ((pte = page_mapping_find(AS, page))) {
775
    if ((pte = page_mapping_find(AS, page))) {
801
        if (PTE_PRESENT(pte)) {
776
        if (PTE_PRESENT(pte)) {
802
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
777
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
803
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
778
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
804
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
779
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
805
                page_table_unlock(AS, false);
780
                page_table_unlock(AS, false);
806
                mutex_unlock(&area->lock);
781
                mutex_unlock(&area->lock);
807
                mutex_unlock(&AS->lock);
782
                mutex_unlock(&AS->lock);
808
                return AS_PF_OK;
783
                return AS_PF_OK;
809
            }
784
            }
810
        }
785
        }
811
    }
786
    }
812
   
787
   
813
    /*
788
    /*
814
     * Resort to the backend page fault handler.
789
     * Resort to the backend page fault handler.
815
     */
790
     */
816
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
791
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
817
        page_table_unlock(AS, false);
792
        page_table_unlock(AS, false);
818
        mutex_unlock(&area->lock);
793
        mutex_unlock(&area->lock);
819
        mutex_unlock(&AS->lock);
794
        mutex_unlock(&AS->lock);
820
        goto page_fault;
795
        goto page_fault;
821
    }
796
    }
822
   
797
   
823
    page_table_unlock(AS, false);
798
    page_table_unlock(AS, false);
824
    mutex_unlock(&area->lock);
799
    mutex_unlock(&area->lock);
825
    mutex_unlock(&AS->lock);
800
    mutex_unlock(&AS->lock);
826
    return AS_PF_OK;
801
    return AS_PF_OK;
827
 
802
 
828
page_fault:
803
page_fault:
829
    if (THREAD->in_copy_from_uspace) {
804
    if (THREAD->in_copy_from_uspace) {
830
        THREAD->in_copy_from_uspace = false;
805
        THREAD->in_copy_from_uspace = false;
831
        istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
806
        istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
832
    } else if (THREAD->in_copy_to_uspace) {
807
    } else if (THREAD->in_copy_to_uspace) {
833
        THREAD->in_copy_to_uspace = false;
808
        THREAD->in_copy_to_uspace = false;
834
        istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
809
        istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
835
    } else {
810
    } else {
836
        return AS_PF_FAULT;
811
        return AS_PF_FAULT;
837
    }
812
    }
838
 
813
 
839
    return AS_PF_DEFER;
814
    return AS_PF_DEFER;
840
}
815
}
841
 
816
 
842
/** Switch address spaces.
817
/** Switch address spaces.
843
 *
818
 *
844
 * Note that this function cannot sleep as it is essentially a part of
819
 * Note that this function cannot sleep as it is essentially a part of
845
 * scheduling. Sleeping here would lead to deadlock on wakeup.
820
 * scheduling. Sleeping here would lead to deadlock on wakeup.
846
 *
821
 *
847
 * @param old Old address space or NULL.
822
 * @param old Old address space or NULL.
848
 * @param new New address space.
823
 * @param new New address space.
849
 */
824
 */
850
void as_switch(as_t *old, as_t *new)
825
void as_switch(as_t *old, as_t *new)
851
{
826
{
852
    ipl_t ipl;
827
    ipl_t ipl;
853
    bool needs_asid = false;
828
    bool needs_asid = false;
854
   
829
   
855
    ipl = interrupts_disable();
830
    ipl = interrupts_disable();
856
    spinlock_lock(&inactive_as_with_asid_lock);
831
    spinlock_lock(&inactive_as_with_asid_lock);
857
 
832
 
858
    /*
833
    /*
859
     * First, take care of the old address space.
834
     * First, take care of the old address space.
860
     */
835
     */
861
    if (old) {
836
    if (old) {
862
        mutex_lock_active(&old->lock);
837
        mutex_lock_active(&old->lock);
863
        ASSERT(old->cpu_refcount);
838
        ASSERT(old->cpu_refcount);
864
        if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
839
        if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
865
            /*
840
            /*
866
             * The old address space is no longer active on
841
             * The old address space is no longer active on
867
             * any processor. It can be appended to the
842
             * any processor. It can be appended to the
868
             * list of inactive address spaces with assigned
843
             * list of inactive address spaces with assigned
869
             * ASID.
844
             * ASID.
870
             */
845
             */
871
             ASSERT(old->asid != ASID_INVALID);
846
             ASSERT(old->asid != ASID_INVALID);
872
             list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
847
             list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
873
        }
848
        }
874
        mutex_unlock(&old->lock);
849
        mutex_unlock(&old->lock);
875
 
850
 
876
        /*
851
        /*
877
         * Perform architecture-specific tasks when the address space
852
         * Perform architecture-specific tasks when the address space
878
         * is being removed from the CPU.
853
         * is being removed from the CPU.
879
         */
854
         */
880
        as_deinstall_arch(old);
855
        as_deinstall_arch(old);
881
    }
856
    }
882
 
857
 
883
    /*
858
    /*
884
     * Second, prepare the new address space.
859
     * Second, prepare the new address space.
885
     */
860
     */
886
    mutex_lock_active(&new->lock);
861
    mutex_lock_active(&new->lock);
887
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
862
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
888
        if (new->asid != ASID_INVALID)
863
        if (new->asid != ASID_INVALID)
889
            list_remove(&new->inactive_as_with_asid_link);
864
            list_remove(&new->inactive_as_with_asid_link);
890
        else
865
        else
891
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
866
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
892
    }
867
    }
893
    SET_PTL0_ADDRESS(new->page_table);
868
    SET_PTL0_ADDRESS(new->page_table);
894
    mutex_unlock(&new->lock);
869
    mutex_unlock(&new->lock);
895
 
870
 
896
    if (needs_asid) {
871
    if (needs_asid) {
897
        /*
872
        /*
898
         * Allocation of new ASID was deferred
873
         * Allocation of new ASID was deferred
899
         * until now in order to avoid deadlock.
874
         * until now in order to avoid deadlock.
900
         */
875
         */
901
        asid_t asid;
876
        asid_t asid;
902
       
877
       
903
        asid = asid_get();
878
        asid = asid_get();
904
        mutex_lock_active(&new->lock);
879
        mutex_lock_active(&new->lock);
905
        new->asid = asid;
880
        new->asid = asid;
906
        mutex_unlock(&new->lock);
881
        mutex_unlock(&new->lock);
907
    }
882
    }
908
    spinlock_unlock(&inactive_as_with_asid_lock);
883
    spinlock_unlock(&inactive_as_with_asid_lock);
909
    interrupts_restore(ipl);
884
    interrupts_restore(ipl);
910
   
885
   
911
    /*
886
    /*
912
     * Perform architecture-specific steps.
887
     * Perform architecture-specific steps.
913
     * (e.g. write ASID to hardware register etc.)
888
     * (e.g. write ASID to hardware register etc.)
914
     */
889
     */
915
    as_install_arch(new);
890
    as_install_arch(new);
916
   
891
   
917
    AS = new;
892
    AS = new;
918
}
893
}
919
 
894
 
920
/** Convert address space area flags to page flags.
895
/** Convert address space area flags to page flags.
921
 *
896
 *
922
 * @param aflags Flags of some address space area.
897
 * @param aflags Flags of some address space area.
923
 *
898
 *
924
 * @return Flags to be passed to page_mapping_insert().
899
 * @return Flags to be passed to page_mapping_insert().
925
 */
900
 */
926
int area_flags_to_page_flags(int aflags)
901
int area_flags_to_page_flags(int aflags)
927
{
902
{
928
    int flags;
903
    int flags;
929
 
904
 
930
    flags = PAGE_USER | PAGE_PRESENT;
905
    flags = PAGE_USER | PAGE_PRESENT;
931
   
906
   
932
    if (aflags & AS_AREA_READ)
907
    if (aflags & AS_AREA_READ)
933
        flags |= PAGE_READ;
908
        flags |= PAGE_READ;
934
       
909
       
935
    if (aflags & AS_AREA_WRITE)
910
    if (aflags & AS_AREA_WRITE)
936
        flags |= PAGE_WRITE;
911
        flags |= PAGE_WRITE;
937
   
912
   
938
    if (aflags & AS_AREA_EXEC)
913
    if (aflags & AS_AREA_EXEC)
939
        flags |= PAGE_EXEC;
914
        flags |= PAGE_EXEC;
940
   
915
   
941
    if (aflags & AS_AREA_CACHEABLE)
916
    if (aflags & AS_AREA_CACHEABLE)
942
        flags |= PAGE_CACHEABLE;
917
        flags |= PAGE_CACHEABLE;
943
       
918
       
944
    return flags;
919
    return flags;
945
}
920
}
946
 
921
 
947
/** Compute flags for virtual address translation subsytem.
922
/** Compute flags for virtual address translation subsytem.
948
 *
923
 *
949
 * The address space area must be locked.
924
 * The address space area must be locked.
950
 * Interrupts must be disabled.
925
 * Interrupts must be disabled.
951
 *
926
 *
952
 * @param a Address space area.
927
 * @param a Address space area.
953
 *
928
 *
954
 * @return Flags to be used in page_mapping_insert().
929
 * @return Flags to be used in page_mapping_insert().
955
 */
930
 */
956
int as_area_get_flags(as_area_t *a)
931
int as_area_get_flags(as_area_t *a)
957
{
932
{
958
    return area_flags_to_page_flags(a->flags);
933
    return area_flags_to_page_flags(a->flags);
959
}
934
}
960
 
935
 
961
/** Create page table.
936
/** Create page table.
962
 *
937
 *
963
 * Depending on architecture, create either address space
938
 * Depending on architecture, create either address space
964
 * private or global page table.
939
 * private or global page table.
965
 *
940
 *
966
 * @param flags Flags saying whether the page table is for kernel address space.
941
 * @param flags Flags saying whether the page table is for kernel address space.
967
 *
942
 *
968
 * @return First entry of the page table.
943
 * @return First entry of the page table.
969
 */
944
 */
970
pte_t *page_table_create(int flags)
945
pte_t *page_table_create(int flags)
971
{
946
{
972
        ASSERT(as_operations);
947
        ASSERT(as_operations);
973
        ASSERT(as_operations->page_table_create);
948
        ASSERT(as_operations->page_table_create);
974
 
949
 
975
        return as_operations->page_table_create(flags);
950
        return as_operations->page_table_create(flags);
976
}
951
}
977
 
952
 
978
/** Destroy page table.
953
/** Destroy page table.
979
 *
954
 *
980
 * Destroy page table in architecture specific way.
955
 * Destroy page table in architecture specific way.
981
 *
956
 *
982
 * @param page_table Physical address of PTL0.
957
 * @param page_table Physical address of PTL0.
983
 */
958
 */
984
void page_table_destroy(pte_t *page_table)
959
void page_table_destroy(pte_t *page_table)
985
{
960
{
986
        ASSERT(as_operations);
961
        ASSERT(as_operations);
987
        ASSERT(as_operations->page_table_destroy);
962
        ASSERT(as_operations->page_table_destroy);
988
 
963
 
989
        as_operations->page_table_destroy(page_table);
964
        as_operations->page_table_destroy(page_table);
990
}
965
}
991
 
966
 
992
/** Lock page table.
967
/** Lock page table.
993
 *
968
 *
994
 * This function should be called before any page_mapping_insert(),
969
 * This function should be called before any page_mapping_insert(),
995
 * page_mapping_remove() and page_mapping_find().
970
 * page_mapping_remove() and page_mapping_find().
996
 *
971
 *
997
 * Locking order is such that address space areas must be locked
972
 * Locking order is such that address space areas must be locked
998
 * prior to this call. Address space can be locked prior to this
973
 * prior to this call. Address space can be locked prior to this
999
 * call in which case the lock argument is false.
974
 * call in which case the lock argument is false.
1000
 *
975
 *
1001
 * @param as Address space.
976
 * @param as Address space.
1002
 * @param lock If false, do not attempt to lock as->lock.
977
 * @param lock If false, do not attempt to lock as->lock.
1003
 */
978
 */
1004
void page_table_lock(as_t *as, bool lock)
979
void page_table_lock(as_t *as, bool lock)
1005
{
980
{
1006
    ASSERT(as_operations);
981
    ASSERT(as_operations);
1007
    ASSERT(as_operations->page_table_lock);
982
    ASSERT(as_operations->page_table_lock);
1008
 
983
 
1009
    as_operations->page_table_lock(as, lock);
984
    as_operations->page_table_lock(as, lock);
1010
}
985
}
1011
 
986
 
1012
/** Unlock page table.
987
/** Unlock page table.
1013
 *
988
 *
1014
 * @param as Address space.
989
 * @param as Address space.
1015
 * @param unlock If false, do not attempt to unlock as->lock.
990
 * @param unlock If false, do not attempt to unlock as->lock.
1016
 */
991
 */
1017
void page_table_unlock(as_t *as, bool unlock)
992
void page_table_unlock(as_t *as, bool unlock)
1018
{
993
{
1019
    ASSERT(as_operations);
994
    ASSERT(as_operations);
1020
    ASSERT(as_operations->page_table_unlock);
995
    ASSERT(as_operations->page_table_unlock);
1021
 
996
 
1022
    as_operations->page_table_unlock(as, unlock);
997
    as_operations->page_table_unlock(as, unlock);
1023
}
998
}
1024
 
999
 
1025
 
1000
 
1026
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * Candidate areas are locked before the containment test and unlocked
 * again on mismatch, so that on success the matching area is returned
 * already locked.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;
    
    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }
    
    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */
    
    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        /* Check whether va falls inside [base, base + pages * PAGE_SIZE). */
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        /* base < va is implied; only the upper bound needs checking. */
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /* No area contains va. */
    return NULL;
}
/** Check area conflicts with other areas.
1055
/** Check area conflicts with other areas.
1081
 *
1056
 *
1082
 * The address space must be locked and interrupts must be disabled.
1057
 * The address space must be locked and interrupts must be disabled.
1083
 *
1058
 *
1084
 * @param as Address space.
1059
 * @param as Address space.
1085
 * @param va Starting virtual address of the area being tested.
1060
 * @param va Starting virtual address of the area being tested.
1086
 * @param size Size of the area being tested.
1061
 * @param size Size of the area being tested.
1087
 * @param avoid_area Do not touch this area.
1062
 * @param avoid_area Do not touch this area.
1088
 *
1063
 *
1089
 * @return True if there is no conflict, false otherwise.
1064
 * @return True if there is no conflict, false otherwise.
1090
 */
1065
 */
1091
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
1066
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
1092
{
1067
{
1093
    as_area_t *a;
1068
    as_area_t *a;
1094
    btree_node_t *leaf, *node;
1069
    btree_node_t *leaf, *node;
1095
    int i;
1070
    int i;
1096
   
1071
   
1097
    /*
1072
    /*
1098
     * We don't want any area to have conflicts with NULL page.
1073
     * We don't want any area to have conflicts with NULL page.
1099
     */
1074
     */
1100
    if (overlaps(va, size, NULL, PAGE_SIZE))
1075
    if (overlaps(va, size, NULL, PAGE_SIZE))
1101
        return false;
1076
        return false;
1102
   
1077
   
1103
    /*
1078
    /*
1104
     * The leaf node is found in O(log n), where n is proportional to
1079
     * The leaf node is found in O(log n), where n is proportional to
1105
     * the number of address space areas belonging to as.
1080
     * the number of address space areas belonging to as.
1106
     * The check for conflicts is then attempted on the rightmost
1081
     * The check for conflicts is then attempted on the rightmost
1107
     * record in the left neighbour, the leftmost record in the right
1082
     * record in the left neighbour, the leftmost record in the right
1108
     * neighbour and all records in the leaf node itself.
1083
     * neighbour and all records in the leaf node itself.
1109
     */
1084
     */
1110
   
1085
   
1111
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1086
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1112
        if (a != avoid_area)
1087
        if (a != avoid_area)
1113
            return false;
1088
            return false;
1114
    }
1089
    }
1115
   
1090
   
1116
    /* First, check the two border cases. */
1091
    /* First, check the two border cases. */
1117
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1092
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1118
        a = (as_area_t *) node->value[node->keys - 1];
1093
        a = (as_area_t *) node->value[node->keys - 1];
1119
        mutex_lock(&a->lock);
1094
        mutex_lock(&a->lock);
1120
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1095
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1121
            mutex_unlock(&a->lock);
1096
            mutex_unlock(&a->lock);
1122
            return false;
1097
            return false;
1123
        }
1098
        }
1124
        mutex_unlock(&a->lock);
1099
        mutex_unlock(&a->lock);
1125
    }
1100
    }
1126
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1101
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
1127
        a = (as_area_t *) node->value[0];
1102
        a = (as_area_t *) node->value[0];
1128
        mutex_lock(&a->lock);
1103
        mutex_lock(&a->lock);
1129
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1104
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1130
            mutex_unlock(&a->lock);
1105
            mutex_unlock(&a->lock);
1131
            return false;
1106
            return false;
1132
        }
1107
        }
1133
        mutex_unlock(&a->lock);
1108
        mutex_unlock(&a->lock);
1134
    }
1109
    }
1135
   
1110
   
1136
    /* Second, check the leaf node. */
1111
    /* Second, check the leaf node. */
1137
    for (i = 0; i < leaf->keys; i++) {
1112
    for (i = 0; i < leaf->keys; i++) {
1138
        a = (as_area_t *) leaf->value[i];
1113
        a = (as_area_t *) leaf->value[i];
1139
   
1114
   
1140
        if (a == avoid_area)
1115
        if (a == avoid_area)
1141
            continue;
1116
            continue;
1142
   
1117
   
1143
        mutex_lock(&a->lock);
1118
        mutex_lock(&a->lock);
1144
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1119
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1145
            mutex_unlock(&a->lock);
1120
            mutex_unlock(&a->lock);
1146
            return false;
1121
            return false;
1147
        }
1122
        }
1148
        mutex_unlock(&a->lock);
1123
        mutex_unlock(&a->lock);
1149
    }
1124
    }
1150
 
1125
 
1151
    /*
1126
    /*
1152
     * So far, the area does not conflict with other areas.
1127
     * So far, the area does not conflict with other areas.
1153
     * Check if it doesn't conflict with kernel address space.
1128
     * Check if it doesn't conflict with kernel address space.
1154
     */  
1129
     */  
1155
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1130
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1156
        return !overlaps(va, size,
1131
        return !overlaps(va, size,
1157
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1132
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
1158
    }
1133
    }
1159
 
1134
 
1160
    return true;
1135
    return true;
1161
}
1136
}
1162
 
1137
 
1163
/** Return size of the address space area with given base.  */
1138
/** Return size of the address space area with given base.  */
1164
size_t as_get_size(uintptr_t base)
1139
size_t as_get_size(uintptr_t base)
1165
{
1140
{
1166
    ipl_t ipl;
1141
    ipl_t ipl;
1167
    as_area_t *src_area;
1142
    as_area_t *src_area;
1168
    size_t size;
1143
    size_t size;
1169
 
1144
 
1170
    ipl = interrupts_disable();
1145
    ipl = interrupts_disable();
1171
    src_area = find_area_and_lock(AS, base);
1146
    src_area = find_area_and_lock(AS, base);
1172
    if (src_area){
1147
    if (src_area){
1173
        size = src_area->pages * PAGE_SIZE;
1148
        size = src_area->pages * PAGE_SIZE;
1174
        mutex_unlock(&src_area->lock);
1149
        mutex_unlock(&src_area->lock);
1175
    } else {
1150
    } else {
1176
        size = 0;
1151
        size = 0;
1177
    }
1152
    }
1178
    interrupts_restore(ipl);
1153
    interrupts_restore(ipl);
1179
    return size;
1154
    return size;
1180
}
1155
}
1181
 
1156
 
1182
/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * The used_space B+tree maps the first page of an interval to the number
 * of pages in that interval. This function keeps adjacent intervals
 * merged: the new interval is coalesced with its left and/or right
 * neighbour whenever they adjoin, and insertion fails (returns 0) when
 * it overlaps space that is already marked used.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of page to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We hit the beginning of some used space.
         */
        return 0;
    }

    if (!leaf->keys) {
        /* The tree is empty; start the first interval. */
        btree_insert(&a->used_space, page, (void *) count, leaf);
        return 1;
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node) {
        uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
        count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
        
        /*
         * Examine the possibility that the interval fits
         * somewhere between the rightmost interval of
         * the left neighbour and the first interval of the leaf.
         */
         
        if (page >= right_pg) {
            /* Do nothing; handled by the right-neighbour/leaf cases below. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, leaf);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            node->value[node->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving base of the right
             * interval down and increasing its size accordingly.
             */
            leaf->value[0] += count;
            leaf->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page < leaf->key[0]) {
        uintptr_t right_pg = leaf->key[0];
        count_t right_cnt = (count_t) leaf->value[0];
    
        /*
         * Investigate the border case in which the left neighbour does not
         * exist but the interval fits from the left.
         */
         
        if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by moving the base of the right interval down
             * and increasing its size accordingly.
             */
            leaf->key[0] = page;
            leaf->value[0] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin with the right interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
    if (node) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
        
        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing; handled by the in-leaf loop below. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, node);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving base of the right
             * interval down and increasing its size accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
    
        /*
         * Investigate the border case in which the right neighbour does not
         * exist but the interval fits from the right.
         */
         
        if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (left_pg + left_cnt*PAGE_SIZE == page) {
            /* The interval can be added by growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else {
            /*
             * The interval doesn't adjoin with the left interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }
    
    /*
     * Note that if the algorithm made it thus far, the interval can fit only
     * between two other intervals of the leaf. The two border cases were already
     * resolved.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
            count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
                /* The interval intersects with the left interval. */
                return 0;
            } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
                /* The interval intersects with the right interval. */
                return 0;
            } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
                /* The interval can be added by merging the two already present intervals. */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&a->used_space, right_pg, leaf);
                return 1;
            } else if (page == left_pg + left_cnt*PAGE_SIZE) {
                /* The interval can be added by simply growing the left interval. */
                leaf->value[i - 1] += count;
                return 1;
            } else if (page + count*PAGE_SIZE == right_pg) {
                /*
                 * The interval can be added by simply moving base of the right
                 * interval down and increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                return 1;
            } else {
                /*
                 * The interval is between both neighbouring intervals,
                 * but cannot be merged with any of them.
                 */
                btree_insert(&a->used_space, page, (void *) count, leaf);
                return 1;
            }
        }
    }

    panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
}
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;
	
	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);
	
	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			/* Request is larger than the whole interval; refuse. */
			return 0;
		} else if (count == pages) {
			/* Exact match: remove the entire interval. */
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}
	
	/*
	 * page does not start any interval. It may still lie inside the
	 * rightmost interval of the left neighbour leaf node.
	 * NOTE(review): relies on btree_search() setting leaf even on a
	 * miss — confirm against the btree implementation.
	 */
	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;
				
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		/* No left neighbour and page precedes this leaf: not used space. */
		return 0;
	}
	
	if (page > leaf->key[leaf->keys - 1]) {
		/* page lies beyond the start of the leaf's last interval. */
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;
				
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}	
	
	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;
				
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
}
1562
 
1537
 
1563
/** Remove reference to address space area share info.
1538
/** Remove reference to address space area share info.
1564
 *
1539
 *
1565
 * If the reference count drops to 0, the sh_info is deallocated.
1540
 * If the reference count drops to 0, the sh_info is deallocated.
1566
 *
1541
 *
1567
 * @param sh_info Pointer to address space area share info.
1542
 * @param sh_info Pointer to address space area share info.
1568
 */
1543
 */
1569
void sh_info_remove_reference(share_info_t *sh_info)
1544
void sh_info_remove_reference(share_info_t *sh_info)
1570
{
1545
{
1571
    bool dealloc = false;
1546
    bool dealloc = false;
1572
 
1547
 
1573
    mutex_lock(&sh_info->lock);
1548
    mutex_lock(&sh_info->lock);
1574
    ASSERT(sh_info->refcount);
1549
    ASSERT(sh_info->refcount);
1575
    if (--sh_info->refcount == 0) {
1550
    if (--sh_info->refcount == 0) {
1576
        dealloc = true;
1551
        dealloc = true;
1577
        link_t *cur;
1552
        link_t *cur;
1578
       
1553
       
1579
        /*
1554
        /*
1580
         * Now walk carefully the pagemap B+tree and free/remove
1555
         * Now walk carefully the pagemap B+tree and free/remove
1581
         * reference from all frames found there.
1556
         * reference from all frames found there.
1582
         */
1557
         */
1583
        for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1558
        for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1584
            btree_node_t *node;
1559
            btree_node_t *node;
1585
            int i;
1560
            int i;
1586
           
1561
           
1587
            node = list_get_instance(cur, btree_node_t, leaf_link);
1562
            node = list_get_instance(cur, btree_node_t, leaf_link);
1588
            for (i = 0; i < node->keys; i++)
1563
            for (i = 0; i < node->keys; i++)
1589
                frame_free((uintptr_t) node->value[i]);
1564
                frame_free((uintptr_t) node->value[i]);
1590
        }
1565
        }
1591
       
1566
       
1592
    }
1567
    }
1593
    mutex_unlock(&sh_info->lock);
1568
    mutex_unlock(&sh_info->lock);
1594
   
1569
   
1595
    if (dealloc) {
1570
    if (dealloc) {
1596
        btree_destroy(&sh_info->pagemap);
1571
        btree_destroy(&sh_info->pagemap);
1597
        free(sh_info);
1572
        free(sh_info);
1598
    }
1573
    }
1599
}
1574
}
1600
 
1575
 
1601
/*
1576
/*
1602
 * Address space related syscalls.
1577
 * Address space related syscalls.
1603
 */
1578
 */
1604
 
1579
 
1605
/** Wrapper for as_area_create(). */
1580
/** Wrapper for as_area_create(). */
1606
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1581
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1607
{
1582
{
1608
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1583
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
1609
        return (unative_t) address;
1584
        return (unative_t) address;
1610
    else
1585
    else
1611
        return (unative_t) -1;
1586
        return (unative_t) -1;
1612
}
1587
}
1613
 
1588
 
1614
/** Wrapper for as_area_resize().
 *
 * NOTE: the @a flags argument is currently ignored; 0 is passed to
 * as_area_resize() instead.
 */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
    return (unative_t) as_area_resize(AS, address, size, 0);
}
1619
 
1594
 
1620
/** Wrapper for as_area_destroy().
 *
 * Destroys the area starting at @a address in the current address space.
 */
unative_t sys_as_area_destroy(uintptr_t address)
{
    return (unative_t) as_area_destroy(AS, address);
}
1625
 
1600
 
1626
/** Print out information about address space.
1601
/** Print out information about address space.
1627
 *
1602
 *
1628
 * @param as Address space.
1603
 * @param as Address space.
1629
 */
1604
 */
1630
void as_print(as_t *as)
1605
void as_print(as_t *as)
1631
{
1606
{
1632
    ipl_t ipl;
1607
    ipl_t ipl;
1633
   
1608
   
1634
    ipl = interrupts_disable();
1609
    ipl = interrupts_disable();
1635
    mutex_lock(&as->lock);
1610
    mutex_lock(&as->lock);
1636
   
1611
   
1637
    /* print out info about address space areas */
1612
    /* print out info about address space areas */
1638
    link_t *cur;
1613
    link_t *cur;
1639
    for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1614
    for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1640
        btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
1615
        btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
1641
       
1616
       
1642
        int i;
1617
        int i;
1643
        for (i = 0; i < node->keys; i++) {
1618
        for (i = 0; i < node->keys; i++) {
1644
            as_area_t *area = node->value[i];
1619
            as_area_t *area = node->value[i];
1645
       
1620
       
1646
            mutex_lock(&area->lock);
1621
            mutex_lock(&area->lock);
1647
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1622
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1648
                area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
1623
                area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
1649
            mutex_unlock(&area->lock);
1624
            mutex_unlock(&area->lock);
1650
        }
1625
        }
1651
    }
1626
    }
1652
   
1627
   
1653
    mutex_unlock(&as->lock);
1628
    mutex_unlock(&as->lock);
1654
    interrupts_restore(ipl);
1629
    interrupts_restore(ipl);
1655
}
1630
}
1656
 
1631
 
1657
/** @}
1632
/** @}
1658
 */
1633
 */
1659
 
1634