Subversion Repositories HelenOS

--- Rev 2007
+++ Rev 2009
 /*
  * Copyright (C) 2001-2006 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * - Redistributions of source code must retain the above copyright
  *   notice, this list of conditions and the following disclaimer.
  * - Redistributions in binary form must reproduce the above copyright
  *   notice, this list of conditions and the following disclaimer in the
  *   documentation and/or other materials provided with the distribution.
  * - The name of the author may not be used to endorse or promote products
  *   derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 /** @addtogroup genericmm
  * @{
  */
 
 /**
  * @file
  * @brief   Address space related functions.
  *
  * This file contains address space manipulation functions.
  * Roughly speaking, this is a higher-level client of the
  * Virtual Address Translation (VAT) subsystem.
  *
  * Functionality provided by this file allows one to
  * create address spaces and create, resize and share
  * address space areas.
  *
  * @see page.c
  *
  */
 
 #include <mm/as.h>
 #include <arch/mm/as.h>
 #include <mm/page.h>
 #include <mm/frame.h>
 #include <mm/slab.h>
 #include <mm/tlb.h>
 #include <arch/mm/page.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
 #include <mm/asid.h>
 #include <arch/mm/asid.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
 #include <adt/list.h>
 #include <adt/btree.h>
 #include <proc/task.h>
 #include <proc/thread.h>
 #include <arch/asm.h>
 #include <panic.h>
 #include <debug.h>
 #include <print.h>
 #include <memstr.h>
 #include <macros.h>
 #include <arch.h>
 #include <errno.h>
 #include <config.h>
 #include <align.h>
 #include <arch/types.h>
 #include <typedefs.h>
 #include <syscall/copy.h>
 #include <arch/interrupt.h>
 
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+
 /**
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
  */
 as_operations_t *as_operations = NULL;
 
 /**
  * Slab for as_t objects.
  */
 static slab_cache_t *as_slab;
 
 /** This lock protects the inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
 SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
 
 /**
  * This list contains address spaces that are not active on any
  * processor and that have a valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
 
 /** Kernel address space. */
 as_t *AS_KERNEL = NULL;
 
 static int area_flags_to_page_flags(int aflags);
 static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
 static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
 static void sh_info_remove_reference(share_info_t *sh_info);
 
 static int as_constructor(void *obj, int flags)
 {
     as_t *as = (as_t *) obj;
     int rc;
 
     link_initialize(&as->inactive_as_with_asid_link);
     mutex_initialize(&as->lock);
 
     rc = as_constructor_arch(as, flags);
 
     return rc;
 }
 
 static int as_destructor(void *obj)
 {
     as_t *as = (as_t *) obj;
 
     return as_destructor_arch(as);
 }
 
 /** Initialize address space subsystem. */
 void as_init(void)
 {
     as_arch_init();
 
     as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
         as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
 
     AS_KERNEL = as_create(FLAG_AS_KERNEL);
     if (!AS_KERNEL)
         panic("can't create kernel address space\n");
 
 }
 
 /** Create address space.
  *
  * @param flags Flags that influence the way in which the address space is created.
  */
 as_t *as_create(int flags)
 {
     as_t *as;
 
     as = (as_t *) slab_alloc(as_slab, 0);
     (void) as_create_arch(as, 0);
 
     btree_create(&as->as_area_btree);
 
     if (flags & FLAG_AS_KERNEL)
         as->asid = ASID_KERNEL;
     else
         as->asid = ASID_INVALID;
 
     as->refcount = 0;
     as->cpu_refcount = 0;
     as->page_table = page_table_create(flags);
 
+#ifdef CONFIG_VIRT_IDX_DCACHE
+    as->dcache_flush_on_install = false;
+    as->dcache_flush_on_deinstall = false;
+#endif  /* CONFIG_VIRT_IDX_DCACHE */
+
     return as;
 }
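
Editor's usage sketch (not part of either revision): user address spaces are
created with no flags, so they start with ASID_INVALID and only receive a real
ASID lazily, in as_switch(). The reference counting below stands in for the
real caller (task management code) and is an assumption, not code from this
file.

    as_t *as = as_create(0);    /* user AS; ASID assigned on first as_switch() */

    as->refcount++;             /* hypothetical owner taking a reference */
    /* ... */
    if (--as->refcount == 0)    /* last reference gone */
        as_destroy(as);         /* asserts refcount == 0, tears everything down */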
 
 /** Destroy address space.
  *
  * When there are no tasks referencing this address space (i.e. its refcount is zero),
  * the address space can be destroyed.
  */
 void as_destroy(as_t *as)
 {
     ipl_t ipl;
     bool cond;
 
     ASSERT(as->refcount == 0);
 
     /*
      * Since there is no reference to this area,
      * it is safe not to lock its mutex.
      */
     ipl = interrupts_disable();
     spinlock_lock(&inactive_as_with_asid_lock);
     if (as->asid != ASID_INVALID && as != AS_KERNEL) {
         if (as != AS && as->cpu_refcount == 0)
             list_remove(&as->inactive_as_with_asid_link);
         asid_put(as->asid);
     }
     spinlock_unlock(&inactive_as_with_asid_lock);
 
     /*
      * Destroy address space areas of the address space.
      * The B+tree must be walked carefully because it is
      * also being destroyed.
      */
     for (cond = true; cond; ) {
         btree_node_t *node;
 
         ASSERT(!list_empty(&as->as_area_btree.leaf_head));
         node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
 
         if ((cond = node->keys)) {
             as_area_destroy(as, node->key[0]);
         }
     }
 
     btree_destroy(&as->as_area_btree);
     page_table_destroy(as->page_table);
 
     interrupts_restore(ipl);
 
     slab_free(as_slab, as);
 }
 
 /** Create address space area of common attributes.
  *
  * The created address space area is added to the target address space.
  *
  * @param as Target address space.
  * @param flags Flags of the area memory.
  * @param size Size of area.
  * @param base Base address of area.
  * @param attrs Attributes of the area.
  * @param backend Address space area backend. NULL if no backend is used.
  * @param backend_data NULL or a pointer to an array holding two void *.
  *
  * @return Address space area on success or NULL on failure.
  */
 as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
            mem_backend_t *backend, mem_backend_data_t *backend_data)
 {
     ipl_t ipl;
     as_area_t *a;
 
     if (base % PAGE_SIZE)
         return NULL;
 
     if (!size)
         return NULL;
 
     /* Writeable executable areas are not supported. */
     if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
         return NULL;
 
     ipl = interrupts_disable();
     mutex_lock(&as->lock);
 
     if (!check_area_conflicts(as, base, size, NULL)) {
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return NULL;
     }
 
     a = (as_area_t *) malloc(sizeof(as_area_t), 0);
 
     mutex_initialize(&a->lock);
 
     a->as = as;
     a->flags = flags;
     a->attributes = attrs;
     a->pages = SIZE2FRAMES(size);
     a->base = base;
     a->sh_info = NULL;
     a->backend = backend;
     if (backend_data)
         a->backend_data = *backend_data;
     else
         memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
 
+#ifdef CONFIG_VIRT_IDX_DCACHE
+    /*
+     * When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the
+     * orig_color is probably wrong until the flag is reset. In other words, it is
+     * initialized with the color of the area being created and not with the color
+     * of the original address space area at the beginning of the share chain. Of
+     * course, the correct color is set by as_area_share() before the flag is
+     * reset.
+     */
+    a->orig_color = PAGE_COLOR(base);
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+
     btree_create(&a->used_space);
 
     btree_insert(&as->as_area_btree, base, (void *) a, NULL);
 
     mutex_unlock(&as->lock);
     interrupts_restore(ipl);
 
     return a;
 }
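
A minimal caller sketch for as_area_create() (editor's addition, not in either
revision); the anon_backend and AS_AREA_ATTR_NONE names are assumed from
elsewhere in the kernel tree:

    /* Map a 16-page anonymous read/write area at va in address space as. */
    as_area_t *area = as_area_create(as,
        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        16 * PAGE_SIZE, va, AS_AREA_ATTR_NONE, &anon_backend, NULL);
    if (!area)
        return ENOMEM;  /* conflict, unaligned base, zero size or W+X flags */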
 
 /** Find address space area and change it.
  *
  * @param as Address space.
  * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
  * @param size New size of the virtual memory block starting at address.
  * @param flags Flags influencing the remap operation. Currently unused.
  *
  * @return Zero on success or a value from @ref errno.h otherwise.
  */
 int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
 {
     as_area_t *area;
     ipl_t ipl;
     size_t pages;
 
     ipl = interrupts_disable();
     mutex_lock(&as->lock);
 
     /*
      * Locate the area.
      */
     area = find_area_and_lock(as, address);
     if (!area) {
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return ENOENT;
     }
 
     if (area->backend == &phys_backend) {
         /*
          * Remapping of address space areas associated
          * with memory mapped devices is not supported.
          */
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return ENOTSUP;
     }
     if (area->sh_info) {
         /*
          * Remapping of shared address space areas
          * is not supported.
          */
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return ENOTSUP;
     }
 
     pages = SIZE2FRAMES((address - area->base) + size);
     if (!pages) {
         /*
          * Zero size address space areas are not allowed.
          */
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return EPERM;
     }
 
     if (pages < area->pages) {
         bool cond;
         uintptr_t start_free = area->base + pages*PAGE_SIZE;
 
         /*
          * Shrinking the area.
          * No need to check for overlaps.
          */
 
         /*
          * Start TLB shootdown sequence.
          */
         tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
 
         /*
          * Remove frames belonging to used space starting from
          * the highest addresses downwards until an overlap with
          * the resized address space area is found. Note that this
          * is also the right way to remove part of the used_space
          * B+tree leaf list.
          */
         for (cond = true; cond;) {
             btree_node_t *node;
 
             ASSERT(!list_empty(&area->used_space.leaf_head));
             node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
             if ((cond = (bool) node->keys)) {
                 uintptr_t b = node->key[node->keys - 1];
                 count_t c = (count_t) node->value[node->keys - 1];
                 int i = 0;
 
                 if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
 
                     if (b + c*PAGE_SIZE <= start_free) {
                         /*
                          * The whole interval fits completely
                          * in the resized address space area.
                          */
                         break;
                     }
 
                     /*
                      * Part of the interval corresponding to b and c
                      * overlaps with the resized address space area.
                      */
 
                     cond = false;   /* we are almost done */
                     i = (start_free - b) >> PAGE_WIDTH;
                     if (!used_space_remove(area, start_free, c - i))
                         panic("Could not remove used space.\n");
                 } else {
                     /*
                      * The interval of used space can be completely removed.
                      */
                     if (!used_space_remove(area, b, c))
                         panic("Could not remove used space.\n");
                 }
 
                 for (; i < c; i++) {
                     pte_t *pte;
 
                     page_table_lock(as, false);
                     pte = page_mapping_find(as, b + i*PAGE_SIZE);
                     ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                     if (area->backend && area->backend->frame_free) {
                         area->backend->frame_free(area,
                             b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                     }
                     page_mapping_remove(as, b + i*PAGE_SIZE);
                     page_table_unlock(as, false);
                 }
             }
         }
 
         /*
          * Finish TLB shootdown sequence.
          */
         tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
         tlb_shootdown_finalize();
 
         /*
          * Invalidate software translation caches (e.g. TSB on sparc64).
          */
         as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
     } else {
         /*
          * Growing the area.
          * Check for overlaps with other address space areas.
          */
         if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
             interrupts_restore(ipl);
             return EADDRNOTAVAIL;
         }
     }
 
     area->pages = pages;
 
     mutex_unlock(&area->lock);
     mutex_unlock(&as->lock);
     interrupts_restore(ipl);
 
     return 0;
 }
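
A worked example of the size computation above (editor's addition; the numbers
are hypothetical): pages = SIZE2FRAMES((address - area->base) + size) measures
the new length from the area's base, not from the passed address.

    /* Given an area based at 0x40000000 spanning 16 pages: */
    as_area_resize(as, 0x40000000, 8 * PAGE_SIZE, 0);
    /* pages == 8 < 16: shrink path, the trailing 8 pages are unmapped */

    as_area_resize(as, 0x40000000 + 4 * PAGE_SIZE, 8 * PAGE_SIZE, 0);
    /* pages == SIZE2FRAMES(12 * PAGE_SIZE) == 12: shrink to 12 pages */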
 
 /** Destroy address space area.
  *
  * @param as Address space.
  * @param address Address within the area to be deleted.
  *
  * @return Zero on success or a value from @ref errno.h on failure.
  */
 int as_area_destroy(as_t *as, uintptr_t address)
 {
     as_area_t *area;
     uintptr_t base;
     link_t *cur;
     ipl_t ipl;
 
     ipl = interrupts_disable();
     mutex_lock(&as->lock);
 
     area = find_area_and_lock(as, address);
     if (!area) {
         mutex_unlock(&as->lock);
         interrupts_restore(ipl);
         return ENOENT;
     }
 
     base = area->base;
 
     /*
      * Start TLB shootdown sequence.
      */
     tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
 
     /*
      * Visit only the pages mapped by used_space B+tree.
      */
     for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
         btree_node_t *node;
         int i;
 
         node = list_get_instance(cur, btree_node_t, leaf_link);
         for (i = 0; i < node->keys; i++) {
             uintptr_t b = node->key[i];
             count_t j;
             pte_t *pte;
 
             for (j = 0; j < (count_t) node->value[i]; j++) {
                 page_table_lock(as, false);
                 pte = page_mapping_find(as, b + j*PAGE_SIZE);
                 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                 if (area->backend && area->backend->frame_free) {
                     area->backend->frame_free(area,
                         b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
                 }
                 page_mapping_remove(as, b + j*PAGE_SIZE);
                 page_table_unlock(as, false);
             }
         }
     }
 
     /*
      * Finish TLB shootdown sequence.
      */
     tlb_invalidate_pages(as->asid, area->base, area->pages);
     tlb_shootdown_finalize();
 
     /*
      * Invalidate potential software translation caches (e.g. TSB on sparc64).
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
 
     btree_destroy(&area->used_space);
 
     area->attributes |= AS_AREA_ATTR_PARTIAL;
 
     if (area->sh_info)
         sh_info_remove_reference(area->sh_info);
 
     mutex_unlock(&area->lock);
 
     /*
      * Remove the empty area from address space.
      */
     btree_remove(&as->as_area_btree, base, NULL);
 
     free(area);
 
     mutex_unlock(&as->lock);
     interrupts_restore(ipl);
     return 0;
 }
 
 /** Share address space area with another or the same address space.
  *
  * Address space area mapping is shared with a new address space area.
  * If the source address space area has not been shared so far,
  * a new sh_info is created. The new address space area simply gets the
  * sh_info of the source area. The process of duplicating the
  * mapping is done through the backend share function.
  *
  * @param src_as Pointer to source address space.
  * @param src_base Base address of the source address space area.
  * @param acc_size Expected size of the source area.
  * @param dst_as Pointer to destination address space.
  * @param dst_base Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
  *
  * @return Zero on success or ENOENT if there is no such task or if there is no
  * such address space area, EPERM if there was a problem in accepting the area
  * or ENOMEM if there was a problem in allocating destination address space
  * area. ENOTSUP is returned if the address space area backend does not support
- * sharing. It can be also returned if the architecture uses virtually indexed
- * caches and the source and destination areas start at pages with different
- * page colors.
+ * sharing.
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
           as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
 {
     ipl_t ipl;
     int src_flags;
     size_t src_size;
+    int src_orig_color;
     as_area_t *src_area, *dst_area;
     share_info_t *sh_info;
     mem_backend_t *src_backend;
     mem_backend_data_t src_backend_data;
 
     ipl = interrupts_disable();
     mutex_lock(&src_as->lock);
     src_area = find_area_and_lock(src_as, src_base);
     if (!src_area) {
         /*
          * Could not find the source address space area.
          */
         mutex_unlock(&src_as->lock);
         interrupts_restore(ipl);
         return ENOENT;
     }
 
-#if 0   /* disable the check for now */
-#ifdef CONFIG_VIRT_IDX_CACHE
-    if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
-        /*
-         * Refuse to create illegal address alias.
-         */
-        mutex_unlock(&src_area->lock);
-        mutex_unlock(&src_as->lock);
-        interrupts_restore(ipl);
-        return ENOTSUP;
-    }
-#endif /* CONFIG_VIRT_IDX_CACHE */
-#endif
 
     if (!src_area->backend || !src_area->backend->share) {
         /*
          * There is no backend or the backend does not
          * know how to share the area.
          */
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
         interrupts_restore(ipl);
         return ENOTSUP;
     }
 
     src_size = src_area->pages * PAGE_SIZE;
     src_flags = src_area->flags;
     src_backend = src_area->backend;
     src_backend_data = src_area->backend_data;
+    src_orig_color = src_area->orig_color;
 
     /* Share the cacheable flag from the original mapping */
     if (src_flags & AS_AREA_CACHEABLE)
         dst_flags_mask |= AS_AREA_CACHEABLE;
 
     if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
         interrupts_restore(ipl);
         return EPERM;
     }
 
     /*
      * Now we are committed to sharing the area.
      * First, prepare the area for sharing.
      * Then it will be safe to unlock it.
      */
     sh_info = src_area->sh_info;
     if (!sh_info) {
         sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
         mutex_initialize(&sh_info->lock);
         sh_info->refcount = 2;
         btree_create(&sh_info->pagemap);
         src_area->sh_info = sh_info;
     } else {
         mutex_lock(&sh_info->lock);
         sh_info->refcount++;
         mutex_unlock(&sh_info->lock);
     }
 
     src_area->backend->share(src_area);
 
     mutex_unlock(&src_area->lock);
     mutex_unlock(&src_as->lock);
 
     /*
      * Create copy of the source address space area.
      * The destination area is created with AS_AREA_ATTR_PARTIAL
      * attribute set which prevents race condition with
      * preliminary as_page_fault() calls.
      * The flags of the source area are masked against dst_flags_mask
      * to support sharing in less privileged mode.
      */
     dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
                   AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
     if (!dst_area) {
         /*
          * Destination address space area could not be created.
          */
         sh_info_remove_reference(sh_info);
 
         interrupts_restore(ipl);
         return ENOMEM;
     }
 
     /*
      * Now the destination address space area has been
      * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
      * attribute and set the sh_info.
      */
+    mutex_lock(&dst_as->lock);
     mutex_lock(&dst_area->lock);
     dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
     dst_area->sh_info = sh_info;
+    dst_area->orig_color = src_orig_color;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+    if (src_orig_color != PAGE_COLOR(dst_base)) {
+        /*
+         * We have just detected an attempt to create an invalid address
+         * alias. We allow this and set a special flag that tells the
+         * architecture specific code to flush the D-cache when the
+         * offending address space is installed and deinstalled
+         * (cleanup).
+         *
+         * In order for the flags to take effect immediately, we also
+         * perform a global D-cache shootdown.
+         */
+        dcache_shootdown_start();
+        dst_as->dcache_flush_on_install = true;
+        dst_as->dcache_flush_on_deinstall = true;
+        dcache_flush();
+        dcache_shootdown_finalize();
+    }
+#endif /* CONFIG_VIRT_IDX_DCACHE */
     mutex_unlock(&dst_area->lock);
+    mutex_unlock(&dst_as->lock);
 
     interrupts_restore(ipl);
 
     return 0;
 }
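
Background for the color logic above (editor's sketch; PAGE_COLOR() is
architecture-specific and every ASSUMED_* name below is an assumption, not a
HelenOS definition): on a virtually indexed D-cache whose way size spans two
pages, one virtual-address bit above the page offset selects the cache color,
so two virtual aliases of the same physical frame with different colors would
land in different cache lines.

    #define ASSUMED_COLOR_BITS      1       /* D-cache way size == 2 pages */
    #define ASSUMED_PAGE_COLOR(va)  \
        (((va) >> PAGE_WIDTH) & ((1 << ASSUMED_COLOR_BITS) - 1))

    /*
     * With 8 KiB pages, ASSUMED_PAGE_COLOR(0x40002000) == 1 while
     * ASSUMED_PAGE_COLOR(0x80004000) == 0: sharing one frame at both
     * addresses is an illegal alias. Rev 2009 tolerates it by flushing
     * the D-cache on install/deinstall, where the disabled Rev 2007
     * check would have refused the share with ENOTSUP.
     */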
 
 /** Check access mode for address space area.
  *
  * The address space area must be locked prior to this call.
  *
  * @param area Address space area.
  * @param access Access mode.
  *
  * @return False if access violates area's permissions, true otherwise.
  */
 bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
     int flagmap[] = {
         [PF_ACCESS_READ] = AS_AREA_READ,
         [PF_ACCESS_WRITE] = AS_AREA_WRITE,
         [PF_ACCESS_EXEC] = AS_AREA_EXEC
     };
 
     if (!(area->flags & flagmap[access]))
         return false;
 
     return true;
 }
 
 /** Handle page fault within the current address space.
  *
  * This is the high-level page fault handler. It decides
  * whether the page fault can be resolved by any backend
  * and if so, it invokes the backend to resolve the page
  * fault.
  *
  * Interrupts are assumed disabled.
  *
  * @param page Faulting page.
  * @param access Access mode that caused the fault (i.e. read/write/exec).
  * @param istate Pointer to interrupted state.
  *
  * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
  *     fault was caused by copy_to_uspace() or copy_from_uspace().
  */
 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
 {
     pte_t *pte;
     as_area_t *area;
 
     if (!THREAD)
         return AS_PF_FAULT;
 
     ASSERT(AS);
 
     mutex_lock(&AS->lock);
     area = find_area_and_lock(AS, page);
     if (!area) {
         /*
          * No area contained mapping for 'page'.
          * Signal page fault to low-level handler.
          */
         mutex_unlock(&AS->lock);
         goto page_fault;
     }
 
     if (area->attributes & AS_AREA_ATTR_PARTIAL) {
         /*
          * The address space area is not fully initialized.
          * Avoid possible race by returning error.
          */
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
         goto page_fault;
     }
 
     if (!area->backend || !area->backend->page_fault) {
         /*
          * The address space area is not backed by any backend
          * or the backend cannot handle page faults.
          */
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
         goto page_fault;
     }
 
     page_table_lock(AS, false);
 
     /*
      * To avoid race condition between two page faults
      * on the same address, we need to make sure
      * the mapping has not already been inserted.
      */
     if ((pte = page_mapping_find(AS, page))) {
         if (PTE_PRESENT(pte)) {
             if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                 page_table_unlock(AS, false);
                 mutex_unlock(&area->lock);
                 mutex_unlock(&AS->lock);
                 return AS_PF_OK;
             }
         }
     }
 
     /*
      * Resort to the backend page fault handler.
      */
     if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
         page_table_unlock(AS, false);
         mutex_unlock(&area->lock);
         mutex_unlock(&AS->lock);
         goto page_fault;
     }
 
     page_table_unlock(AS, false);
     mutex_unlock(&area->lock);
     mutex_unlock(&AS->lock);
     return AS_PF_OK;
 
 page_fault:
     if (THREAD->in_copy_from_uspace) {
         THREAD->in_copy_from_uspace = false;
         istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
     } else if (THREAD->in_copy_to_uspace) {
         THREAD->in_copy_to_uspace = false;
         istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
     } else {
         return AS_PF_FAULT;
     }
 
     return AS_PF_DEFER;
 }
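
For context (editor's sketch): the backend interface that as_page_fault()
dispatches to is visible in this file only through its call sites
(page_fault, frame_free, share). A minimal anonymous-memory-style handler
could look like the following; it leans on frame_alloc(), page_mapping_insert()
and used_space_insert() from the wider kernel and is illustrative, not the
actual HelenOS backend.

    static int example_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
    {
        uintptr_t frame;

        if (!as_area_check_access(area, access))
            return AS_PF_FAULT;

        /* Back the faulting page with a zeroed frame and map it in. */
        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
        memsetb(PA2KA(frame), FRAME_SIZE, 0);
        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
        if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
            panic("Could not insert used space.\n");

        return AS_PF_OK;
    }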
811
 
841
 
812

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
    ipl_t ipl;
    bool needs_asid = false;

    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old) {
        mutex_lock_active(&old->lock);
        ASSERT(old->cpu_refcount);
        if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
            ASSERT(old->asid != ASID_INVALID);
            list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
        }
        mutex_unlock(&old->lock);

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new->lock);
    if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
        if (new->asid != ASID_INVALID)
            list_remove(&new->inactive_as_with_asid_link);
        else
            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
    }
    SET_PTL0_ADDRESS(new->page_table);
    mutex_unlock(&new->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;

        asid = asid_get();
        mutex_lock_active(&new->lock);
        new->asid = asid;
        mutex_unlock(&new->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);

    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new);

    AS = new;
}
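
/*
 * Illustrative sketch (assumption, not part of this file): a typical
 * as_switch() call site during a context switch. Only as_switch() and
 * the AS global come from this file; the task_t wiring below is
 * illustrative.
 */
#if 0
void switch_to_task_sketch(task_t *incoming)
{
    as_t *new_as = incoming->as;

    /* Both tasks may share an address space; switch only when needed. */
    if (AS != new_as)
        as_switch(AS, new_as);
}
#endif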

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
    int flags;

    flags = PAGE_USER | PAGE_PRESENT;

    if (aflags & AS_AREA_READ)
        flags |= PAGE_READ;

    if (aflags & AS_AREA_WRITE)
        flags |= PAGE_WRITE;

    if (aflags & AS_AREA_EXEC)
        flags |= PAGE_EXEC;

    if (aflags & AS_AREA_CACHEABLE)
        flags |= PAGE_CACHEABLE;

    return flags;
}
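
/*
 * Worked example: for a readable, writable, cacheable area, i.e.
 * aflags == (AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE),
 * area_flags_to_page_flags() returns
 * PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE;
 * PAGE_EXEC stays clear because AS_AREA_EXEC was not requested.
 */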

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in an architecture-specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_destroy);

    as_operations->page_table_destroy(page_table);
}
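
/*
 * Illustrative sketch (assumption): the create/destroy pair as a caller
 * would use it through the as_operations indirection. The 0 passed for
 * flags (i.e. a non-kernel page table) and the lack of error handling
 * are assumptions.
 */
#if 0
void page_table_lifetime_sketch(void)
{
    pte_t *ptl0 = page_table_create(0);

    /* ... install the page table and use the address space ... */

    page_table_destroy(ptl0);
}
#endif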

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
}
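
/*
 * Illustrative sketch (assumption): the locking protocol documented
 * above, as seen from a caller that already holds the address space
 * lock and therefore passes false for the lock argument.
 */
#if 0
void inspect_mapping_sketch(as_t *as, uintptr_t page)
{
    pte_t *pte;

    /* as->lock is assumed to be held here. */
    page_table_lock(as, false);
    pte = page_mapping_find(as, page);
    /* ... examine or modify the mapping via pte ... */
    page_table_unlock(as, false);
}
#endif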

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    return NULL;
}
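
/*
 * Illustrative sketch (assumption): canonical use of find_area_and_lock().
 * The caller holds as->lock with interrupts disabled, and is responsible
 * for unlocking the returned area.
 */
#if 0
bool area_is_writable_sketch(as_t *as, uintptr_t va)
{
    as_area_t *area = find_area_and_lock(as, va);
    bool writable;

    if (!area)
        return false;

    writable = (area->flags & AS_AREA_WRITE) != 0;
    mutex_unlock(&area->lock);

    return writable;
}
#endif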

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with the NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check that it doesn't conflict with the kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}
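
/*
 * Illustrative sketch (assumption): how a resize path could use
 * check_area_conflicts(); the area being resized is passed as avoid_area
 * so that it is not reported as conflicting with itself.
 */
#if 0
bool resize_would_fit_sketch(as_t *as, as_area_t *area, size_t new_size)
{
    /* as is locked and interrupts are disabled, as required. */
    return check_area_conflicts(as, area->base, new_size, area);
}
#endif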

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
    ipl_t ipl;
    as_area_t *src_area;
    size_t size;

    ipl = interrupts_disable();
    src_area = find_area_and_lock(AS, base);
    if (src_area) {
        size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else {
        size = 0;
    }
    interrupts_restore(ipl);
    return size;
}
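
/*
 * Worked example: for an area of 4 pages and a 4 KiB PAGE_SIZE,
 * as_get_size(area->base) returns 16384; a return value of 0 means
 * that no area in AS starts at the given base.
 */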

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We hit the beginning of some used space.
         */
        return 0;
    }

    if (!leaf->keys) {
        btree_insert(&a->used_space, page, (void *) count, leaf);
        return 1;
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node) {
        uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
        count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the rightmost interval of
         * the left neighbour and the first interval of the leaf.
         */

        if (page >= right_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, leaf);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            node->value[node->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            leaf->value[0] += count;
            leaf->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page < leaf->key[0]) {
        uintptr_t right_pg = leaf->key[0];
        count_t right_cnt = (count_t) leaf->value[0];

        /*
         * Investigate the border case in which the left neighbour does not
         * exist but the interval fits from the left.
         */

        if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by moving the base of the right interval down
             * and increasing its size accordingly.
             */
            leaf->key[0] = page;
            leaf->value[0] += count;
            return 1;
        } else {
            /*
             * The interval does not adjoin the right interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
    if (node) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

        /*
         * Examine the possibility that the interval fits
         * somewhere between the leftmost interval of
         * the right neighbour and the last interval of the leaf.
         */

        if (page < left_pg) {
            /* Do nothing. */
        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
            return 0;
        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
            /* The interval can be added by merging the two already present intervals. */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&a->used_space, right_pg, node);
            return 1;
        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
            /* The interval can be added by simply growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else if (page + count*PAGE_SIZE == right_pg) {
            /*
             * The interval can be added by simply moving the base of the right
             * interval down and increasing its size accordingly.
             */
            node->value[0] += count;
            node->key[0] = page;
            return 1;
        } else {
            /*
             * The interval is between both neighbouring intervals,
             * but cannot be merged with any of them.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        /*
         * Investigate the border case in which the right neighbour does not
         * exist but the interval fits from the right.
         */

        if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
            return 0;
        } else if (left_pg + left_cnt*PAGE_SIZE == page) {
            /* The interval can be added by growing the left interval. */
            leaf->value[leaf->keys - 1] += count;
            return 1;
        } else {
            /*
             * The interval does not adjoin the left interval.
             * It must be added individually.
             */
            btree_insert(&a->used_space, page, (void *) count, leaf);
            return 1;
        }
    }

    /*
     * Note that if the algorithm made it thus far, the interval can fit only
     * between two other intervals of the leaf. The two border cases were already
     * resolved.
     */
    for (i = 1; i < leaf->keys; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
            count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

            /*
             * The interval fits between left_pg and right_pg.
             */

            if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
                /* The interval intersects with the left interval. */
                return 0;
            } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
                /* The interval intersects with the right interval. */
                return 0;
            } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
                /* The interval can be added by merging the two already present intervals. */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&a->used_space, right_pg, leaf);
                return 1;
            } else if (page == left_pg + left_cnt*PAGE_SIZE) {
                /* The interval can be added by simply growing the left interval. */
                leaf->value[i - 1] += count;
                return 1;
            } else if (page + count*PAGE_SIZE == right_pg) {
                /*
                 * The interval can be added by simply moving the base of the right
                 * interval down and increasing its size accordingly.
                 */
                leaf->value[i] += count;
                leaf->key[i] = page;
                return 1;
            } else {
                /*
                 * The interval is between both neighbouring intervals,
                 * but cannot be merged with any of them.
                 */
                btree_insert(&a->used_space, page, (void *) count, leaf);
                return 1;
            }
        }
    }

    panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
}
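
/*
 * Worked example for used_space_insert() (assuming 4 KiB pages): if the
 * used_space B+tree records the intervals [0x10000, 2 pages) and
 * [0x14000, 3 pages), then used_space_insert(a, 0x12000, 2) takes the
 * merge branch: the new pages start exactly at the end of the left
 * interval (0x10000 + 2 * 0x1000) and end exactly at the base of the
 * right one (0x12000 + 2 * 0x1000 == 0x14000), so both are replaced by
 * a single interval of 7 pages starting at 0x10000.
 */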

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky, page is the beginning of some interval.
         */
        if (count > pages) {
            return 0;
        } else if (count == pages) {
            btree_remove(&a->used_space, page, leaf);
            return 1;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += count*PAGE_SIZE;
                    leaf->value[i] -= count;
                    return 1;
                }
            }
            goto error;
        }
    }

    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node && page < leaf->key[0]) {
        uintptr_t left_pg = node->key[node->keys - 1];
        count_t left_cnt = (count_t) node->value[node->keys - 1];

        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost interval
                 * of the left neighbour and can be removed by
                 * updating the size of the bigger interval.
                 */
                node->value[node->keys - 1] -= count;
                return 1;
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;

                /*
                 * The interval is contained in the rightmost interval
                 * of the left neighbour but its removal requires
                 * both updating the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    } else if (page < leaf->key[0]) {
        return 0;
    }

    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf and can be removed by updating the size
                 * of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                return 1;
            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;

                /*
                 * The interval is contained in the rightmost interval
                 * of the leaf but its removal requires both updating
                 * the size of the original interval and
                 * also inserting a new interval.
                 */
                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    }

    /*
     * The border cases have been already resolved.
     * Now the interval can be only between intervals of the leaf.
     */
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            count_t left_cnt = (count_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding to (i - 1) and i.
             */
            if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
                if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf and can be removed by updating the size
                     * of the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    return 1;
                } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
                    count_t new_cnt;

                    /*
                     * The interval is contained in the interval (i - 1)
                     * of the leaf but its removal requires both updating
                     * the size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
                    return 1;
                }
            }
            return 0;
        }
    }

error:
    panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
}
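
/*
 * Worked example for used_space_remove() (assuming 4 KiB pages): with a
 * single recorded interval [0x10000, 8 pages), the call
 * used_space_remove(a, 0x12000, 2) punches a hole in its middle: the
 * original interval shrinks to [0x10000, 2 pages) and a new interval
 * [0x14000, 4 pages) is inserted for the pages behind the hole, where
 * 4 == (0x18000 - 0x14000) >> PAGE_WIDTH.
 */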
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
    bool dealloc = false;

    mutex_lock(&sh_info->lock);
    ASSERT(sh_info->refcount);
    if (--sh_info->refcount == 0) {
        dealloc = true;
        link_t *cur;

        /*
         * Now carefully walk the pagemap B+tree and free/remove
         * the reference from all frames found there.
         */
        for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
            btree_node_t *node;
            int i;

            node = list_get_instance(cur, btree_node_t, leaf_link);
            for (i = 0; i < node->keys; i++)
                frame_free((uintptr_t) node->value[i]);
        }

    }
    mutex_unlock(&sh_info->lock);

    if (dealloc) {
        btree_destroy(&sh_info->pagemap);
        free(sh_info);
    }
}

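/*
 * [Editor's illustrative note - not part of the original source.]
 * Observe the ordering above: sh_info is freed only after mutex_unlock(),
 * because the mutex is embedded in the structure being deallocated and
 * unlocking after free() would touch freed memory. The local dealloc flag
 * carries the "last reference" verdict out of the critical section. The
 * same shape in miniature, with a hypothetical refcounted obj:
 *
 *     mutex_lock(&obj->lock);
 *     bool last = (--obj->refcount == 0);
 *     mutex_unlock(&obj->lock);
 *     if (last)
 *         free(obj);
 */
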
/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
        return (unative_t) address;
    else
        return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
    return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
    return (unative_t) as_area_destroy(AS, address);
}

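/*
 * [Editor's illustrative note - not part of the original source.]
 * Two observations on the wrappers above. First, sys_as_area_create()
 * reports failure by returning (unative_t) -1, so callers must compare
 * against that sentinel rather than testing for NULL. Second,
 * sys_as_area_resize() accepts a flags argument but passes a literal 0
 * to as_area_resize(), i.e. the flags are currently ignored. A userspace
 * caller might look roughly like this; the __syscall3() stub, the
 * SYS_AS_AREA_CREATE number and handle_failure() are assumptions for
 * illustration, not the actual HelenOS libc binding:
 *
 *     void *area = (void *) __syscall3(SYS_AS_AREA_CREATE,
 *         (unative_t) hint, (unative_t) size, (unative_t) flags);
 *     if (area == (void *) -1)
 *         handle_failure();
 */
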
/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
    ipl_t ipl;

    ipl = interrupts_disable();
    mutex_lock(&as->lock);

    /* print out info about address space areas */
    link_t *cur;
    for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
        btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);

        int i;
        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];

            mutex_lock(&area->lock);
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
                area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
            mutex_unlock(&area->lock);
        }
    }

    mutex_unlock(&as->lock);
    interrupts_restore(ipl);
}

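/*
 * [Editor's illustrative note - not part of the original source.]
 * Given the format string above, one line of as_print() output would look
 * roughly like this (all values hypothetical, assuming 4 KiB pages and a
 * %p format that prints a 0x-prefixed address):
 *
 *     as_area: 0x8012f300, base=0x40000000, pages=16 (0x40000000 - 0x40010000)
 */
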
/** @}
 */