Subversion Repositories HelenOS

Rev

Rev 2106 | Rev 2126 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2106 Rev 2125
1
/*
1
/*
2
 * Copyright (c) 2001-2006 Jakub Jermar
2
 * Copyright (c) 2001-2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup genericmm
29
/** @addtogroup genericmm
30
 * @{
30
 * @{
31
 */
31
 */
32
 
32
 
33
/**
33
/**
34
 * @file
34
 * @file
35
 * @brief   Address space related functions.
35
 * @brief   Address space related functions.
36
 *
36
 *
37
 * This file contains address space manipulation functions.
37
 * This file contains address space manipulation functions.
38
 * Roughly speaking, this is a higher-level client of
38
 * Roughly speaking, this is a higher-level client of
39
 * Virtual Address Translation (VAT) subsystem.
39
 * Virtual Address Translation (VAT) subsystem.
40
 *
40
 *
41
 * Functionality provided by this file allows one to
41
 * Functionality provided by this file allows one to
42
 * create address spaces and create, resize and share
42
 * create address spaces and create, resize and share
43
 * address space areas.
43
 * address space areas.
44
 *
44
 *
45
 * @see page.c
45
 * @see page.c
46
 *
46
 *
47
 */
47
 */
48
 
48
 
49
#include <mm/as.h>
49
#include <mm/as.h>
50
#include <arch/mm/as.h>
50
#include <arch/mm/as.h>
51
#include <mm/page.h>
51
#include <mm/page.h>
52
#include <mm/frame.h>
52
#include <mm/frame.h>
53
#include <mm/slab.h>
53
#include <mm/slab.h>
54
#include <mm/tlb.h>
54
#include <mm/tlb.h>
55
#include <arch/mm/page.h>
55
#include <arch/mm/page.h>
56
#include <genarch/mm/page_pt.h>
56
#include <genarch/mm/page_pt.h>
57
#include <genarch/mm/page_ht.h>
57
#include <genarch/mm/page_ht.h>
58
#include <mm/asid.h>
58
#include <mm/asid.h>
59
#include <arch/mm/asid.h>
59
#include <arch/mm/asid.h>
60
#include <synch/spinlock.h>
60
#include <synch/spinlock.h>
61
#include <synch/mutex.h>
61
#include <synch/mutex.h>
62
#include <adt/list.h>
62
#include <adt/list.h>
63
#include <adt/btree.h>
63
#include <adt/btree.h>
64
#include <proc/task.h>
64
#include <proc/task.h>
65
#include <proc/thread.h>
65
#include <proc/thread.h>
66
#include <arch/asm.h>
66
#include <arch/asm.h>
67
#include <panic.h>
67
#include <panic.h>
68
#include <debug.h>
68
#include <debug.h>
69
#include <print.h>
69
#include <print.h>
70
#include <memstr.h>
70
#include <memstr.h>
71
#include <macros.h>
71
#include <macros.h>
72
#include <arch.h>
72
#include <arch.h>
73
#include <errno.h>
73
#include <errno.h>
74
#include <config.h>
74
#include <config.h>
75
#include <align.h>
75
#include <align.h>
76
#include <arch/types.h>
76
#include <arch/types.h>
77
#include <syscall/copy.h>
77
#include <syscall/copy.h>
78
#include <arch/interrupt.h>
78
#include <arch/interrupt.h>
79
 
79
 
80
#ifdef CONFIG_VIRT_IDX_DCACHE
80
#ifdef CONFIG_VIRT_IDX_DCACHE
81
#include <arch/mm/cache.h>
81
#include <arch/mm/cache.h>
82
#endif /* CONFIG_VIRT_IDX_DCACHE */
82
#endif /* CONFIG_VIRT_IDX_DCACHE */
83
 
83
 
-
 
84
#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;
#endif

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;

/**
 * This lock protects inactive_as_with_asid_head list. It must be acquired
 * before as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

/* Forward declarations of helpers private to this file. */
static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);
115
 
117
 
116
static int as_constructor(void *obj, int flags)
118
static int as_constructor(void *obj, int flags)
117
{
119
{
118
    as_t *as = (as_t *) obj;
120
    as_t *as = (as_t *) obj;
119
    int rc;
121
    int rc;
120
 
122
 
121
    link_initialize(&as->inactive_as_with_asid_link);
123
    link_initialize(&as->inactive_as_with_asid_link);
122
    mutex_initialize(&as->lock);   
124
    mutex_initialize(&as->lock);   
123
   
125
   
124
    rc = as_constructor_arch(as, flags);
126
    rc = as_constructor_arch(as, flags);
125
   
127
   
126
    return rc;
128
    return rc;
127
}
129
}
128
 
130
 
129
static int as_destructor(void *obj)
131
static int as_destructor(void *obj)
130
{
132
{
131
    as_t *as = (as_t *) obj;
133
    as_t *as = (as_t *) obj;
132
 
134
 
133
    return as_destructor_arch(as);
135
    return as_destructor_arch(as);
134
}
136
}
135
 
137
 
136
/** Initialize address space subsystem. */
138
/** Initialize address space subsystem. */
137
void as_init(void)
139
void as_init(void)
138
{
140
{
139
    as_arch_init();
141
    as_arch_init();
140
   
142
   
141
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
143
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
142
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
144
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
143
   
145
   
144
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
146
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
145
    if (!AS_KERNEL)
147
    if (!AS_KERNEL)
146
        panic("can't create kernel address space\n");
148
        panic("can't create kernel address space\n");
147
   
149
   
148
}
150
}
149
 
151
 
150
/** Create address space.
152
/** Create address space.
151
 *
153
 *
152
 * @param flags Flags that influence way in wich the address space is created.
154
 * @param flags Flags that influence way in wich the address space is created.
153
 */
155
 */
154
as_t *as_create(int flags)
156
as_t *as_create(int flags)
155
{
157
{
156
    as_t *as;
158
    as_t *as;
157
 
159
 
158
    as = (as_t *) slab_alloc(as_slab, 0);
160
    as = (as_t *) slab_alloc(as_slab, 0);
159
    (void) as_create_arch(as, 0);
161
    (void) as_create_arch(as, 0);
160
   
162
   
161
    btree_create(&as->as_area_btree);
163
    btree_create(&as->as_area_btree);
162
   
164
   
163
    if (flags & FLAG_AS_KERNEL)
165
    if (flags & FLAG_AS_KERNEL)
164
        as->asid = ASID_KERNEL;
166
        as->asid = ASID_KERNEL;
165
    else
167
    else
166
        as->asid = ASID_INVALID;
168
        as->asid = ASID_INVALID;
167
   
169
   
168
    as->refcount = 0;
170
    as->refcount = 0;
169
    as->cpu_refcount = 0;
171
    as->cpu_refcount = 0;
170
#ifdef AS_PAGE_TABLE
172
#ifdef AS_PAGE_TABLE
171
    as->genarch.page_table = page_table_create(flags);
173
    as->genarch.page_table = page_table_create(flags);
172
#else
174
#else
173
    page_table_create(flags);
175
    page_table_create(flags);
174
#endif
176
#endif
175
 
177
 
176
    return as;
178
    return as;
177
}
179
}
178
 
180
 
179
/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 *
 * @param as Address space to be destroyed.
 */
void as_destroy(as_t *as)
{
    ipl_t ipl;
    bool cond;

    ASSERT(as->refcount == 0);
    
    /*
     * Since there is no reference to this area,
     * it is safe not to lock its mutex.
     */
    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);
    if (as->asid != ASID_INVALID && as != AS_KERNEL) {
        /*
         * An inactive address space with a valid ASID sits on the
         * inactive_as_with_asid list; unlink it before recycling
         * the ASID.
         */
        if (as != AS && as->cpu_refcount == 0)
            list_remove(&as->inactive_as_with_asid_link);
        asid_put(as->asid);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);

    /*
     * Destroy address space areas of the address space.
     * The B+tree must be walked carefully because it is
     * also being destroyed.
     */
    for (cond = true; cond; ) {
        btree_node_t *node;

        /* Always re-fetch the first leaf: as_area_destroy() mutates
         * the tree underneath us. */
        ASSERT(!list_empty(&as->as_area_btree.leaf_head));
        node = list_get_instance(as->as_area_btree.leaf_head.next,
            btree_node_t, leaf_link);

        /* Loop terminates once the first leaf holds no keys. */
        if ((cond = node->keys)) {
            as_area_destroy(as, node->key[0]);
        }
    }

    btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
    page_table_destroy(as->genarch.page_table);
#else
    page_table_destroy(NULL);
#endif

    interrupts_restore(ipl);
    
    slab_free(as_slab, as);
}
232
 
234
 
233
/** Create address space area of common attributes.
235
/** Create address space area of common attributes.
234
 *
236
 *
235
 * The created address space area is added to the target address space.
237
 * The created address space area is added to the target address space.
236
 *
238
 *
237
 * @param as Target address space.
239
 * @param as Target address space.
238
 * @param flags Flags of the area memory.
240
 * @param flags Flags of the area memory.
239
 * @param size Size of area.
241
 * @param size Size of area.
240
 * @param base Base address of area.
242
 * @param base Base address of area.
241
 * @param attrs Attributes of the area.
243
 * @param attrs Attributes of the area.
242
 * @param backend Address space area backend. NULL if no backend is used.
244
 * @param backend Address space area backend. NULL if no backend is used.
243
 * @param backend_data NULL or a pointer to an array holding two void *.
245
 * @param backend_data NULL or a pointer to an array holding two void *.
244
 *
246
 *
245
 * @return Address space area on success or NULL on failure.
247
 * @return Address space area on success or NULL on failure.
246
 */
248
 */
247
as_area_t *
249
as_area_t *
248
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
250
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
249
           mem_backend_t *backend, mem_backend_data_t *backend_data)
251
           mem_backend_t *backend, mem_backend_data_t *backend_data)
250
{
252
{
251
    ipl_t ipl;
253
    ipl_t ipl;
252
    as_area_t *a;
254
    as_area_t *a;
253
   
255
   
254
    if (base % PAGE_SIZE)
256
    if (base % PAGE_SIZE)
255
        return NULL;
257
        return NULL;
256
 
258
 
257
    if (!size)
259
    if (!size)
258
        return NULL;
260
        return NULL;
259
 
261
 
260
    /* Writeable executable areas are not supported. */
262
    /* Writeable executable areas are not supported. */
261
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
263
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
262
        return NULL;
264
        return NULL;
263
   
265
   
264
    ipl = interrupts_disable();
266
    ipl = interrupts_disable();
265
    mutex_lock(&as->lock);
267
    mutex_lock(&as->lock);
266
   
268
   
267
    if (!check_area_conflicts(as, base, size, NULL)) {
269
    if (!check_area_conflicts(as, base, size, NULL)) {
268
        mutex_unlock(&as->lock);
270
        mutex_unlock(&as->lock);
269
        interrupts_restore(ipl);
271
        interrupts_restore(ipl);
270
        return NULL;
272
        return NULL;
271
    }
273
    }
272
   
274
   
273
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
275
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
274
 
276
 
275
    mutex_initialize(&a->lock);
277
    mutex_initialize(&a->lock);
276
   
278
   
277
    a->as = as;
279
    a->as = as;
278
    a->flags = flags;
280
    a->flags = flags;
279
    a->attributes = attrs;
281
    a->attributes = attrs;
280
    a->pages = SIZE2FRAMES(size);
282
    a->pages = SIZE2FRAMES(size);
281
    a->base = base;
283
    a->base = base;
282
    a->sh_info = NULL;
284
    a->sh_info = NULL;
283
    a->backend = backend;
285
    a->backend = backend;
284
    if (backend_data)
286
    if (backend_data)
285
        a->backend_data = *backend_data;
287
        a->backend_data = *backend_data;
286
    else
288
    else
287
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
289
        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
288
            0);
290
            0);
289
 
291
 
290
    btree_create(&a->used_space);
292
    btree_create(&a->used_space);
291
   
293
   
292
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
294
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
293
 
295
 
294
    mutex_unlock(&as->lock);
296
    mutex_unlock(&as->lock);
295
    interrupts_restore(ipl);
297
    interrupts_restore(ipl);
296
 
298
 
297
    return a;
299
    return a;
298
}
300
}
299
 
301
 
300
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
    as_area_t *area;
    ipl_t ipl;
    size_t pages;
    
    ipl = interrupts_disable();
    mutex_lock(&as->lock);
    
    /*
     * Locate the area.
     */
    area = find_area_and_lock(as, address);
    if (!area) {
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (area->backend == &phys_backend) {
        /*
         * Remapping of address space areas associated
         * with memory mapped devices is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    if (area->sh_info) {
        /*
         * Remapping of shared address space areas
         * is not supported.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }

    /* New page count is measured from the area base, not from address. */
    pages = SIZE2FRAMES((address - area->base) + size);
    if (!pages) {
        /*
         * Zero size address space areas are not allowed.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }
    
    if (pages < area->pages) {
        bool cond;
        /* First page that falls outside the shrunk area. */
        uintptr_t start_free = area->base + pages*PAGE_SIZE;

        /*
         * Shrinking the area.
         * No need to check for overlaps.
         */

        /*
         * Start TLB shootdown sequence.
         */
        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
            pages * PAGE_SIZE, area->pages - pages);

        /*
         * Remove frames belonging to used space starting from
         * the highest addresses downwards until an overlap with
         * the resized address space area is found. Note that this
         * is also the right way to remove part of the used_space
         * B+tree leaf list.
         */    
        for (cond = true; cond;) {
            btree_node_t *node;
        
            /* Walk from the last leaf backwards; the tree shrinks
             * as intervals are removed, so re-fetch each pass. */
            ASSERT(!list_empty(&area->used_space.leaf_head));
            node =
                list_get_instance(area->used_space.leaf_head.prev,
                btree_node_t, leaf_link);
            if ((cond = (bool) node->keys)) {
                /* b = interval base, c = interval length in pages. */
                uintptr_t b = node->key[node->keys - 1];
                count_t c =
                    (count_t) node->value[node->keys - 1];
                int i = 0;
            
                if (overlaps(b, c * PAGE_SIZE, area->base,
                    pages*PAGE_SIZE)) {
                    
                    if (b + c * PAGE_SIZE <= start_free) {
                        /*
                         * The whole interval fits
                         * completely in the resized
                         * address space area.
                         */
                        break;
                    }
        
                    /*
                     * Part of the interval corresponding
                     * to b and c overlaps with the resized
                     * address space area.
                     */
        
                    cond = false;   /* we are almost done */
                    /* i = pages of the interval that survive. */
                    i = (start_free - b) >> PAGE_WIDTH;
                    if (!used_space_remove(area, start_free,
                        c - i))
                        panic("Could not remove used "
                            "space.\n");
                } else {
                    /*
                     * The interval of used space can be
                     * completely removed.
                     */
                    if (!used_space_remove(area, b, c))
                        panic("Could not remove used "
                            "space.\n");
                }
            
                /* Unmap and free the frames backing the removed
                 * pages (from index i to the end of the interval). */
                for (; i < c; i++) {
                    pte_t *pte;
            
                    page_table_lock(as, false);
                    pte = page_mapping_find(as, b +
                        i * PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) &&
                        PTE_PRESENT(pte));
                    if (area->backend &&
                        area->backend->frame_free) {
                        area->backend->frame_free(area,
                            b + i * PAGE_SIZE,
                            PTE_GET_FRAME(pte));
                    }
                    page_mapping_remove(as, b +
                        i * PAGE_SIZE);
                    page_table_unlock(as, false);
                }
            }
        }

        /*
         * Finish TLB shootdown sequence.
         */
        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
        tlb_shootdown_finalize();
        
        /*
         * Invalidate software translation caches (e.g. TSB on sparc64).
         */
        as_invalidate_translation_cache(as, area->base +
            pages * PAGE_SIZE, area->pages - pages);
    } else {
        /*
         * Growing the area.
         * Check for overlaps with other address space areas.
         */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
            area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);        
            interrupts_restore(ipl);
            return EADDRNOTAVAIL;
        }
    }

    area->pages = pages;
    
    mutex_unlock(&area->lock);
    mutex_unlock(&as->lock);
    interrupts_restore(ipl);

    return 0;
}
485
 
487
 
486
/** Destroy address space area.
488
/** Destroy address space area.
487
 *
489
 *
488
 * @param as Address space.
490
 * @param as Address space.
489
 * @param address Address within the area to be deleted.
490
 *
492
 *
491
 * @return Zero on success or a value from @ref errno.h on failure.
493
 * @return Zero on success or a value from @ref errno.h on failure.
492
 */
494
 */
493
int as_area_destroy(as_t *as, uintptr_t address)
495
int as_area_destroy(as_t *as, uintptr_t address)
494
{
496
{
495
    as_area_t *area;
497
    as_area_t *area;
496
    uintptr_t base;
498
    uintptr_t base;
497
    link_t *cur;
499
    link_t *cur;
498
    ipl_t ipl;
500
    ipl_t ipl;
499
 
501
 
500
    ipl = interrupts_disable();
502
    ipl = interrupts_disable();
501
    mutex_lock(&as->lock);
503
    mutex_lock(&as->lock);
502
 
504
 
503
    area = find_area_and_lock(as, address);
505
    area = find_area_and_lock(as, address);
504
    if (!area) {
506
    if (!area) {
505
        mutex_unlock(&as->lock);
507
        mutex_unlock(&as->lock);
506
        interrupts_restore(ipl);
508
        interrupts_restore(ipl);
507
        return ENOENT;
509
        return ENOENT;
508
    }
510
    }
509
 
511
 
510
    base = area->base;
512
    base = area->base;
511
 
513
 
512
    /*
514
    /*
513
     * Start TLB shootdown sequence.
515
     * Start TLB shootdown sequence.
514
     */
516
     */
515
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
517
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
516
 
518
 
517
    /*
519
    /*
518
     * Visit only the pages mapped by used_space B+tree.
520
     * Visit only the pages mapped by used_space B+tree.
519
     */
521
     */
520
    for (cur = area->used_space.leaf_head.next;
522
    for (cur = area->used_space.leaf_head.next;
521
        cur != &area->used_space.leaf_head; cur = cur->next) {
523
        cur != &area->used_space.leaf_head; cur = cur->next) {
522
        btree_node_t *node;
524
        btree_node_t *node;
523
        int i;
525
        int i;
524
       
526
       
525
        node = list_get_instance(cur, btree_node_t, leaf_link);
527
        node = list_get_instance(cur, btree_node_t, leaf_link);
526
        for (i = 0; i < node->keys; i++) {
528
        for (i = 0; i < node->keys; i++) {
527
            uintptr_t b = node->key[i];
529
            uintptr_t b = node->key[i];
528
            count_t j;
530
            count_t j;
529
            pte_t *pte;
531
            pte_t *pte;
530
           
532
           
531
            for (j = 0; j < (count_t) node->value[i]; j++) {
533
            for (j = 0; j < (count_t) node->value[i]; j++) {
532
                page_table_lock(as, false);
534
                page_table_lock(as, false);
533
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
535
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
534
                ASSERT(pte && PTE_VALID(pte) &&
536
                ASSERT(pte && PTE_VALID(pte) &&
535
                    PTE_PRESENT(pte));
537
                    PTE_PRESENT(pte));
536
                if (area->backend &&
538
                if (area->backend &&
537
                    area->backend->frame_free) {
539
                    area->backend->frame_free) {
538
                    area->backend->frame_free(area, b +
540
                    area->backend->frame_free(area, b +
539
                    j * PAGE_SIZE, PTE_GET_FRAME(pte));
541
                    j * PAGE_SIZE, PTE_GET_FRAME(pte));
540
                }
542
                }
541
                page_mapping_remove(as, b + j * PAGE_SIZE);            
543
                page_mapping_remove(as, b + j * PAGE_SIZE);            
542
                page_table_unlock(as, false);
544
                page_table_unlock(as, false);
543
            }
545
            }
544
        }
546
        }
545
    }
547
    }
546
 
548
 
547
    /*
549
    /*
548
     * Finish TLB shootdown sequence.
550
     * Finish TLB shootdown sequence.
549
     */
551
     */
550
    tlb_invalidate_pages(as->asid, area->base, area->pages);
552
    tlb_invalidate_pages(as->asid, area->base, area->pages);
551
    tlb_shootdown_finalize();
553
    tlb_shootdown_finalize();
552
   
554
   
553
    /*
555
    /*
554
     * Invalidate potential software translation caches (e.g. TSB on
556
     * Invalidate potential software translation caches (e.g. TSB on
555
     * sparc64).
557
     * sparc64).
556
     */
558
     */
557
    as_invalidate_translation_cache(as, area->base, area->pages);
559
    as_invalidate_translation_cache(as, area->base, area->pages);
558
   
560
   
559
    btree_destroy(&area->used_space);
561
    btree_destroy(&area->used_space);
560
 
562
 
561
    area->attributes |= AS_AREA_ATTR_PARTIAL;
563
    area->attributes |= AS_AREA_ATTR_PARTIAL;
562
   
564
   
563
    if (area->sh_info)
565
    if (area->sh_info)
564
        sh_info_remove_reference(area->sh_info);
566
        sh_info_remove_reference(area->sh_info);
565
       
567
       
566
    mutex_unlock(&area->lock);
568
    mutex_unlock(&area->lock);
567
 
569
 
568
    /*
570
    /*
569
     * Remove the empty area from address space.
571
     * Remove the empty area from address space.
570
     */
572
     */
571
    btree_remove(&as->as_area_btree, base, NULL);
573
    btree_remove(&as->as_area_btree, base, NULL);
572
   
574
   
573
    free(area);
575
    free(area);
574
   
576
   
575
    mutex_unlock(&as->lock);
577
    mutex_unlock(&as->lock);
576
    interrupts_restore(ipl);
578
    interrupts_restore(ipl);
577
    return 0;
579
    return 0;
578
}
580
}
579
 
581
 
580
/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing or if the kernel detects an attempt to create an illegal address
 * alias.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
          as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
    ipl_t ipl;
    int src_flags;
    size_t src_size;
    as_area_t *src_area, *dst_area;
    share_info_t *sh_info;
    mem_backend_t *src_backend;
    mem_backend_data_t src_backend_data;
    
    ipl = interrupts_disable();
    mutex_lock(&src_as->lock);
    src_area = find_area_and_lock(src_as, src_base);
    if (!src_area) {
        /*
         * Could not find the source address space area.
         */
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOENT;
    }

    if (!src_area->backend || !src_area->backend->share) {
        /*
         * There is no backend or the backend does not
         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return ENOTSUP;
    }
    
    /* Snapshot source properties while src_area->lock is held. */
    src_size = src_area->pages * PAGE_SIZE;
    src_flags = src_area->flags;
    src_backend = src_area->backend;
    src_backend_data = src_area->backend_data;

    /* Share the cacheable flag from the original mapping */
    if (src_flags & AS_AREA_CACHEABLE)
        dst_flags_mask |= AS_AREA_CACHEABLE;

    /*
     * Refuse if the claimed size differs or the requested flags are not
     * a subset of the source area's flags.
     */
    if (src_size != acc_size ||
        (src_flags & dst_flags_mask) != dst_flags_mask) {
        mutex_unlock(&src_area->lock);
        mutex_unlock(&src_as->lock);
        interrupts_restore(ipl);
        return EPERM;
    }

#ifdef CONFIG_VIRT_IDX_DCACHE
    if (!(dst_flags_mask & AS_AREA_EXEC)) {
        if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
            /*
             * Refuse to create an illegal address alias.
             */
            mutex_unlock(&src_area->lock);
            mutex_unlock(&src_as->lock);
            interrupts_restore(ipl);
            return ENOTSUP;
        }
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */

    /*
     * Now we are committed to sharing the area.
     * First, prepare the area for sharing.
     * Then it will be safe to unlock it.
     */
    sh_info = src_area->sh_info;
    if (!sh_info) {
        /* Kernel malloc variant; second argument are allocation flags. */
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
        mutex_initialize(&sh_info->lock);
        /* One reference for the source, one for the destination area. */
        sh_info->refcount = 2;
        btree_create(&sh_info->pagemap);
        src_area->sh_info = sh_info;
    } else {
        mutex_lock(&sh_info->lock);
        sh_info->refcount++;
        mutex_unlock(&sh_info->lock);
    }

    src_area->backend->share(src_area);

    mutex_unlock(&src_area->lock);
    mutex_unlock(&src_as->lock);

    /*
     * Create copy of the source address space area.
     * The destination area is created with AS_AREA_ATTR_PARTIAL
     * attribute set which prevents race condition with
     * preliminary as_page_fault() calls.
     * The flags of the source area are masked against dst_flags_mask
     * to support sharing in less privileged mode.
     */
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
    if (!dst_area) {
        /*
         * Destination address space area could not be created.
         */
        sh_info_remove_reference(sh_info);
        
        interrupts_restore(ipl);
        return ENOMEM;
    }

    /*
     * Now the destination address space area has been
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
     * attribute and set the sh_info.
     */
    mutex_lock(&dst_as->lock); 
    mutex_lock(&dst_area->lock);
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
    dst_area->sh_info = sh_info;
    mutex_unlock(&dst_area->lock);
    mutex_unlock(&dst_as->lock);   

    interrupts_restore(ipl);
    
    return 0;
}
726
 
728
 
727
/** Check access mode for address space area.
729
/** Check access mode for address space area.
728
 *
730
 *
729
 * The address space area must be locked prior to this call.
731
 * The address space area must be locked prior to this call.
730
 *
732
 *
731
 * @param area Address space area.
733
 * @param area Address space area.
732
 * @param access Access mode.
734
 * @param access Access mode.
733
 *
735
 *
734
 * @return False if access violates area's permissions, true otherwise.
736
 * @return False if access violates area's permissions, true otherwise.
735
 */
737
 */
736
bool as_area_check_access(as_area_t *area, pf_access_t access)
738
bool as_area_check_access(as_area_t *area, pf_access_t access)
737
{
739
{
738
    int flagmap[] = {
740
    int flagmap[] = {
739
        [PF_ACCESS_READ] = AS_AREA_READ,
741
        [PF_ACCESS_READ] = AS_AREA_READ,
740
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
742
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
741
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
743
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
742
    };
744
    };
743
 
745
 
744
    if (!(area->flags & flagmap[access]))
746
    if (!(area->flags & flagmap[access]))
745
        return false;
747
        return false;
746
   
748
   
747
    return true;
749
    return true;
748
}
750
}
749
 
751
 
750
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
    pte_t *pte;
    as_area_t *area;
    
    /* Faults taken with no current thread cannot be resolved here. */
    if (!THREAD)
        return AS_PF_FAULT;
        
    ASSERT(AS);

    mutex_lock(&AS->lock);
    area = find_area_and_lock(AS, page);   
    if (!area) {
        /*
         * No area contained mapping for 'page'.
         * Signal page fault to low-level handler.
         */
        mutex_unlock(&AS->lock);
        goto page_fault;
    }

    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
        /*
         * The address space area is not fully initialized.
         * Avoid possible race by returning error.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;       
    }

    if (!area->backend || !area->backend->page_fault) {
        /*
         * The address space area is not backed by any backend
         * or the backend cannot handle page faults.
         */
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;       
    }

    page_table_lock(AS, false);
    
    /*
     * To avoid race condition between two page faults
     * on the same address, we need to make sure
     * the mapping has not been already inserted.
     */
    if ((pte = page_mapping_find(AS, page))) {
        if (PTE_PRESENT(pte)) {
            /* Mapping already grants the required access; nothing to do. */
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
                page_table_unlock(AS, false);
                mutex_unlock(&area->lock);
                mutex_unlock(&AS->lock);
                return AS_PF_OK;
            }
        }
    }
    
    /*
     * Resort to the backend page fault handler.
     */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
        page_table_unlock(AS, false);
        mutex_unlock(&area->lock);
        mutex_unlock(&AS->lock);
        goto page_fault;
    }
    
    page_table_unlock(AS, false);
    mutex_unlock(&area->lock);
    mutex_unlock(&AS->lock);
    return AS_PF_OK;

page_fault:
    /*
     * The fault could not be resolved. If it happened inside a kernel
     * uspace-copy routine, redirect the interrupted state to that
     * routine's failover address and defer; otherwise report a fault.
     */
    if (THREAD->in_copy_from_uspace) {
        THREAD->in_copy_from_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_from_uspace_failover_address);
    } else if (THREAD->in_copy_to_uspace) {
        THREAD->in_copy_to_uspace = false;
        istate_set_retaddr(istate,
            (uintptr_t) &memcpy_to_uspace_failover_address);
    } else {
        return AS_PF_FAULT;
    }

    return AS_PF_DEFER;
}
857
 
859
 
858
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
    ipl_t ipl;
    bool needs_asid = false;
    
    ipl = interrupts_disable();
    spinlock_lock(&inactive_as_with_asid_lock);

    /*
     * First, take care of the old address space.
     */
    if (old_as) {
        /* mutex_lock_active: busy-waiting lock acquire (cannot sleep). */
        mutex_lock_active(&old_as->lock);
        ASSERT(old_as->cpu_refcount);
        if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
            /*
             * The old address space is no longer active on
             * any processor. It can be appended to the
             * list of inactive address spaces with assigned
             * ASID.
             */
             ASSERT(old_as->asid != ASID_INVALID);
             list_append(&old_as->inactive_as_with_asid_link,
                 &inactive_as_with_asid_head);
        }
        mutex_unlock(&old_as->lock);

        /*
         * Perform architecture-specific tasks when the address space
         * is being removed from the CPU.
         */
        as_deinstall_arch(old_as);
    }

    /*
     * Second, prepare the new address space.
     */
    mutex_lock_active(&new_as->lock);
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
        if (new_as->asid != ASID_INVALID) {
            /* Becoming active again: leave the inactive-with-ASID list. */
            list_remove(&new_as->inactive_as_with_asid_link);
        } else {
            /*
             * Defer call to asid_get() until new_as->lock is released.
             */
            needs_asid = true;
        }
    }
#ifdef AS_PAGE_TABLE
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
    mutex_unlock(&new_as->lock);

    if (needs_asid) {
        /*
         * Allocation of new ASID was deferred
         * until now in order to avoid deadlock.
         */
        asid_t asid;
        
        asid = asid_get();
        mutex_lock_active(&new_as->lock);
        new_as->asid = asid;
        mutex_unlock(&new_as->lock);
    }
    spinlock_unlock(&inactive_as_with_asid_lock);
    interrupts_restore(ipl);
    
    /*
     * Perform architecture-specific steps.
     * (e.g. write ASID to hardware register etc.)
     */
    as_install_arch(new_as);
    
    AS = new_as;
}
942
 
944
 
943
/** Convert address space area flags to page flags.
945
/** Convert address space area flags to page flags.
944
 *
946
 *
945
 * @param aflags Flags of some address space area.
947
 * @param aflags Flags of some address space area.
946
 *
948
 *
947
 * @return Flags to be passed to page_mapping_insert().
949
 * @return Flags to be passed to page_mapping_insert().
948
 */
950
 */
949
int area_flags_to_page_flags(int aflags)
951
int area_flags_to_page_flags(int aflags)
950
{
952
{
951
    int flags;
953
    int flags;
952
 
954
 
953
    flags = PAGE_USER | PAGE_PRESENT;
955
    flags = PAGE_USER | PAGE_PRESENT;
954
   
956
   
955
    if (aflags & AS_AREA_READ)
957
    if (aflags & AS_AREA_READ)
956
        flags |= PAGE_READ;
958
        flags |= PAGE_READ;
957
       
959
       
958
    if (aflags & AS_AREA_WRITE)
960
    if (aflags & AS_AREA_WRITE)
959
        flags |= PAGE_WRITE;
961
        flags |= PAGE_WRITE;
960
   
962
   
961
    if (aflags & AS_AREA_EXEC)
963
    if (aflags & AS_AREA_EXEC)
962
        flags |= PAGE_EXEC;
964
        flags |= PAGE_EXEC;
963
   
965
   
964
    if (aflags & AS_AREA_CACHEABLE)
966
    if (aflags & AS_AREA_CACHEABLE)
965
        flags |= PAGE_CACHEABLE;
967
        flags |= PAGE_CACHEABLE;
966
       
968
       
967
    return flags;
969
    return flags;
968
}
970
}
969
 
971
 
970
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
    /* Simple delegation: convert the area's flags to page flags. */
    return area_flags_to_page_flags(a->flags);
}
983
 
985
 
984
/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
#ifdef __OBJC__
    /* Objective-C build: dispatch to the as_t class method. */
    return [as_t page_table_create: flags];
#else
    /*
     * The architecture-specific operations table must have been
     * installed before any page table can be created.
     */
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_create);

    return as_operations->page_table_create(flags);
#endif
}
1000
 
1006
 
1001
/** Destroy page table.
1007
/** Destroy page table.
1002
 *
1008
 *
1003
 * Destroy page table in architecture specific way.
1009
 * Destroy page table in architecture specific way.
1004
 *
1010
 *
1005
 * @param page_table Physical address of PTL0.
1011
 * @param page_table Physical address of PTL0.
1006
 */
1012
 */
1007
void page_table_destroy(pte_t *page_table)
1013
void page_table_destroy(pte_t *page_table)
1008
{
1014
{
-
 
1015
#ifdef __OBJC__
-
 
1016
    return [as_t page_table_destroy: page_table];
-
 
1017
#else
1009
        ASSERT(as_operations);
1018
    ASSERT(as_operations);
1010
        ASSERT(as_operations->page_table_destroy);
1019
    ASSERT(as_operations->page_table_destroy);
1011
 
1020
   
1012
        as_operations->page_table_destroy(page_table);
1021
    as_operations->page_table_destroy(page_table);
-
 
1022
#endif
1013
}
1023
}
1014
 
1024
 
1015
/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
    /* Objective-C build: dispatch to the address space object. */
    [as page_table_lock: lock];
#else
    /* The architecture-specific operations table must be installed. */
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_lock);

    as_operations->page_table_lock(as, lock);
#endif
}
1034
 
1048
 
1035
/** Unlock page table.
 *
 * Counterpart of page_table_lock().
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
    /* Objective-C build: dispatch to the address space object. */
    [as page_table_unlock: unlock];
#else
    /* The architecture-specific operations table must be installed. */
    ASSERT(as_operations);
    ASSERT(as_operations->page_table_unlock);

    as_operations->page_table_unlock(as, unlock);
#endif
}
1047
 
1065
 
1048
 
1066
 
1049
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 *     failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
    as_area_t *a;
    btree_node_t *leaf, *lnode;
    int i;

    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
    if (a) {
        /* va is the base address of an address space area */
        mutex_lock(&a->lock);
        return a;
    }

    /*
     * Search the leaf node and the rightmost record of its left neighbour
     * to find out whether this is a miss or va belongs to an address
     * space area found there.
     */

    /* First, search the leaf node itself. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];
        /* Lock the area before testing its bounds so they cannot change. */
        mutex_lock(&a->lock);
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
            /* Hit: return with the area still locked. */
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * Second, locate the left neighbour and test its last record.
     * Because of its position in the B+tree, it must have base < va.
     */
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
    if (lnode) {
        a = (as_area_t *) lnode->value[lnode->keys - 1];
        mutex_lock(&a->lock);
        /* base < va is implied; only the upper bound needs checking. */
        if (va < a->base + a->pages * PAGE_SIZE) {
            return a;
        }
        mutex_unlock(&a->lock);
    }

    /* va is not covered by any area in this address space. */
    return NULL;
}
1104
 
1122
 
1105
/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
              as_area_t *avoid_area)
{
    as_area_t *a;
    btree_node_t *leaf, *node;
    int i;

    /*
     * We don't want any area to have conflicts with NULL page.
     */
    if (overlaps(va, size, NULL, PAGE_SIZE))
        return false;

    /*
     * The leaf node is found in O(log n), where n is proportional to
     * the number of address space areas belonging to as.
     * The check for conflicts is then attempted on the rightmost
     * record in the left neighbour, the leftmost record in the right
     * neighbour and all records in the leaf node itself.
     */

    /* An exact base-address match conflicts unless it is avoid_area. */
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
        if (a != avoid_area)
            return false;
    }

    /* First, check the two border cases. */
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
        /* Only the rightmost record of the left neighbour can reach va. */
        a = (as_area_t *) node->value[node->keys - 1];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
    if (node) {
        /* Only the leftmost record of the right neighbour can reach back. */
        a = (as_area_t *) node->value[0];
        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /* Second, check the leaf node. */
    for (i = 0; i < leaf->keys; i++) {
        a = (as_area_t *) leaf->value[i];

        /* The area being resized/moved must not conflict with itself. */
        if (a == avoid_area)
            continue;

        mutex_lock(&a->lock);
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
            mutex_unlock(&a->lock);
            return false;
        }
        mutex_unlock(&a->lock);
    }

    /*
     * So far, the area does not conflict with other areas.
     * Check if it doesn't conflict with kernel address space.
     */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
        return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
    }

    return true;
}
1190
 
1208
 
1191
/** Return size of the address space area with given base.  */
1209
/** Return size of the address space area with given base.  */
1192
size_t as_get_size(uintptr_t base)
1210
size_t as_get_size(uintptr_t base)
1193
{
1211
{
1194
    ipl_t ipl;
1212
    ipl_t ipl;
1195
    as_area_t *src_area;
1213
    as_area_t *src_area;
1196
    size_t size;
1214
    size_t size;
1197
 
1215
 
1198
    ipl = interrupts_disable();
1216
    ipl = interrupts_disable();
1199
    src_area = find_area_and_lock(AS, base);
1217
    src_area = find_area_and_lock(AS, base);
1200
    if (src_area){
1218
    if (src_area){
1201
        size = src_area->pages * PAGE_SIZE;
1219
        size = src_area->pages * PAGE_SIZE;
1202
        mutex_unlock(&src_area->lock);
1220
        mutex_unlock(&src_area->lock);
1203
    } else {
1221
    } else {
1204
        size = 0;
1222
        size = 0;
1205
    }
1223
    }
1206
    interrupts_restore(ipl);
1224
    interrupts_restore(ipl);
1207
    return size;
1225
    return size;
1208
}
1226
}
1209
 
1227
 
1210
/** Mark portion of address space area as used.
1228
/** Mark portion of address space area as used.
1211
 *
1229
 *
1212
 * The address space area must be already locked.
1230
 * The address space area must be already locked.
1213
 *
1231
 *
1214
 * @param a Address space area.
1232
 * @param a Address space area.
1215
 * @param page First page to be marked.
1233
 * @param page First page to be marked.
1216
 * @param count Number of page to be marked.
1234
 * @param count Number of page to be marked.
1217
 *
1235
 *
1218
 * @return 0 on failure and 1 on success.
1236
 * @return 0 on failure and 1 on success.
1219
 */
1237
 */
1220
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1238
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1221
{
1239
{
1222
    btree_node_t *leaf, *node;
1240
    btree_node_t *leaf, *node;
1223
    count_t pages;
1241
    count_t pages;
1224
    int i;
1242
    int i;
1225
 
1243
 
1226
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1244
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1227
    ASSERT(count);
1245
    ASSERT(count);
1228
 
1246
 
1229
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1247
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1230
    if (pages) {
1248
    if (pages) {
1231
        /*
1249
        /*
1232
         * We hit the beginning of some used space.
1250
         * We hit the beginning of some used space.
1233
         */
1251
         */
1234
        return 0;
1252
        return 0;
1235
    }
1253
    }
1236
 
1254
 
1237
    if (!leaf->keys) {
1255
    if (!leaf->keys) {
1238
        btree_insert(&a->used_space, page, (void *) count, leaf);
1256
        btree_insert(&a->used_space, page, (void *) count, leaf);
1239
        return 1;
1257
        return 1;
1240
    }
1258
    }
1241
 
1259
 
1242
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1260
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1243
    if (node) {
1261
    if (node) {
1244
        uintptr_t left_pg = node->key[node->keys - 1];
1262
        uintptr_t left_pg = node->key[node->keys - 1];
1245
        uintptr_t right_pg = leaf->key[0];
1263
        uintptr_t right_pg = leaf->key[0];
1246
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1264
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1247
        count_t right_cnt = (count_t) leaf->value[0];
1265
        count_t right_cnt = (count_t) leaf->value[0];
1248
       
1266
       
1249
        /*
1267
        /*
1250
         * Examine the possibility that the interval fits
1268
         * Examine the possibility that the interval fits
1251
         * somewhere between the rightmost interval of
1269
         * somewhere between the rightmost interval of
1252
         * the left neigbour and the first interval of the leaf.
1270
         * the left neigbour and the first interval of the leaf.
1253
         */
1271
         */
1254
         
1272
         
1255
        if (page >= right_pg) {
1273
        if (page >= right_pg) {
1256
            /* Do nothing. */
1274
            /* Do nothing. */
1257
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1275
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1258
            left_cnt * PAGE_SIZE)) {
1276
            left_cnt * PAGE_SIZE)) {
1259
            /* The interval intersects with the left interval. */
1277
            /* The interval intersects with the left interval. */
1260
            return 0;
1278
            return 0;
1261
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1279
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1262
            right_cnt * PAGE_SIZE)) {
1280
            right_cnt * PAGE_SIZE)) {
1263
            /* The interval intersects with the right interval. */
1281
            /* The interval intersects with the right interval. */
1264
            return 0;          
1282
            return 0;          
1265
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1283
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1266
            (page + count * PAGE_SIZE == right_pg)) {
1284
            (page + count * PAGE_SIZE == right_pg)) {
1267
            /*
1285
            /*
1268
             * The interval can be added by merging the two already
1286
             * The interval can be added by merging the two already
1269
             * present intervals.
1287
             * present intervals.
1270
             */
1288
             */
1271
            node->value[node->keys - 1] += count + right_cnt;
1289
            node->value[node->keys - 1] += count + right_cnt;
1272
            btree_remove(&a->used_space, right_pg, leaf);
1290
            btree_remove(&a->used_space, right_pg, leaf);
1273
            return 1;
1291
            return 1;
1274
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1292
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1275
            /*
1293
            /*
1276
             * The interval can be added by simply growing the left
1294
             * The interval can be added by simply growing the left
1277
             * interval.
1295
             * interval.
1278
             */
1296
             */
1279
            node->value[node->keys - 1] += count;
1297
            node->value[node->keys - 1] += count;
1280
            return 1;
1298
            return 1;
1281
        } else if (page + count * PAGE_SIZE == right_pg) {
1299
        } else if (page + count * PAGE_SIZE == right_pg) {
1282
            /*
1300
            /*
1283
             * The interval can be addded by simply moving base of
1301
             * The interval can be addded by simply moving base of
1284
             * the right interval down and increasing its size
1302
             * the right interval down and increasing its size
1285
             * accordingly.
1303
             * accordingly.
1286
             */
1304
             */
1287
            leaf->value[0] += count;
1305
            leaf->value[0] += count;
1288
            leaf->key[0] = page;
1306
            leaf->key[0] = page;
1289
            return 1;
1307
            return 1;
1290
        } else {
1308
        } else {
1291
            /*
1309
            /*
1292
             * The interval is between both neigbouring intervals,
1310
             * The interval is between both neigbouring intervals,
1293
             * but cannot be merged with any of them.
1311
             * but cannot be merged with any of them.
1294
             */
1312
             */
1295
            btree_insert(&a->used_space, page, (void *) count,
1313
            btree_insert(&a->used_space, page, (void *) count,
1296
                leaf);
1314
                leaf);
1297
            return 1;
1315
            return 1;
1298
        }
1316
        }
1299
    } else if (page < leaf->key[0]) {
1317
    } else if (page < leaf->key[0]) {
1300
        uintptr_t right_pg = leaf->key[0];
1318
        uintptr_t right_pg = leaf->key[0];
1301
        count_t right_cnt = (count_t) leaf->value[0];
1319
        count_t right_cnt = (count_t) leaf->value[0];
1302
   
1320
   
1303
        /*
1321
        /*
1304
         * Investigate the border case in which the left neighbour does
1322
         * Investigate the border case in which the left neighbour does
1305
         * not exist but the interval fits from the left.
1323
         * not exist but the interval fits from the left.
1306
         */
1324
         */
1307
         
1325
         
1308
        if (overlaps(page, count * PAGE_SIZE, right_pg,
1326
        if (overlaps(page, count * PAGE_SIZE, right_pg,
1309
            right_cnt * PAGE_SIZE)) {
1327
            right_cnt * PAGE_SIZE)) {
1310
            /* The interval intersects with the right interval. */
1328
            /* The interval intersects with the right interval. */
1311
            return 0;
1329
            return 0;
1312
        } else if (page + count * PAGE_SIZE == right_pg) {
1330
        } else if (page + count * PAGE_SIZE == right_pg) {
1313
            /*
1331
            /*
1314
             * The interval can be added by moving the base of the
1332
             * The interval can be added by moving the base of the
1315
             * right interval down and increasing its size
1333
             * right interval down and increasing its size
1316
             * accordingly.
1334
             * accordingly.
1317
             */
1335
             */
1318
            leaf->key[0] = page;
1336
            leaf->key[0] = page;
1319
            leaf->value[0] += count;
1337
            leaf->value[0] += count;
1320
            return 1;
1338
            return 1;
1321
        } else {
1339
        } else {
1322
            /*
1340
            /*
1323
             * The interval doesn't adjoin with the right interval.
1341
             * The interval doesn't adjoin with the right interval.
1324
             * It must be added individually.
1342
             * It must be added individually.
1325
             */
1343
             */
1326
            btree_insert(&a->used_space, page, (void *) count,
1344
            btree_insert(&a->used_space, page, (void *) count,
1327
                leaf);
1345
                leaf);
1328
            return 1;
1346
            return 1;
1329
        }
1347
        }
1330
    }
1348
    }
1331
 
1349
 
1332
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1350
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1333
    if (node) {
1351
    if (node) {
1334
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1352
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1335
        uintptr_t right_pg = node->key[0];
1353
        uintptr_t right_pg = node->key[0];
1336
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1354
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1337
        count_t right_cnt = (count_t) node->value[0];
1355
        count_t right_cnt = (count_t) node->value[0];
1338
       
1356
       
1339
        /*
1357
        /*
1340
         * Examine the possibility that the interval fits
1358
         * Examine the possibility that the interval fits
1341
         * somewhere between the leftmost interval of
1359
         * somewhere between the leftmost interval of
1342
         * the right neigbour and the last interval of the leaf.
1360
         * the right neigbour and the last interval of the leaf.
1343
         */
1361
         */
1344
 
1362
 
1345
        if (page < left_pg) {
1363
        if (page < left_pg) {
1346
            /* Do nothing. */
1364
            /* Do nothing. */
1347
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1365
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1348
            left_cnt * PAGE_SIZE)) {
1366
            left_cnt * PAGE_SIZE)) {
1349
            /* The interval intersects with the left interval. */
1367
            /* The interval intersects with the left interval. */
1350
            return 0;
1368
            return 0;
1351
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1369
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1352
            right_cnt * PAGE_SIZE)) {
1370
            right_cnt * PAGE_SIZE)) {
1353
            /* The interval intersects with the right interval. */
1371
            /* The interval intersects with the right interval. */
1354
            return 0;          
1372
            return 0;          
1355
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1373
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1356
            (page + count * PAGE_SIZE == right_pg)) {
1374
            (page + count * PAGE_SIZE == right_pg)) {
1357
            /*
1375
            /*
1358
             * The interval can be added by merging the two already
1376
             * The interval can be added by merging the two already
1359
             * present intervals.
1377
             * present intervals.
1360
             * */
1378
             * */
1361
            leaf->value[leaf->keys - 1] += count + right_cnt;
1379
            leaf->value[leaf->keys - 1] += count + right_cnt;
1362
            btree_remove(&a->used_space, right_pg, node);
1380
            btree_remove(&a->used_space, right_pg, node);
1363
            return 1;
1381
            return 1;
1364
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1382
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1365
            /*
1383
            /*
1366
             * The interval can be added by simply growing the left
1384
             * The interval can be added by simply growing the left
1367
             * interval.
1385
             * interval.
1368
             * */
1386
             * */
1369
            leaf->value[leaf->keys - 1] +=  count;
1387
            leaf->value[leaf->keys - 1] +=  count;
1370
            return 1;
1388
            return 1;
1371
        } else if (page + count * PAGE_SIZE == right_pg) {
1389
        } else if (page + count * PAGE_SIZE == right_pg) {
1372
            /*
1390
            /*
1373
             * The interval can be addded by simply moving base of
1391
             * The interval can be addded by simply moving base of
1374
             * the right interval down and increasing its size
1392
             * the right interval down and increasing its size
1375
             * accordingly.
1393
             * accordingly.
1376
             */
1394
             */
1377
            node->value[0] += count;
1395
            node->value[0] += count;
1378
            node->key[0] = page;
1396
            node->key[0] = page;
1379
            return 1;
1397
            return 1;
1380
        } else {
1398
        } else {
1381
            /*
1399
            /*
1382
             * The interval is between both neigbouring intervals,
1400
             * The interval is between both neigbouring intervals,
1383
             * but cannot be merged with any of them.
1401
             * but cannot be merged with any of them.
1384
             */
1402
             */
1385
            btree_insert(&a->used_space, page, (void *) count,
1403
            btree_insert(&a->used_space, page, (void *) count,
1386
                leaf);
1404
                leaf);
1387
            return 1;
1405
            return 1;
1388
        }
1406
        }
1389
    } else if (page >= leaf->key[leaf->keys - 1]) {
1407
    } else if (page >= leaf->key[leaf->keys - 1]) {
1390
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1408
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1391
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1409
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1392
   
1410
   
1393
        /*
1411
        /*
1394
         * Investigate the border case in which the right neighbour
1412
         * Investigate the border case in which the right neighbour
1395
         * does not exist but the interval fits from the right.
1413
         * does not exist but the interval fits from the right.
1396
         */
1414
         */
1397
         
1415
         
1398
        if (overlaps(page, count * PAGE_SIZE, left_pg,
1416
        if (overlaps(page, count * PAGE_SIZE, left_pg,
1399
            left_cnt * PAGE_SIZE)) {
1417
            left_cnt * PAGE_SIZE)) {
1400
            /* The interval intersects with the left interval. */
1418
            /* The interval intersects with the left interval. */
1401
            return 0;
1419
            return 0;
1402
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1420
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1403
            /*
1421
            /*
1404
             * The interval can be added by growing the left
1422
             * The interval can be added by growing the left
1405
             * interval.
1423
             * interval.
1406
             */
1424
             */
1407
            leaf->value[leaf->keys - 1] += count;
1425
            leaf->value[leaf->keys - 1] += count;
1408
            return 1;
1426
            return 1;
1409
        } else {
1427
        } else {
1410
            /*
1428
            /*
1411
             * The interval doesn't adjoin with the left interval.
1429
             * The interval doesn't adjoin with the left interval.
1412
             * It must be added individually.
1430
             * It must be added individually.
1413
             */
1431
             */
1414
            btree_insert(&a->used_space, page, (void *) count,
1432
            btree_insert(&a->used_space, page, (void *) count,
1415
                leaf);
1433
                leaf);
1416
            return 1;
1434
            return 1;
1417
        }
1435
        }
1418
    }
1436
    }
1419
   
1437
   
1420
    /*
1438
    /*
1421
     * Note that if the algorithm made it thus far, the interval can fit
1439
     * Note that if the algorithm made it thus far, the interval can fit
1422
     * only between two other intervals of the leaf. The two border cases
1440
     * only between two other intervals of the leaf. The two border cases
1423
     * were already resolved.
1441
     * were already resolved.
1424
     */
1442
     */
1425
    for (i = 1; i < leaf->keys; i++) {
1443
    for (i = 1; i < leaf->keys; i++) {
1426
        if (page < leaf->key[i]) {
1444
        if (page < leaf->key[i]) {
1427
            uintptr_t left_pg = leaf->key[i - 1];
1445
            uintptr_t left_pg = leaf->key[i - 1];
1428
            uintptr_t right_pg = leaf->key[i];
1446
            uintptr_t right_pg = leaf->key[i];
1429
            count_t left_cnt = (count_t) leaf->value[i - 1];
1447
            count_t left_cnt = (count_t) leaf->value[i - 1];
1430
            count_t right_cnt = (count_t) leaf->value[i];
1448
            count_t right_cnt = (count_t) leaf->value[i];
1431
 
1449
 
1432
            /*
1450
            /*
1433
             * The interval fits between left_pg and right_pg.
1451
             * The interval fits between left_pg and right_pg.
1434
             */
1452
             */
1435
 
1453
 
1436
            if (overlaps(page, count * PAGE_SIZE, left_pg,
1454
            if (overlaps(page, count * PAGE_SIZE, left_pg,
1437
                left_cnt * PAGE_SIZE)) {
1455
                left_cnt * PAGE_SIZE)) {
1438
                /*
1456
                /*
1439
                 * The interval intersects with the left
1457
                 * The interval intersects with the left
1440
                 * interval.
1458
                 * interval.
1441
                 */
1459
                 */
1442
                return 0;
1460
                return 0;
1443
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1461
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1444
                right_cnt * PAGE_SIZE)) {
1462
                right_cnt * PAGE_SIZE)) {
1445
                /*
1463
                /*
1446
                 * The interval intersects with the right
1464
                 * The interval intersects with the right
1447
                 * interval.
1465
                 * interval.
1448
                 */
1466
                 */
1449
                return 0;          
1467
                return 0;          
1450
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1468
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1451
                (page + count * PAGE_SIZE == right_pg)) {
1469
                (page + count * PAGE_SIZE == right_pg)) {
1452
                /*
1470
                /*
1453
                 * The interval can be added by merging the two
1471
                 * The interval can be added by merging the two
1454
                 * already present intervals.
1472
                 * already present intervals.
1455
                 */
1473
                 */
1456
                leaf->value[i - 1] += count + right_cnt;
1474
                leaf->value[i - 1] += count + right_cnt;
1457
                btree_remove(&a->used_space, right_pg, leaf);
1475
                btree_remove(&a->used_space, right_pg, leaf);
1458
                return 1;
1476
                return 1;
1459
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1477
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1460
                /*
1478
                /*
1461
                 * The interval can be added by simply growing
1479
                 * The interval can be added by simply growing
1462
                 * the left interval.
1480
                 * the left interval.
1463
                 */
1481
                 */
1464
                leaf->value[i - 1] += count;
1482
                leaf->value[i - 1] += count;
1465
                return 1;
1483
                return 1;
1466
            } else if (page + count * PAGE_SIZE == right_pg) {
1484
            } else if (page + count * PAGE_SIZE == right_pg) {
1467
                /*
1485
                /*
1468
                     * The interval can be addded by simply moving
1486
                     * The interval can be addded by simply moving
1469
                 * base of the right interval down and
1487
                 * base of the right interval down and
1470
                 * increasing its size accordingly.
1488
                 * increasing its size accordingly.
1471
                 */
1489
                 */
1472
                leaf->value[i] += count;
1490
                leaf->value[i] += count;
1473
                leaf->key[i] = page;
1491
                leaf->key[i] = page;
1474
                return 1;
1492
                return 1;
1475
            } else {
1493
            } else {
1476
                /*
1494
                /*
1477
                 * The interval is between both neigbouring
1495
                 * The interval is between both neigbouring
1478
                 * intervals, but cannot be merged with any of
1496
                 * intervals, but cannot be merged with any of
1479
                 * them.
1497
                 * them.
1480
                 */
1498
                 */
1481
                btree_insert(&a->used_space, page,
1499
                btree_insert(&a->used_space, page,
1482
                    (void *) count, leaf);
1500
                    (void *) count, leaf);
1483
                return 1;
1501
                return 1;
1484
            }
1502
            }
1485
        }
1503
        }
1486
    }
1504
    }
1487
 
1505
 
1488
    panic("Inconsistency detected while adding %d pages of used space at "
1506
    panic("Inconsistency detected while adding %d pages of used space at "
1489
        "%p.\n", count, page);
1507
        "%p.\n", count, page);
1490
}
1508
}
1491
 
1509
 
1492
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * The interval being removed must be fully covered by exactly one existing
 * interval recorded in the area's used_space B+tree; otherwise the tree is
 * inconsistent and the kernel panics.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of page to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
    btree_node_t *leaf, *node;
    count_t pages;
    int i;

    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
    ASSERT(count);

    /*
     * btree_search() returns the value stored under 'page' (i.e. the
     * interval length in pages) when 'page' is an exact key, and also
     * fills in the leaf that does/would contain the key.
     */
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
    if (pages) {
        /*
         * We are lucky, page is the beginning of some interval.
         */
        if (count > pages) {
            /* Removal would extend past the end of the interval. */
            return 0;
        } else if (count == pages) {
            /* Exact match: drop the whole interval. */
            btree_remove(&a->used_space, page, leaf);
            return 1;
        } else {
            /*
             * Find the respective interval.
             * Decrease its size and relocate its start address.
             */
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += count * PAGE_SIZE;
                    leaf->value[i] -= count;
                    return 1;
                }
            }
            /* Key reported present but not found in the leaf. */
            goto error;
        }
    }

    /*
     * 'page' is not a key. If it precedes the leaf's first key, the
     * covering interval (if any) is the rightmost interval of the left
     * neighbour leaf.
     */
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
    if (node && page < leaf->key[0]) {
        uintptr_t left_pg = node->key[node->keys - 1];
        count_t left_cnt = (count_t) node->value[node->keys - 1];

        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
            if (page + count * PAGE_SIZE ==
                left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour and can be
                 * removed by updating the size of the bigger
                 * interval.
                 */
                node->value[node->keys - 1] -= count;
                return 1;
            } else if (page + count * PAGE_SIZE <
                left_pg + left_cnt*PAGE_SIZE) {
                count_t new_cnt;
               
                /*
                 * The interval is contained in the rightmost
                 * interval of the left neighbour but its
                 * removal requires both updating the size of
                 * the original interval and also inserting a
                 * new interval.
                 */
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        /* Hole starts inside the removed range but ends outside it. */
        return 0;
    } else if (page < leaf->key[0]) {
        /* No left neighbour: 'page' lies before all used space. */
        return 0;
    }
   
    /*
     * Border case: 'page' lies past the leaf's last key, so the covering
     * interval can only be the rightmost interval of this leaf.
     */
    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
            if (page + count * PAGE_SIZE ==
                left_pg + left_cnt * PAGE_SIZE) {
                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf and can be removed by
                 * updating the size of the bigger interval.
                 */
                leaf->value[leaf->keys - 1] -= count;
                return 1;
            } else if (page + count * PAGE_SIZE < left_pg +
                left_cnt * PAGE_SIZE) {
                count_t new_cnt;
               
                /*
                 * The interval is contained in the rightmost
                 * interval of the leaf but its removal
                 * requires both updating the size of the
                 * original interval and also inserting a new
                 * interval.
                 */
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
                btree_insert(&a->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                return 1;
            }
        }
        return 0;
    }  
   
    /*
     * The border cases have been already resolved.
     * Now the interval can be only between intervals of the leaf.
     */
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            count_t left_cnt = (count_t) leaf->value[i - 1];

            /*
             * Now the interval is between intervals corresponding
             * to (i - 1) and i.
             */
            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
                count * PAGE_SIZE)) {
                if (page + count * PAGE_SIZE ==
                    left_pg + left_cnt*PAGE_SIZE) {
                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf and can
                     * be removed by updating the size of
                     * the bigger interval.
                     */
                    leaf->value[i - 1] -= count;
                    return 1;
                } else if (page + count * PAGE_SIZE <
                    left_pg + left_cnt * PAGE_SIZE) {
                    count_t new_cnt;
               
                    /*
                     * The interval is contained in the
                     * interval (i - 1) of the leaf but its
                     * removal requires both updating the
                     * size of the original interval and
                     * also inserting a new interval.
                     */
                    new_cnt = ((left_pg +
                        left_cnt * PAGE_SIZE) -
                        (page + count * PAGE_SIZE)) >>
                        PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
                    btree_insert(&a->used_space, page +
                        count * PAGE_SIZE, (void *) new_cnt,
                        leaf);
                    return 1;
                }
            }
            return 0;
        }
    }

error:
    /*
     * NOTE(review): 'count' is count_t and 'page' is uintptr_t; the %d
     * conversion assumes count fits in int — verify against count_t's
     * definition for this architecture.
     */
    panic("Inconsistency detected while removing %d pages of used space "
        "from %p.\n", count, page);
}
1670
 
1688
 
1671
/** Remove reference to address space area share info.
1689
/** Remove reference to address space area share info.
1672
 *
1690
 *
1673
 * If the reference count drops to 0, the sh_info is deallocated.
1691
 * If the reference count drops to 0, the sh_info is deallocated.
1674
 *
1692
 *
1675
 * @param sh_info Pointer to address space area share info.
1693
 * @param sh_info Pointer to address space area share info.
1676
 */
1694
 */
1677
void sh_info_remove_reference(share_info_t *sh_info)
1695
void sh_info_remove_reference(share_info_t *sh_info)
1678
{
1696
{
1679
    bool dealloc = false;
1697
    bool dealloc = false;
1680
 
1698
 
1681
    mutex_lock(&sh_info->lock);
1699
    mutex_lock(&sh_info->lock);
1682
    ASSERT(sh_info->refcount);
1700
    ASSERT(sh_info->refcount);
1683
    if (--sh_info->refcount == 0) {
1701
    if (--sh_info->refcount == 0) {
1684
        dealloc = true;
1702
        dealloc = true;
1685
        link_t *cur;
1703
        link_t *cur;
1686
       
1704
       
1687
        /*
1705
        /*
1688
         * Now walk carefully the pagemap B+tree and free/remove
1706
         * Now walk carefully the pagemap B+tree and free/remove
1689
         * reference from all frames found there.
1707
         * reference from all frames found there.
1690
         */
1708
         */
1691
        for (cur = sh_info->pagemap.leaf_head.next;
1709
        for (cur = sh_info->pagemap.leaf_head.next;
1692
            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1710
            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1693
            btree_node_t *node;
1711
            btree_node_t *node;
1694
            int i;
1712
            int i;
1695
           
1713
           
1696
            node = list_get_instance(cur, btree_node_t, leaf_link);
1714
            node = list_get_instance(cur, btree_node_t, leaf_link);
1697
            for (i = 0; i < node->keys; i++)
1715
            for (i = 0; i < node->keys; i++)
1698
                frame_free((uintptr_t) node->value[i]);
1716
                frame_free((uintptr_t) node->value[i]);
1699
        }
1717
        }
1700
       
1718
       
1701
    }
1719
    }
1702
    mutex_unlock(&sh_info->lock);
1720
    mutex_unlock(&sh_info->lock);
1703
   
1721
   
1704
    if (dealloc) {
1722
    if (dealloc) {
1705
        btree_destroy(&sh_info->pagemap);
1723
        btree_destroy(&sh_info->pagemap);
1706
        free(sh_info);
1724
        free(sh_info);
1707
    }
1725
    }
1708
}
1726
}
1709
 
1727
 
1710
/*
1728
/*
1711
 * Address space related syscalls.
1729
 * Address space related syscalls.
1712
 */
1730
 */
1713
 
1731
 
1714
/** Wrapper for as_area_create(). */
1732
/** Wrapper for as_area_create(). */
1715
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1733
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1716
{
1734
{
1717
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1735
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1718
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
1736
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
1719
        return (unative_t) address;
1737
        return (unative_t) address;
1720
    else
1738
    else
1721
        return (unative_t) -1;
1739
        return (unative_t) -1;
1722
}
1740
}
1723
 
1741
 
1724
/** Wrapper for as_area_resize(). */
1742
/** Wrapper for as_area_resize(). */
1725
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1743
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1726
{
1744
{
1727
    return (unative_t) as_area_resize(AS, address, size, 0);
1745
    return (unative_t) as_area_resize(AS, address, size, 0);
1728
}
1746
}
1729
 
1747
 
1730
/** Wrapper for as_area_destroy(). */
1748
/** Wrapper for as_area_destroy(). */
1731
unative_t sys_as_area_destroy(uintptr_t address)
1749
unative_t sys_as_area_destroy(uintptr_t address)
1732
{
1750
{
1733
    return (unative_t) as_area_destroy(AS, address);
1751
    return (unative_t) as_area_destroy(AS, address);
1734
}
1752
}
1735
 
1753
 
1736
/** Print out information about address space.
1754
/** Print out information about address space.
1737
 *
1755
 *
1738
 * @param as Address space.
1756
 * @param as Address space.
1739
 */
1757
 */
1740
void as_print(as_t *as)
1758
void as_print(as_t *as)
1741
{
1759
{
1742
    ipl_t ipl;
1760
    ipl_t ipl;
1743
   
1761
   
1744
    ipl = interrupts_disable();
1762
    ipl = interrupts_disable();
1745
    mutex_lock(&as->lock);
1763
    mutex_lock(&as->lock);
1746
   
1764
   
1747
    /* print out info about address space areas */
1765
    /* print out info about address space areas */
1748
    link_t *cur;
1766
    link_t *cur;
1749
    for (cur = as->as_area_btree.leaf_head.next;
1767
    for (cur = as->as_area_btree.leaf_head.next;
1750
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1768
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1751
        btree_node_t *node;
1769
        btree_node_t *node;
1752
       
1770
       
1753
        node = list_get_instance(cur, btree_node_t, leaf_link);
1771
        node = list_get_instance(cur, btree_node_t, leaf_link);
1754
       
1772
       
1755
        int i;
1773
        int i;
1756
        for (i = 0; i < node->keys; i++) {
1774
        for (i = 0; i < node->keys; i++) {
1757
            as_area_t *area = node->value[i];
1775
            as_area_t *area = node->value[i];
1758
       
1776
       
1759
            mutex_lock(&area->lock);
1777
            mutex_lock(&area->lock);
1760
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1778
            printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
1761
                area, area->base, area->pages, area->base,
1779
                area, area->base, area->pages, area->base,
1762
                area->base + area->pages*PAGE_SIZE);
1780
                area->base + area->pages*PAGE_SIZE);
1763
            mutex_unlock(&area->lock);
1781
            mutex_unlock(&area->lock);
1764
        }
1782
        }
1765
    }
1783
    }
1766
   
1784
   
1767
    mutex_unlock(&as->lock);
1785
    mutex_unlock(&as->lock);
1768
    interrupts_restore(ipl);
1786
    interrupts_restore(ipl);
1769
}
1787
}
1770
 
1788
 
1771
/** @}
1789
/** @}
1772
 */
1790
 */
1773
 
1791