Subversion Repositories HelenOS

Rev

Rev 3424 | Rev 3431 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 3424 Rev 3425
1
/*
1
/*
2
 * Copyright (c) 2001-2006 Jakub Jermar
2
 * Copyright (c) 2001-2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup genericmm
29
/** @addtogroup genericmm
30
 * @{
30
 * @{
31
 */
31
 */
32
 
32
 
33
/**
33
/**
34
 * @file
34
 * @file
35
 * @brief   Address space related functions.
35
 * @brief   Address space related functions.
36
 *
36
 *
37
 * This file contains address space manipulation functions.
37
 * This file contains address space manipulation functions.
38
 * Roughly speaking, this is a higher-level client of
38
 * Roughly speaking, this is a higher-level client of
39
 * Virtual Address Translation (VAT) subsystem.
39
 * Virtual Address Translation (VAT) subsystem.
40
 *
40
 *
41
 * Functionality provided by this file allows one to
41
 * Functionality provided by this file allows one to
42
 * create address spaces and create, resize and share
42
 * create address spaces and create, resize and share
43
 * address space areas.
43
 * address space areas.
44
 *
44
 *
45
 * @see page.c
45
 * @see page.c
46
 *
46
 *
47
 */
47
 */
48
 
48
 
49
#include <mm/as.h>
49
#include <mm/as.h>
50
#include <arch/mm/as.h>
50
#include <arch/mm/as.h>
51
#include <mm/page.h>
51
#include <mm/page.h>
52
#include <mm/frame.h>
52
#include <mm/frame.h>
53
#include <mm/slab.h>
53
#include <mm/slab.h>
54
#include <mm/tlb.h>
54
#include <mm/tlb.h>
55
#include <arch/mm/page.h>
55
#include <arch/mm/page.h>
56
#include <genarch/mm/page_pt.h>
56
#include <genarch/mm/page_pt.h>
57
#include <genarch/mm/page_ht.h>
57
#include <genarch/mm/page_ht.h>
58
#include <mm/asid.h>
58
#include <mm/asid.h>
59
#include <arch/mm/asid.h>
59
#include <arch/mm/asid.h>
60
#include <preemption.h>
60
#include <preemption.h>
61
#include <synch/spinlock.h>
61
#include <synch/spinlock.h>
62
#include <synch/mutex.h>
62
#include <synch/mutex.h>
63
#include <adt/list.h>
63
#include <adt/list.h>
64
#include <adt/btree.h>
64
#include <adt/btree.h>
65
#include <proc/task.h>
65
#include <proc/task.h>
66
#include <proc/thread.h>
66
#include <proc/thread.h>
67
#include <arch/asm.h>
67
#include <arch/asm.h>
68
#include <panic.h>
68
#include <panic.h>
69
#include <debug.h>
69
#include <debug.h>
70
#include <print.h>
70
#include <print.h>
71
#include <memstr.h>
71
#include <memstr.h>
72
#include <macros.h>
72
#include <macros.h>
73
#include <arch.h>
73
#include <arch.h>
74
#include <errno.h>
74
#include <errno.h>
75
#include <config.h>
75
#include <config.h>
76
#include <align.h>
76
#include <align.h>
77
#include <arch/types.h>
77
#include <arch/types.h>
78
#include <syscall/copy.h>
78
#include <syscall/copy.h>
79
#include <arch/interrupt.h>
79
#include <arch/interrupt.h>
80
 
80
 
81
#ifdef CONFIG_VIRT_IDX_DCACHE
81
#ifdef CONFIG_VIRT_IDX_DCACHE
82
#include <arch/mm/cache.h>
82
#include <arch/mm/cache.h>
83
#endif /* CONFIG_VIRT_IDX_DCACHE */
83
#endif /* CONFIG_VIRT_IDX_DCACHE */
84
 
84
 
85
#ifndef __OBJC__
-
 
86
/**
85
/**
87
 * Each architecture decides what functions will be used to carry out
86
 * Each architecture decides what functions will be used to carry out
88
 * address space operations such as creating or locking page tables.
87
 * address space operations such as creating or locking page tables.
89
 */
88
 */
90
as_operations_t *as_operations = NULL;
89
as_operations_t *as_operations = NULL;
91
 
90
 
92
/**
91
/**
93
 * Slab for as_t objects.
92
 * Slab for as_t objects.
94
 */
93
 */
95
static slab_cache_t *as_slab;
94
static slab_cache_t *as_slab;
96
#endif
-
 
97
 
95
 
98
/**
96
/**
99
 * This lock serializes access to the ASID subsystem.
97
 * This lock serializes access to the ASID subsystem.
100
 * It protects:
98
 * It protects:
101
 * - inactive_as_with_asid_head list
99
 * - inactive_as_with_asid_head list
102
 * - as->asid for each as of the as_t type
100
 * - as->asid for each as of the as_t type
103
 * - asids_allocated counter
101
 * - asids_allocated counter
104
 */
102
 */
105
SPINLOCK_INITIALIZE(asidlock);
103
SPINLOCK_INITIALIZE(asidlock);
106
 
104
 
107
/**
105
/**
108
 * This list contains address spaces that are not active on any
106
 * This list contains address spaces that are not active on any
109
 * processor and that have valid ASID.
107
 * processor and that have valid ASID.
110
 */
108
 */
111
LIST_INITIALIZE(inactive_as_with_asid_head);
109
LIST_INITIALIZE(inactive_as_with_asid_head);
112
 
110
 
113
/** Kernel address space. */
111
/** Kernel address space. */
114
as_t *AS_KERNEL = NULL;
112
as_t *AS_KERNEL = NULL;
115
 
113
 
116
static int area_flags_to_page_flags(int aflags);
114
static int area_flags_to_page_flags(int);
117
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
115
static as_area_t *find_area_and_lock(as_t *, uintptr_t);
118
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
116
static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *);
119
    as_area_t *avoid_area);
-
 
120
static void sh_info_remove_reference(share_info_t *sh_info);
117
static void sh_info_remove_reference(share_info_t *);
121
 
118
 
122
#ifndef __OBJC__
-
 
123
static int as_constructor(void *obj, int flags)
119
static int as_constructor(void *obj, int flags)
124
{
120
{
125
    as_t *as = (as_t *) obj;
121
    as_t *as = (as_t *) obj;
126
    int rc;
122
    int rc;
127
 
123
 
128
    link_initialize(&as->inactive_as_with_asid_link);
124
    link_initialize(&as->inactive_as_with_asid_link);
129
    mutex_initialize(&as->lock);   
125
    mutex_initialize(&as->lock, MUTEX_PASSIVE);
130
   
126
   
131
    rc = as_constructor_arch(as, flags);
127
    rc = as_constructor_arch(as, flags);
132
   
128
   
133
    return rc;
129
    return rc;
134
}
130
}
135
 
131
 
136
static int as_destructor(void *obj)
132
static int as_destructor(void *obj)
137
{
133
{
138
    as_t *as = (as_t *) obj;
134
    as_t *as = (as_t *) obj;
139
 
135
 
140
    return as_destructor_arch(as);
136
    return as_destructor_arch(as);
141
}
137
}
142
#endif
-
 
143
 
138
 
144
/** Initialize address space subsystem. */
139
/** Initialize address space subsystem. */
145
void as_init(void)
140
void as_init(void)
146
{
141
{
147
    as_arch_init();
142
    as_arch_init();
148
 
143
 
149
#ifndef __OBJC__
-
 
150
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
144
    as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
151
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
145
        as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
152
#endif
-
 
153
   
146
   
154
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
147
    AS_KERNEL = as_create(FLAG_AS_KERNEL);
155
    if (!AS_KERNEL)
148
    if (!AS_KERNEL)
156
        panic("can't create kernel address space\n");
149
        panic("can't create kernel address space\n");
157
   
150
   
158
}
151
}
159
 
152
 
160
/** Create address space.
153
/** Create address space.
161
 *
154
 *
162
 * @param flags Flags that influence way in wich the address space is created.
155
 * @param flags     Flags that influence the way in wich the address space
-
 
156
 *          is created.
163
 */
157
 */
164
as_t *as_create(int flags)
158
as_t *as_create(int flags)
165
{
159
{
166
    as_t *as;
160
    as_t *as;
167
 
161
 
168
#ifdef __OBJC__
-
 
169
    as = [as_t new];
-
 
170
    link_initialize(&as->inactive_as_with_asid_link);
-
 
171
    mutex_initialize(&as->lock);   
-
 
172
    (void) as_constructor_arch(as, flags);
-
 
173
#else
-
 
174
    as = (as_t *) slab_alloc(as_slab, 0);
162
    as = (as_t *) slab_alloc(as_slab, 0);
175
#endif
-
 
176
    (void) as_create_arch(as, 0);
163
    (void) as_create_arch(as, 0);
177
   
164
   
178
    btree_create(&as->as_area_btree);
165
    btree_create(&as->as_area_btree);
179
   
166
   
180
    if (flags & FLAG_AS_KERNEL)
167
    if (flags & FLAG_AS_KERNEL)
181
        as->asid = ASID_KERNEL;
168
        as->asid = ASID_KERNEL;
182
    else
169
    else
183
        as->asid = ASID_INVALID;
170
        as->asid = ASID_INVALID;
184
   
171
   
185
    atomic_set(&as->refcount, 0);
172
    atomic_set(&as->refcount, 0);
186
    as->cpu_refcount = 0;
173
    as->cpu_refcount = 0;
187
#ifdef AS_PAGE_TABLE
174
#ifdef AS_PAGE_TABLE
188
    as->genarch.page_table = page_table_create(flags);
175
    as->genarch.page_table = page_table_create(flags);
189
#else
176
#else
190
    page_table_create(flags);
177
    page_table_create(flags);
191
#endif
178
#endif
192
 
179
 
193
    return as;
180
    return as;
194
}
181
}
195
 
182
 
196
/** Destroy adress space.
183
/** Destroy adress space.
197
 *
184
 *
198
 * When there are no tasks referencing this address space (i.e. its refcount is
185
 * When there are no tasks referencing this address space (i.e. its refcount is
199
 * zero), the address space can be destroyed.
186
 * zero), the address space can be destroyed.
200
 *
187
 *
201
 * We know that we don't hold any spinlock.
188
 * We know that we don't hold any spinlock.
-
 
189
 *
-
 
190
 * @param as        Address space to be destroyed.
202
 */
191
 */
203
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;
	DEADLOCK_PROBE_INIT(p_asidlock);

	/* Nobody may reference the address space anymore. */
	ASSERT(atomic_get(&as->refcount) == 0);
	
	/*
	 * Since there is no reference to this area,
	 * it is safe not to lock its mutex.
	 */

	/*
	 * We need to avoid deadlock between TLB shootdown and asidlock.
	 * We therefore try to take asid conditionally and if we don't succeed,
	 * we enable interrupts and try again. This is done while preemption is
	 * disabled to prevent nested context switches. We also depend on the
	 * fact that so far no spinlocks are held.
	 */
	preemption_disable();
	ipl = interrupts_read();
retry:
	interrupts_disable();
	if (!spinlock_trylock(&asidlock)) {
		interrupts_enable();
		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
		goto retry;
	}
	preemption_enable();	/* Interrupts disabled, enable preemption */
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&asidlock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next,
		    btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
	page_table_destroy(as->genarch.page_table);
#else
	page_table_destroy(NULL);
#endif

	interrupts_restore(ipl);

	slab_free(as_slab, as);
}
272
 
257
 
273
/** Create address space area of common attributes.
258
/** Create address space area of common attributes.
274
 *
259
 *
275
 * The created address space area is added to the target address space.
260
 * The created address space area is added to the target address space.
276
 *
261
 *
277
 * @param as Target address space.
262
 * @param as        Target address space.
278
 * @param flags Flags of the area memory.
263
 * @param flags     Flags of the area memory.
279
 * @param size Size of area.
264
 * @param size      Size of area.
280
 * @param base Base address of area.
265
 * @param base      Base address of area.
281
 * @param attrs Attributes of the area.
266
 * @param attrs     Attributes of the area.
282
 * @param backend Address space area backend. NULL if no backend is used.
267
 * @param backend   Address space area backend. NULL if no backend is used.
283
 * @param backend_data NULL or a pointer to an array holding two void *.
268
 * @param backend_data  NULL or a pointer to an array holding two void *.
284
 *
269
 *
285
 * @return Address space area on success or NULL on failure.
270
 * @return      Address space area on success or NULL on failure.
286
 */
271
 */
287
as_area_t *
272
as_area_t *
288
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
273
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
289
           mem_backend_t *backend, mem_backend_data_t *backend_data)
274
    mem_backend_t *backend, mem_backend_data_t *backend_data)
290
{
275
{
291
    ipl_t ipl;
276
    ipl_t ipl;
292
    as_area_t *a;
277
    as_area_t *a;
293
   
278
   
294
    if (base % PAGE_SIZE)
279
    if (base % PAGE_SIZE)
295
        return NULL;
280
        return NULL;
296
 
281
 
297
    if (!size)
282
    if (!size)
298
        return NULL;
283
        return NULL;
299
 
284
 
300
    /* Writeable executable areas are not supported. */
285
    /* Writeable executable areas are not supported. */
301
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
286
    if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
302
        return NULL;
287
        return NULL;
303
   
288
   
304
    ipl = interrupts_disable();
289
    ipl = interrupts_disable();
305
    mutex_lock(&as->lock);
290
    mutex_lock(&as->lock);
306
   
291
   
307
    if (!check_area_conflicts(as, base, size, NULL)) {
292
    if (!check_area_conflicts(as, base, size, NULL)) {
308
        mutex_unlock(&as->lock);
293
        mutex_unlock(&as->lock);
309
        interrupts_restore(ipl);
294
        interrupts_restore(ipl);
310
        return NULL;
295
        return NULL;
311
    }
296
    }
312
   
297
   
313
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
298
    a = (as_area_t *) malloc(sizeof(as_area_t), 0);
314
 
299
 
315
    mutex_initialize(&a->lock);
300
    mutex_initialize(&a->lock, MUTEX_PASSIVE);
316
   
301
   
317
    a->as = as;
302
    a->as = as;
318
    a->flags = flags;
303
    a->flags = flags;
319
    a->attributes = attrs;
304
    a->attributes = attrs;
320
    a->pages = SIZE2FRAMES(size);
305
    a->pages = SIZE2FRAMES(size);
321
    a->base = base;
306
    a->base = base;
322
    a->sh_info = NULL;
307
    a->sh_info = NULL;
323
    a->backend = backend;
308
    a->backend = backend;
324
    if (backend_data)
309
    if (backend_data)
325
        a->backend_data = *backend_data;
310
        a->backend_data = *backend_data;
326
    else
311
    else
327
        memsetb(&a->backend_data, sizeof(a->backend_data), 0);
312
        memsetb(&a->backend_data, sizeof(a->backend_data), 0);
328
 
313
 
329
    btree_create(&a->used_space);
314
    btree_create(&a->used_space);
330
   
315
   
331
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
316
    btree_insert(&as->as_area_btree, base, (void *) a, NULL);
332
 
317
 
333
    mutex_unlock(&as->lock);
318
    mutex_unlock(&as->lock);
334
    interrupts_restore(ipl);
319
    interrupts_restore(ipl);
335
 
320
 
336
    return a;
321
    return a;
337
}
322
}
338
 
323
 
339
/** Find address space area and change it.
324
/** Find address space area and change it.
340
 *
325
 *
341
 * @param as Address space.
326
 * @param as        Address space.
342
 * @param address Virtual address belonging to the area to be changed. Must be
327
 * @param address   Virtual address belonging to the area to be changed.
343
 *     page-aligned.
328
 *          Must be page-aligned.
344
 * @param size New size of the virtual memory block starting at address.
329
 * @param size      New size of the virtual memory block starting at
-
 
330
 *          address.
345
 * @param flags Flags influencing the remap operation. Currently unused.
331
 * @param flags     Flags influencing the remap operation. Currently unused.
346
 *
332
 *
347
 * @return Zero on success or a value from @ref errno.h otherwise.
333
 * @return      Zero on success or a value from @ref errno.h otherwise.
348
 */
334
 */
349
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;
	
	ipl = interrupts_disable();
	mutex_lock(&as->lock);
	
	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}
	
	if (pages < area->pages) {
		bool cond;
		uintptr_t start_free = area->base + pages * PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 *
		 * Note: use the target address space's ASID (as->asid),
		 * not the current one's -- the area being resized need not
		 * belong to the address space that is active on this CPU.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +
		    pages * PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */	
		for (cond = true; cond;) {
			btree_node_t *node;
		
			ASSERT(!list_empty(&area->used_space.leaf_head));
			node =
			    list_get_instance(area->used_space.leaf_head.prev,
			    btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				uintptr_t b = node->key[node->keys - 1];
				count_t c =
				    (count_t) node->value[node->keys - 1];
				unsigned int i = 0;
			
				if (overlaps(b, c * PAGE_SIZE, area->base,
				    pages * PAGE_SIZE)) {
					
					if (b + c * PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits
						 * completely in the resized
						 * address space area.
						 */
						break;
					}
		
					/*
					 * Part of the interval corresponding
					 * to b and c overlaps with the resized
					 * address space area.
					 */
		
					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free,
					    c - i))
						panic("Could not remove used "
						    "space.\n");
				} else {
					/*
					 * The interval of used space can be
					 * completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used "
						    "space.\n");
				}
			
				for (; i < c; i++) {
					pte_t *pte;
			
					page_table_lock(as, false);
					pte = page_mapping_find(as, b +
					    i * PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) &&
					    PTE_PRESENT(pte));
					if (area->backend &&
					    area->backend->frame_free) {
						area->backend->frame_free(area,
						    b + i * PAGE_SIZE,
						    PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b +
					    i * PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */

		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
		    area->pages - pages);
		/*
		 * Invalidate software translation caches (e.g. TSB on sparc64).
		 */
		as_invalidate_translation_cache(as, area->base +
		    pages * PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
		
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
		    area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;
	
	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
522
 
511
 
523
/** Destroy address space area.
512
/** Destroy address space area.
524
 *
513
 *
525
 * @param as Address space.
514
 * @param as        Address space.
526
 * @param address Address withing the area to be deleted.
515
 * @param address   Address within the area to be deleted.
527
 *
516
 *
528
 * @return Zero on success or a value from @ref errno.h on failure.
517
 * @return      Zero on success or a value from @ref errno.h on failure.
529
 */
518
 */
530
int as_area_destroy(as_t *as, uintptr_t address)
519
int as_area_destroy(as_t *as, uintptr_t address)
531
{
520
{
532
    as_area_t *area;
521
    as_area_t *area;
533
    uintptr_t base;
522
    uintptr_t base;
534
    link_t *cur;
523
    link_t *cur;
535
    ipl_t ipl;
524
    ipl_t ipl;
536
 
525
 
537
    ipl = interrupts_disable();
526
    ipl = interrupts_disable();
538
    mutex_lock(&as->lock);
527
    mutex_lock(&as->lock);
539
 
528
 
540
    area = find_area_and_lock(as, address);
529
    area = find_area_and_lock(as, address);
541
    if (!area) {
530
    if (!area) {
542
        mutex_unlock(&as->lock);
531
        mutex_unlock(&as->lock);
543
        interrupts_restore(ipl);
532
        interrupts_restore(ipl);
544
        return ENOENT;
533
        return ENOENT;
545
    }
534
    }
546
 
535
 
547
    base = area->base;
536
    base = area->base;
548
 
537
 
549
    /*
538
    /*
550
     * Start TLB shootdown sequence.
539
     * Start TLB shootdown sequence.
551
     */
540
     */
552
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
541
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
553
 
542
 
554
    /*
543
    /*
555
     * Visit only the pages mapped by used_space B+tree.
544
     * Visit only the pages mapped by used_space B+tree.
556
     */
545
     */
557
    for (cur = area->used_space.leaf_head.next;
546
    for (cur = area->used_space.leaf_head.next;
558
        cur != &area->used_space.leaf_head; cur = cur->next) {
547
        cur != &area->used_space.leaf_head; cur = cur->next) {
559
        btree_node_t *node;
548
        btree_node_t *node;
560
        unsigned int i;
549
        unsigned int i;
561
       
550
       
562
        node = list_get_instance(cur, btree_node_t, leaf_link);
551
        node = list_get_instance(cur, btree_node_t, leaf_link);
563
        for (i = 0; i < node->keys; i++) {
552
        for (i = 0; i < node->keys; i++) {
564
            uintptr_t b = node->key[i];
553
            uintptr_t b = node->key[i];
565
            count_t j;
554
            count_t j;
566
            pte_t *pte;
555
            pte_t *pte;
567
           
556
           
568
            for (j = 0; j < (count_t) node->value[i]; j++) {
557
            for (j = 0; j < (count_t) node->value[i]; j++) {
569
                page_table_lock(as, false);
558
                page_table_lock(as, false);
570
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
559
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
571
                ASSERT(pte && PTE_VALID(pte) &&
560
                ASSERT(pte && PTE_VALID(pte) &&
572
                    PTE_PRESENT(pte));
561
                    PTE_PRESENT(pte));
573
                if (area->backend &&
562
                if (area->backend &&
574
                    area->backend->frame_free) {
563
                    area->backend->frame_free) {
575
                    area->backend->frame_free(area, b +
564
                    area->backend->frame_free(area, b +
576
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
565
                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
577
                }
566
                }
578
                page_mapping_remove(as, b + j * PAGE_SIZE);            
567
                page_mapping_remove(as, b + j * PAGE_SIZE);            
579
                page_table_unlock(as, false);
568
                page_table_unlock(as, false);
580
            }
569
            }
581
        }
570
        }
582
    }
571
    }
583
 
572
 
584
    /*
573
    /*
585
     * Finish TLB shootdown sequence.
574
     * Finish TLB shootdown sequence.
586
     */
575
     */
587
 
576
 
588
    tlb_invalidate_pages(as->asid, area->base, area->pages);
577
    tlb_invalidate_pages(as->asid, area->base, area->pages);
589
    /*
578
    /*
590
     * Invalidate potential software translation caches (e.g. TSB on
579
     * Invalidate potential software translation caches (e.g. TSB on
591
     * sparc64).
580
     * sparc64).
592
     */
581
     */
593
    as_invalidate_translation_cache(as, area->base, area->pages);
582
    as_invalidate_translation_cache(as, area->base, area->pages);
594
    tlb_shootdown_finalize();
583
    tlb_shootdown_finalize();
595
   
584
   
596
    btree_destroy(&area->used_space);
585
    btree_destroy(&area->used_space);
597
 
586
 
598
    area->attributes |= AS_AREA_ATTR_PARTIAL;
587
    area->attributes |= AS_AREA_ATTR_PARTIAL;
599
   
588
   
600
    if (area->sh_info)
589
    if (area->sh_info)
601
        sh_info_remove_reference(area->sh_info);
590
        sh_info_remove_reference(area->sh_info);
602
       
591
       
603
    mutex_unlock(&area->lock);
592
    mutex_unlock(&area->lock);
604
 
593
 
605
    /*
594
    /*
606
     * Remove the empty area from address space.
595
     * Remove the empty area from address space.
607
     */
596
     */
608
    btree_remove(&as->as_area_btree, base, NULL);
597
    btree_remove(&as->as_area_btree, base, NULL);
609
   
598
   
610
    free(area);
599
    free(area);
611
   
600
   
612
    mutex_unlock(&as->lock);
601
    mutex_unlock(&as->lock);
613
    interrupts_restore(ipl);
602
    interrupts_restore(ipl);
614
    return 0;
603
    return 0;
615
}
604
}
616
 
605
 
617
/** Share address space area with another or the same address space.
606
/** Share address space area with another or the same address space.
618
 *
607
 *
619
 * Address space area mapping is shared with a new address space area.
608
 * Address space area mapping is shared with a new address space area.
620
 * If the source address space area has not been shared so far,
609
 * If the source address space area has not been shared so far,
621
 * a new sh_info is created. The new address space area simply gets the
610
 * a new sh_info is created. The new address space area simply gets the
622
 * sh_info of the source area. The process of duplicating the
611
 * sh_info of the source area. The process of duplicating the
623
 * mapping is done through the backend share function.
612
 * mapping is done through the backend share function.
624
 *
613
 *
625
 * @param src_as Pointer to source address space.
614
 * @param src_as    Pointer to source address space.
626
 * @param src_base Base address of the source address space area.
615
 * @param src_base  Base address of the source address space area.
627
 * @param acc_size Expected size of the source area.
616
 * @param acc_size  Expected size of the source area.
628
 * @param dst_as Pointer to destination address space.
617
 * @param dst_as    Pointer to destination address space.
629
 * @param dst_base Target base address.
618
 * @param dst_base  Target base address.
630
 * @param dst_flags_mask Destination address space area flags mask.
619
 * @param dst_flags_mask Destination address space area flags mask.
631
 *
620
 *
632
 * @return Zero on success or ENOENT if there is no such task or if there is no
621
 * @return      Zero on success or ENOENT if there is no such task or if
633
 * such address space area, EPERM if there was a problem in accepting the area
622
 *          there is no such address space area, EPERM if there was
-
 
623
 *          a problem in accepting the area or ENOMEM if there was a
634
 * or ENOMEM if there was a problem in allocating destination address space
624
 *          problem in allocating destination address space area.
635
 * area. ENOTSUP is returned if the address space area backend does not support
625
 *          ENOTSUP is returned if the address space area backend
636
 * sharing.
626
 *          does not support sharing.
637
 */
627
 */
638
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
628
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
639
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
629
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
640
{
630
{
641
    ipl_t ipl;
631
    ipl_t ipl;
642
    int src_flags;
632
    int src_flags;
643
    size_t src_size;
633
    size_t src_size;
644
    as_area_t *src_area, *dst_area;
634
    as_area_t *src_area, *dst_area;
645
    share_info_t *sh_info;
635
    share_info_t *sh_info;
646
    mem_backend_t *src_backend;
636
    mem_backend_t *src_backend;
647
    mem_backend_data_t src_backend_data;
637
    mem_backend_data_t src_backend_data;
648
   
638
   
649
    ipl = interrupts_disable();
639
    ipl = interrupts_disable();
650
    mutex_lock(&src_as->lock);
640
    mutex_lock(&src_as->lock);
651
    src_area = find_area_and_lock(src_as, src_base);
641
    src_area = find_area_and_lock(src_as, src_base);
652
    if (!src_area) {
642
    if (!src_area) {
653
        /*
643
        /*
654
         * Could not find the source address space area.
644
         * Could not find the source address space area.
655
         */
645
         */
656
        mutex_unlock(&src_as->lock);
646
        mutex_unlock(&src_as->lock);
657
        interrupts_restore(ipl);
647
        interrupts_restore(ipl);
658
        return ENOENT;
648
        return ENOENT;
659
    }
649
    }
660
 
650
 
661
    if (!src_area->backend || !src_area->backend->share) {
651
    if (!src_area->backend || !src_area->backend->share) {
662
        /*
652
        /*
663
         * There is no backend or the backend does not
653
         * There is no backend or the backend does not
664
         * know how to share the area.
654
         * know how to share the area.
665
         */
655
         */
666
        mutex_unlock(&src_area->lock);
656
        mutex_unlock(&src_area->lock);
667
        mutex_unlock(&src_as->lock);
657
        mutex_unlock(&src_as->lock);
668
        interrupts_restore(ipl);
658
        interrupts_restore(ipl);
669
        return ENOTSUP;
659
        return ENOTSUP;
670
    }
660
    }
671
   
661
   
672
    src_size = src_area->pages * PAGE_SIZE;
662
    src_size = src_area->pages * PAGE_SIZE;
673
    src_flags = src_area->flags;
663
    src_flags = src_area->flags;
674
    src_backend = src_area->backend;
664
    src_backend = src_area->backend;
675
    src_backend_data = src_area->backend_data;
665
    src_backend_data = src_area->backend_data;
676
 
666
 
677
    /* Share the cacheable flag from the original mapping */
667
    /* Share the cacheable flag from the original mapping */
678
    if (src_flags & AS_AREA_CACHEABLE)
668
    if (src_flags & AS_AREA_CACHEABLE)
679
        dst_flags_mask |= AS_AREA_CACHEABLE;
669
        dst_flags_mask |= AS_AREA_CACHEABLE;
680
 
670
 
681
    if (src_size != acc_size ||
671
    if (src_size != acc_size ||
682
        (src_flags & dst_flags_mask) != dst_flags_mask) {
672
        (src_flags & dst_flags_mask) != dst_flags_mask) {
683
        mutex_unlock(&src_area->lock);
673
        mutex_unlock(&src_area->lock);
684
        mutex_unlock(&src_as->lock);
674
        mutex_unlock(&src_as->lock);
685
        interrupts_restore(ipl);
675
        interrupts_restore(ipl);
686
        return EPERM;
676
        return EPERM;
687
    }
677
    }
688
 
678
 
689
    /*
679
    /*
690
     * Now we are committed to sharing the area.
680
     * Now we are committed to sharing the area.
691
     * First, prepare the area for sharing.
681
     * First, prepare the area for sharing.
692
     * Then it will be safe to unlock it.
682
     * Then it will be safe to unlock it.
693
     */
683
     */
694
    sh_info = src_area->sh_info;
684
    sh_info = src_area->sh_info;
695
    if (!sh_info) {
685
    if (!sh_info) {
696
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
686
        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
697
        mutex_initialize(&sh_info->lock);
687
        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
698
        sh_info->refcount = 2;
688
        sh_info->refcount = 2;
699
        btree_create(&sh_info->pagemap);
689
        btree_create(&sh_info->pagemap);
700
        src_area->sh_info = sh_info;
690
        src_area->sh_info = sh_info;
701
        /*
691
        /*
702
         * Call the backend to setup sharing.
692
         * Call the backend to setup sharing.
703
         */
693
         */
704
        src_area->backend->share(src_area);
694
        src_area->backend->share(src_area);
705
    } else {
695
    } else {
706
        mutex_lock(&sh_info->lock);
696
        mutex_lock(&sh_info->lock);
707
        sh_info->refcount++;
697
        sh_info->refcount++;
708
        mutex_unlock(&sh_info->lock);
698
        mutex_unlock(&sh_info->lock);
709
    }
699
    }
710
 
700
 
711
    mutex_unlock(&src_area->lock);
701
    mutex_unlock(&src_area->lock);
712
    mutex_unlock(&src_as->lock);
702
    mutex_unlock(&src_as->lock);
713
 
703
 
714
    /*
704
    /*
715
     * Create copy of the source address space area.
705
     * Create copy of the source address space area.
716
     * The destination area is created with AS_AREA_ATTR_PARTIAL
706
     * The destination area is created with AS_AREA_ATTR_PARTIAL
717
     * attribute set which prevents race condition with
707
     * attribute set which prevents race condition with
718
     * preliminary as_page_fault() calls.
708
     * preliminary as_page_fault() calls.
719
     * The flags of the source area are masked against dst_flags_mask
709
     * The flags of the source area are masked against dst_flags_mask
720
     * to support sharing in less privileged mode.
710
     * to support sharing in less privileged mode.
721
     */
711
     */
722
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
712
    dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
723
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
713
        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
724
    if (!dst_area) {
714
    if (!dst_area) {
725
        /*
715
        /*
726
         * Destination address space area could not be created.
716
         * Destination address space area could not be created.
727
         */
717
         */
728
        sh_info_remove_reference(sh_info);
718
        sh_info_remove_reference(sh_info);
729
       
719
       
730
        interrupts_restore(ipl);
720
        interrupts_restore(ipl);
731
        return ENOMEM;
721
        return ENOMEM;
732
    }
722
    }
733
 
723
 
734
    /*
724
    /*
735
     * Now the destination address space area has been
725
     * Now the destination address space area has been
736
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
726
     * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
737
     * attribute and set the sh_info.
727
     * attribute and set the sh_info.
738
     */
728
     */
739
    mutex_lock(&dst_as->lock); 
729
    mutex_lock(&dst_as->lock); 
740
    mutex_lock(&dst_area->lock);
730
    mutex_lock(&dst_area->lock);
741
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
731
    dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
742
    dst_area->sh_info = sh_info;
732
    dst_area->sh_info = sh_info;
743
    mutex_unlock(&dst_area->lock);
733
    mutex_unlock(&dst_area->lock);
744
    mutex_unlock(&dst_as->lock);   
734
    mutex_unlock(&dst_as->lock);   
745
 
735
 
746
    interrupts_restore(ipl);
736
    interrupts_restore(ipl);
747
   
737
   
748
    return 0;
738
    return 0;
749
}
739
}
750
 
740
 
751
/** Check access mode for address space area.
741
/** Check access mode for address space area.
752
 *
742
 *
753
 * The address space area must be locked prior to this call.
743
 * The address space area must be locked prior to this call.
754
 *
744
 *
755
 * @param area Address space area.
745
 * @param area      Address space area.
756
 * @param access Access mode.
746
 * @param access    Access mode.
757
 *
747
 *
758
 * @return False if access violates area's permissions, true otherwise.
748
 * @return      False if access violates area's permissions, true
-
 
749
 *          otherwise.
759
 */
750
 */
760
bool as_area_check_access(as_area_t *area, pf_access_t access)
751
bool as_area_check_access(as_area_t *area, pf_access_t access)
761
{
752
{
762
    int flagmap[] = {
753
    int flagmap[] = {
763
        [PF_ACCESS_READ] = AS_AREA_READ,
754
        [PF_ACCESS_READ] = AS_AREA_READ,
764
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
755
        [PF_ACCESS_WRITE] = AS_AREA_WRITE,
765
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
756
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
766
    };
757
    };
767
 
758
 
768
    if (!(area->flags & flagmap[access]))
759
    if (!(area->flags & flagmap[access]))
769
        return false;
760
        return false;
770
   
761
   
771
    return true;
762
    return true;
772
}
763
}
773
 
764
 
-
 
765
/** Change adress space area flags.
-
 
766
 *
-
 
767
 * The idea is to have the same data, but with a different access mode.
-
 
768
 * This is needed e.g. for writing code into memory and then executing it.
-
 
769
 * In order for this to work properly, this may copy the data
-
 
770
 * into private anonymous memory (unless it's already there).
-
 
771
 *
-
 
772
 * @param as        Address space.
-
 
773
 * @param flags     Flags of the area memory.
-
 
774
 * @param address   Address withing the area to be changed.
-
 
775
 *
-
 
776
 * @return      Zero on success or a value from @ref errno.h on failure.
-
 
777
 */
-
 
778
int as_area_change_flags(as_t *as, int flags, uintptr_t address)
-
 
779
{
-
 
780
    as_area_t *area;
-
 
781
    uintptr_t base;
-
 
782
    link_t *cur;
-
 
783
    ipl_t ipl;
-
 
784
    int page_flags;
-
 
785
    uintptr_t *old_frame;
-
 
786
    index_t frame_idx;
-
 
787
    count_t used_pages;
-
 
788
 
-
 
789
    /* Flags for the new memory mapping */
-
 
790
    page_flags = area_flags_to_page_flags(flags);
-
 
791
 
-
 
792
    ipl = interrupts_disable();
-
 
793
    mutex_lock(&as->lock);
-
 
794
 
-
 
795
    area = find_area_and_lock(as, address);
-
 
796
    if (!area) {
-
 
797
        mutex_unlock(&as->lock);
-
 
798
        interrupts_restore(ipl);
-
 
799
        return ENOENT;
-
 
800
    }
-
 
801
 
-
 
802
    if (area->sh_info || area->backend != &anon_backend) {
-
 
803
        /* Copying shared areas not supported yet */
-
 
804
        /* Copying non-anonymous memory not supported yet */
-
 
805
        mutex_unlock(&area->lock);
-
 
806
        mutex_unlock(&as->lock);
-
 
807
        interrupts_restore(ipl);
-
 
808
        return ENOTSUP;
-
 
809
    }
-
 
810
 
-
 
811
    base = area->base;
-
 
812
 
-
 
813
    /*
-
 
814
     * Compute total number of used pages in the used_space B+tree
-
 
815
     */
-
 
816
    used_pages = 0;
-
 
817
 
-
 
818
    for (cur = area->used_space.leaf_head.next;
-
 
819
        cur != &area->used_space.leaf_head; cur = cur->next) {
-
 
820
        btree_node_t *node;
-
 
821
        unsigned int i;
-
 
822
       
-
 
823
        node = list_get_instance(cur, btree_node_t, leaf_link);
-
 
824
        for (i = 0; i < node->keys; i++) {
-
 
825
            used_pages += (count_t) node->value[i];
-
 
826
        }
-
 
827
    }
-
 
828
 
-
 
829
    /* An array for storing frame numbers */
-
 
830
    old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
-
 
831
 
-
 
832
    /*
-
 
833
     * Start TLB shootdown sequence.
-
 
834
     */
-
 
835
    tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
-
 
836
 
-
 
837
    /*
-
 
838
     * Remove used pages from page tables and remember their frame
-
 
839
     * numbers.
-
 
840
     */
-
 
841
    frame_idx = 0;
-
 
842
 
-
 
843
    for (cur = area->used_space.leaf_head.next;
-
 
844
        cur != &area->used_space.leaf_head; cur = cur->next) {
-
 
845
        btree_node_t *node;
-
 
846
        unsigned int i;
-
 
847
       
-
 
848
        node = list_get_instance(cur, btree_node_t, leaf_link);
-
 
849
        for (i = 0; i < node->keys; i++) {
-
 
850
            uintptr_t b = node->key[i];
-
 
851
            count_t j;
-
 
852
            pte_t *pte;
-
 
853
           
-
 
854
            for (j = 0; j < (count_t) node->value[i]; j++) {
-
 
855
                page_table_lock(as, false);
-
 
856
                pte = page_mapping_find(as, b + j * PAGE_SIZE);
-
 
857
                ASSERT(pte && PTE_VALID(pte) &&
-
 
858
                    PTE_PRESENT(pte));
-
 
859
                old_frame[frame_idx++] = PTE_GET_FRAME(pte);
-
 
860
 
-
 
861
                /* Remove old mapping */
-
 
862
                page_mapping_remove(as, b + j * PAGE_SIZE);
-
 
863
                page_table_unlock(as, false);
-
 
864
            }
-
 
865
        }
-
 
866
    }
-
 
867
 
-
 
868
    /*
-
 
869
     * Finish TLB shootdown sequence.
-
 
870
     */
-
 
871
 
-
 
872
    tlb_invalidate_pages(as->asid, area->base, area->pages);
-
 
873
    /*
-
 
874
     * Invalidate potential software translation caches (e.g. TSB on
-
 
875
     * sparc64).
-
 
876
     */
-
 
877
    as_invalidate_translation_cache(as, area->base, area->pages);
-
 
878
    tlb_shootdown_finalize();
-
 
879
 
-
 
880
    /*
-
 
881
     * Set the new flags.
-
 
882
     */
-
 
883
    area->flags = flags;
-
 
884
 
-
 
885
    /*
-
 
886
     * Map pages back in with new flags. This step is kept separate
-
 
887
     * so that the memory area could not be accesed with both the old and
-
 
888
     * the new flags at once.
-
 
889
     */
-
 
890
    frame_idx = 0;
-
 
891
 
-
 
892
    for (cur = area->used_space.leaf_head.next;
-
 
893
        cur != &area->used_space.leaf_head; cur = cur->next) {
-
 
894
        btree_node_t *node;
-
 
895
        unsigned int i;
-
 
896
       
-
 
897
        node = list_get_instance(cur, btree_node_t, leaf_link);
-
 
898
        for (i = 0; i < node->keys; i++) {
-
 
899
            uintptr_t b = node->key[i];
-
 
900
            count_t j;
-
 
901
           
-
 
902
            for (j = 0; j < (count_t) node->value[i]; j++) {
-
 
903
                page_table_lock(as, false);
-
 
904
 
-
 
905
                /* Insert the new mapping */
-
 
906
                page_mapping_insert(as, b + j * PAGE_SIZE,
-
 
907
                    old_frame[frame_idx++], page_flags);
-
 
908
 
-
 
909
                page_table_unlock(as, false);
-
 
910
            }
-
 
911
        }
-
 
912
    }
-
 
913
 
-
 
914
    free(old_frame);
-
 
915
 
-
 
916
    mutex_unlock(&area->lock);
-
 
917
    mutex_unlock(&as->lock);
-
 
918
    interrupts_restore(ipl);
-
 
919
 
-
 
920
    return 0;
-
 
921
}
-
 
922
 
-
 
923
 
774
/** Handle page fault within the current address space.
924
/** Handle page fault within the current address space.
775
 *
925
 *
776
 * This is the high-level page fault handler. It decides
926
 * This is the high-level page fault handler. It decides whether the page fault
777
 * whether the page fault can be resolved by any backend
-
 
778
 * and if so, it invokes the backend to resolve the page
927
 * can be resolved by any backend and if so, it invokes the backend to resolve
779
 * fault.
928
 * the page fault.
780
 *
929
 *
781
 * Interrupts are assumed disabled.
930
 * Interrupts are assumed disabled.
782
 *
931
 *
783
 * @param page Faulting page.
932
 * @param page      Faulting page.
784
 * @param access Access mode that caused the fault (i.e. read/write/exec).
933
 * @param access    Access mode that caused the page fault (i.e.
-
 
934
 *          read/write/exec).
785
 * @param istate Pointer to interrupted state.
935
 * @param istate    Pointer to the interrupted state.
786
 *
936
 *
787
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
937
 * @return      AS_PF_FAULT on page fault, AS_PF_OK on success or
788
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
938
 *          AS_PF_DEFER if the fault was caused by copy_to_uspace()
-
 
939
 *          or copy_from_uspace().
789
 */
940
 */
790
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
941
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
791
{
942
{
792
    pte_t *pte;
943
    pte_t *pte;
793
    as_area_t *area;
944
    as_area_t *area;
794
   
945
   
795
    if (!THREAD)
946
    if (!THREAD)
796
        return AS_PF_FAULT;
947
        return AS_PF_FAULT;
797
       
948
       
798
    ASSERT(AS);
949
    ASSERT(AS);
799
 
950
 
800
    mutex_lock(&AS->lock);
951
    mutex_lock(&AS->lock);
801
    area = find_area_and_lock(AS, page);   
952
    area = find_area_and_lock(AS, page);   
802
    if (!area) {
953
    if (!area) {
803
        /*
954
        /*
804
         * No area contained mapping for 'page'.
955
         * No area contained mapping for 'page'.
805
         * Signal page fault to low-level handler.
956
         * Signal page fault to low-level handler.
806
         */
957
         */
807
        mutex_unlock(&AS->lock);
958
        mutex_unlock(&AS->lock);
808
        goto page_fault;
959
        goto page_fault;
809
    }
960
    }
810
 
961
 
811
    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
962
    if (area->attributes & AS_AREA_ATTR_PARTIAL) {
812
        /*
963
        /*
813
         * The address space area is not fully initialized.
964
         * The address space area is not fully initialized.
814
         * Avoid possible race by returning error.
965
         * Avoid possible race by returning error.
815
         */
966
         */
816
        mutex_unlock(&area->lock);
967
        mutex_unlock(&area->lock);
817
        mutex_unlock(&AS->lock);
968
        mutex_unlock(&AS->lock);
818
        goto page_fault;       
969
        goto page_fault;       
819
    }
970
    }
820
 
971
 
821
    if (!area->backend || !area->backend->page_fault) {
972
    if (!area->backend || !area->backend->page_fault) {
822
        /*
973
        /*
823
         * The address space area is not backed by any backend
974
         * The address space area is not backed by any backend
824
         * or the backend cannot handle page faults.
975
         * or the backend cannot handle page faults.
825
         */
976
         */
826
        mutex_unlock(&area->lock);
977
        mutex_unlock(&area->lock);
827
        mutex_unlock(&AS->lock);
978
        mutex_unlock(&AS->lock);
828
        goto page_fault;       
979
        goto page_fault;       
829
    }
980
    }
830
 
981
 
831
    page_table_lock(AS, false);
982
    page_table_lock(AS, false);
832
   
983
   
833
    /*
984
    /*
834
     * To avoid race condition between two page faults
985
     * To avoid race condition between two page faults on the same address,
835
     * on the same address, we need to make sure
-
 
836
     * the mapping has not been already inserted.
986
     * we need to make sure the mapping has not been already inserted.
837
     */
987
     */
838
    if ((pte = page_mapping_find(AS, page))) {
988
    if ((pte = page_mapping_find(AS, page))) {
839
        if (PTE_PRESENT(pte)) {
989
        if (PTE_PRESENT(pte)) {
840
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
990
            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
841
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
991
                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
842
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
992
                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
843
                page_table_unlock(AS, false);
993
                page_table_unlock(AS, false);
844
                mutex_unlock(&area->lock);
994
                mutex_unlock(&area->lock);
845
                mutex_unlock(&AS->lock);
995
                mutex_unlock(&AS->lock);
846
                return AS_PF_OK;
996
                return AS_PF_OK;
847
            }
997
            }
848
        }
998
        }
849
    }
999
    }
850
   
1000
   
851
    /*
1001
    /*
852
     * Resort to the backend page fault handler.
1002
     * Resort to the backend page fault handler.
853
     */
1003
     */
854
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
1004
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
855
        page_table_unlock(AS, false);
1005
        page_table_unlock(AS, false);
856
        mutex_unlock(&area->lock);
1006
        mutex_unlock(&area->lock);
857
        mutex_unlock(&AS->lock);
1007
        mutex_unlock(&AS->lock);
858
        goto page_fault;
1008
        goto page_fault;
859
    }
1009
    }
860
   
1010
   
861
    page_table_unlock(AS, false);
1011
    page_table_unlock(AS, false);
862
    mutex_unlock(&area->lock);
1012
    mutex_unlock(&area->lock);
863
    mutex_unlock(&AS->lock);
1013
    mutex_unlock(&AS->lock);
864
    return AS_PF_OK;
1014
    return AS_PF_OK;
865
 
1015
 
866
page_fault:
1016
page_fault:
867
    if (THREAD->in_copy_from_uspace) {
1017
    if (THREAD->in_copy_from_uspace) {
868
        THREAD->in_copy_from_uspace = false;
1018
        THREAD->in_copy_from_uspace = false;
869
        istate_set_retaddr(istate,
1019
        istate_set_retaddr(istate,
870
            (uintptr_t) &memcpy_from_uspace_failover_address);
1020
            (uintptr_t) &memcpy_from_uspace_failover_address);
871
    } else if (THREAD->in_copy_to_uspace) {
1021
    } else if (THREAD->in_copy_to_uspace) {
872
        THREAD->in_copy_to_uspace = false;
1022
        THREAD->in_copy_to_uspace = false;
873
        istate_set_retaddr(istate,
1023
        istate_set_retaddr(istate,
874
            (uintptr_t) &memcpy_to_uspace_failover_address);
1024
            (uintptr_t) &memcpy_to_uspace_failover_address);
875
    } else {
1025
    } else {
876
        return AS_PF_FAULT;
1026
        return AS_PF_FAULT;
877
    }
1027
    }
878
 
1028
 
879
    return AS_PF_DEFER;
1029
    return AS_PF_DEFER;
880
}
1030
}
881
 
1031
 
882
/** Switch address spaces.
1032
/** Switch address spaces.
883
 *
1033
 *
884
 * Note that this function cannot sleep as it is essentially a part of
1034
 * Note that this function cannot sleep as it is essentially a part of
885
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
1035
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
886
 * thing which is forbidden in this context is locking the address space.
1036
 * thing which is forbidden in this context is locking the address space.
887
 *
1037
 *
888
 * When this function is enetered, no spinlocks may be held.
1038
 * When this function is enetered, no spinlocks may be held.
889
 *
1039
 *
890
 * @param old Old address space or NULL.
1040
 * @param old       Old address space or NULL.
891
 * @param new New address space.
1041
 * @param new       New address space.
892
 */
1042
 */
893
void as_switch(as_t *old_as, as_t *new_as)
1043
void as_switch(as_t *old_as, as_t *new_as)
894
{
1044
{
895
    DEADLOCK_PROBE_INIT(p_asidlock);
1045
    DEADLOCK_PROBE_INIT(p_asidlock);
896
    preemption_disable();
1046
    preemption_disable();
897
retry:
1047
retry:
898
    (void) interrupts_disable();
1048
    (void) interrupts_disable();
899
    if (!spinlock_trylock(&asidlock)) {
1049
    if (!spinlock_trylock(&asidlock)) {
900
        /*
1050
        /*
901
         * Avoid deadlock with TLB shootdown.
1051
         * Avoid deadlock with TLB shootdown.
902
         * We can enable interrupts here because
1052
         * We can enable interrupts here because
903
         * preemption is disabled. We should not be
1053
         * preemption is disabled. We should not be
904
         * holding any other lock.
1054
         * holding any other lock.
905
         */
1055
         */
906
        (void) interrupts_enable();
1056
        (void) interrupts_enable();
907
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
1057
        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
908
        goto retry;
1058
        goto retry;
909
    }
1059
    }
910
    preemption_enable();
1060
    preemption_enable();
911
 
1061
 
912
    /*
1062
    /*
913
     * First, take care of the old address space.
1063
     * First, take care of the old address space.
914
     */
1064
     */
915
    if (old_as) {
1065
    if (old_as) {
916
        ASSERT(old_as->cpu_refcount);
1066
        ASSERT(old_as->cpu_refcount);
917
        if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
1067
        if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
918
            /*
1068
            /*
919
             * The old address space is no longer active on
1069
             * The old address space is no longer active on
920
             * any processor. It can be appended to the
1070
             * any processor. It can be appended to the
921
             * list of inactive address spaces with assigned
1071
             * list of inactive address spaces with assigned
922
             * ASID.
1072
             * ASID.
923
             */
1073
             */
924
            ASSERT(old_as->asid != ASID_INVALID);
1074
            ASSERT(old_as->asid != ASID_INVALID);
925
            list_append(&old_as->inactive_as_with_asid_link,
1075
            list_append(&old_as->inactive_as_with_asid_link,
926
                &inactive_as_with_asid_head);
1076
                &inactive_as_with_asid_head);
927
        }
1077
        }
928
 
1078
 
929
        /*
1079
        /*
930
         * Perform architecture-specific tasks when the address space
1080
         * Perform architecture-specific tasks when the address space
931
         * is being removed from the CPU.
1081
         * is being removed from the CPU.
932
         */
1082
         */
933
        as_deinstall_arch(old_as);
1083
        as_deinstall_arch(old_as);
934
    }
1084
    }
935
 
1085
 
936
    /*
1086
    /*
937
     * Second, prepare the new address space.
1087
     * Second, prepare the new address space.
938
     */
1088
     */
939
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
1089
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
940
        if (new_as->asid != ASID_INVALID)
1090
        if (new_as->asid != ASID_INVALID)
941
            list_remove(&new_as->inactive_as_with_asid_link);
1091
            list_remove(&new_as->inactive_as_with_asid_link);
942
        else
1092
        else
943
            new_as->asid = asid_get();
1093
            new_as->asid = asid_get();
944
    }
1094
    }
945
#ifdef AS_PAGE_TABLE
1095
#ifdef AS_PAGE_TABLE
946
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
1096
    SET_PTL0_ADDRESS(new_as->genarch.page_table);
947
#endif
1097
#endif
948
   
1098
   
949
    /*
1099
    /*
950
     * Perform architecture-specific steps.
1100
     * Perform architecture-specific steps.
951
     * (e.g. write ASID to hardware register etc.)
1101
     * (e.g. write ASID to hardware register etc.)
952
     */
1102
     */
953
    as_install_arch(new_as);
1103
    as_install_arch(new_as);
954
 
1104
 
955
    spinlock_unlock(&asidlock);
1105
    spinlock_unlock(&asidlock);
956
   
1106
   
957
    AS = new_as;
1107
    AS = new_as;
958
}
1108
}
959
 
1109
 
960
/** Write directly into a page, bypassing area flags.
1110
/** Write directly into a page, bypassing area flags.
961
 *
1111
 *
962
 * This allows a debugger to write into a page that is mapped read-only
1112
 * This allows a debugger to write into a page that is mapped read-only
963
 * (such as the text segment). Naturally, this is only possible if the
1113
 * (such as the text segment). Naturally, this is only possible if the
964
 * correspoinding area is not shared and anonymous.
1114
 * correspoinding area is not shared and anonymous.
965
 *
1115
 *
966
 * FIXME: doesn't take into account that it isn't a good idea to write
1116
 * FIXME: doesn't take into account that it isn't a good idea to write
967
 * into the frame if the area is shared or isn't anonymous
1117
 * into the frame if the area is shared or isn't anonymous
968
 */
1118
 */
969
static int debug_write_inside_page(uintptr_t va, void *data, size_t n)
1119
static int debug_write_inside_page(uintptr_t va, void *data, size_t n)
970
{
1120
{
971
    uintptr_t page;
1121
    uintptr_t page;
972
    pte_t *pte;
1122
    pte_t *pte;
973
    as_area_t *area;
1123
    as_area_t *area;
974
    uintptr_t frame;
1124
    uintptr_t frame;
975
    ipl_t ipl;
1125
    ipl_t ipl;
976
    int rc;
1126
    int rc;
977
 
1127
 
978
    page = ALIGN_DOWN(va, PAGE_SIZE);
1128
    page = ALIGN_DOWN(va, PAGE_SIZE);
979
    ASSERT(ALIGN_DOWN(va + n - 1, PAGE_SIZE) == page);
1129
    ASSERT(ALIGN_DOWN(va + n - 1, PAGE_SIZE) == page);
980
 
1130
 
981
restart:
1131
restart:
982
    mutex_lock(&AS->lock);
1132
    mutex_lock(&AS->lock);
983
    ipl = interrupts_disable();
1133
    ipl = interrupts_disable();
984
    area = find_area_and_lock(AS, page);
1134
    area = find_area_and_lock(AS, page);
985
    if (area->backend != &anon_backend || area->sh_info != NULL) {
1135
    if (area->backend != &anon_backend || area->sh_info != NULL) {
986
        mutex_unlock(&area->lock);
1136
        mutex_unlock(&area->lock);
987
        mutex_unlock(&AS->lock);
1137
        mutex_unlock(&AS->lock);
988
        interrupts_restore(ipl);
1138
        interrupts_restore(ipl);
989
 
1139
 
990
        rc = as_area_make_writeable(area->base);
1140
        rc = as_area_make_writeable(area->base);
991
        if (rc != 0) return rc;
1141
        if (rc != 0) return rc;
992
 
1142
 
993
        goto restart;
1143
        goto restart;
994
    }
1144
    }
995
 
1145
 
996
    pte = page_mapping_find(AS, page);
1146
    pte = page_mapping_find(AS, page);
997
    if (! (pte && PTE_VALID(pte) && PTE_PRESENT(pte)) ) {
1147
    if (! (pte && PTE_VALID(pte) && PTE_PRESENT(pte)) ) {
998
        mutex_unlock(&area->lock);
1148
        mutex_unlock(&area->lock);
999
        mutex_unlock(&AS->lock);
1149
        mutex_unlock(&AS->lock);
1000
        interrupts_restore(ipl);
1150
        interrupts_restore(ipl);
1001
 
1151
 
1002
        rc = as_page_fault(page, PF_ACCESS_WRITE, NULL);
1152
        rc = as_page_fault(page, PF_ACCESS_WRITE, NULL);
1003
        if (rc == AS_PF_FAULT) return EINVAL;
1153
        if (rc == AS_PF_FAULT) return EINVAL;
1004
 
1154
 
1005
        goto restart;
1155
        goto restart;
1006
    }
1156
    }
1007
 
1157
 
1008
    frame = PTE_GET_FRAME(pte);
1158
    frame = PTE_GET_FRAME(pte);
1009
    memcpy((void *)(PA2KA(frame) + (va - page)), data, n);
1159
    memcpy((void *)(PA2KA(frame) + (va - page)), data, n);
1010
 
1160
 
1011
    mutex_unlock(&area->lock);
1161
    mutex_unlock(&area->lock);
1012
    mutex_unlock(&AS->lock);
1162
    mutex_unlock(&AS->lock);
1013
    interrupts_restore(ipl);
1163
    interrupts_restore(ipl);
1014
 
1164
 
1015
    return EOK;
1165
    return EOK;
1016
}
1166
}
1017
 
1167
 
1018
/** Write data bypassing area flags.
1168
/** Write data bypassing area flags.
1019
 *
1169
 *
1020
 * See debug_write_inside_page().
1170
 * See debug_write_inside_page().
1021
 */
1171
 */
1022
int as_debug_write(uintptr_t va, void *data, size_t n)
1172
int as_debug_write(uintptr_t va, void *data, size_t n)
1023
{
1173
{
1024
    size_t now;
1174
    size_t now;
1025
    int rc;
1175
    int rc;
1026
 
1176
 
1027
    while (n > 0) {
1177
    while (n > 0) {
1028
        /* Number of bytes until the end of page */
1178
        /* Number of bytes until the end of page */
1029
        now = ALIGN_DOWN(va, PAGE_SIZE) + PAGE_SIZE - va;
1179
        now = ALIGN_DOWN(va, PAGE_SIZE) + PAGE_SIZE - va;
1030
        if (now > n) now = n;
1180
        if (now > n) now = n;
1031
 
1181
 
1032
        rc = debug_write_inside_page(va, data, now);
1182
        rc = debug_write_inside_page(va, data, now);
1033
        if (rc != EOK) return rc;
1183
        if (rc != EOK) return rc;
1034
 
1184
 
1035
        va += now;
1185
        va += now;
1036
        data += now;
1186
        data += now;
1037
        n -= now;
1187
        n -= now;
1038
    }
1188
    }
1039
 
1189
 
1040
    return EOK;
1190
    return EOK;
1041
}
1191
}
1042
 
1192
 
1043
/** Make sure area is private and anonymous.
1193
/** Make sure area is private and anonymous.
1044
 *
1194
 *
1045
 * Not atomic atm.
1195
 * Not atomic atm.
1046
 * @param address   Virtual address in AS.
1196
 * @param address   Virtual address in AS.
1047
 */
1197
 */
1048
int as_area_make_writeable(uintptr_t address)
1198
int as_area_make_writeable(uintptr_t address)
1049
{
1199
{
1050
    ipl_t ipl;
1200
    ipl_t ipl;
1051
    as_area_t *area;
1201
    as_area_t *area;
1052
    uintptr_t base, page;
1202
    uintptr_t base, page;
1053
    uintptr_t old_frame, frame;
1203
    uintptr_t old_frame, frame;
1054
    size_t size;
1204
    size_t size;
1055
    int flags;
1205
    int flags;
1056
    int page_flags;
1206
    int page_flags;
1057
    pte_t *pte;
1207
    pte_t *pte;
1058
    int rc;
1208
    int rc;
1059
    uintptr_t *pagemap;
1209
    uintptr_t *pagemap;
1060
 
1210
 
1061
    ipl = interrupts_disable();
1211
    ipl = interrupts_disable();
1062
    mutex_lock(&AS->lock);
1212
    mutex_lock(&AS->lock);
1063
    area = find_area_and_lock(AS, address);
1213
    area = find_area_and_lock(AS, address);
1064
    if (!area) {
1214
    if (!area) {
1065
        /*
1215
        /*
1066
         * Could not find the address space area.
1216
         * Could not find the address space area.
1067
         */
1217
         */
1068
        mutex_unlock(&AS->lock);
1218
        mutex_unlock(&AS->lock);
1069
        interrupts_restore(ipl);
1219
        interrupts_restore(ipl);
1070
        return ENOENT;
1220
        return ENOENT;
1071
    }
1221
    }
1072
 
1222
 
1073
    if (area->backend == &anon_backend && !area->sh_info) {
1223
    if (area->backend == &anon_backend && !area->sh_info) {
1074
        /* Nothing to do */
1224
        /* Nothing to do */
1075
        mutex_unlock(&area->lock);
1225
        mutex_unlock(&area->lock);
1076
        mutex_unlock(&AS->lock);
1226
        mutex_unlock(&AS->lock);
1077
        interrupts_restore(ipl);
1227
        interrupts_restore(ipl);
1078
        return EOK;
1228
        return EOK;
1079
    }
1229
    }
1080
 
1230
 
1081
    base = area->base;
1231
    base = area->base;
1082
    size = area->pages * PAGE_SIZE;
1232
    size = area->pages * PAGE_SIZE;
1083
    flags = area->flags;
1233
    flags = area->flags;
1084
    page_flags = as_area_get_flags(area);
1234
    page_flags = as_area_get_flags(area);
1085
 
1235
 
1086
    pagemap = malloc(area->pages * sizeof(uintptr_t), 0);
1236
    pagemap = malloc(area->pages * sizeof(uintptr_t), 0);
1087
    page_table_lock(AS, false);
1237
    page_table_lock(AS, false);
1088
 
1238
 
1089
    for (page = base; page < base + size; page += PAGE_SIZE) {
1239
    for (page = base; page < base + size; page += PAGE_SIZE) {
1090
        pte = page_mapping_find(AS, page);
1240
        pte = page_mapping_find(AS, page);
1091
        if (!pte || !PTE_PRESENT(pte) || !PTE_READABLE(pte)) {
1241
        if (!pte || !PTE_PRESENT(pte) || !PTE_READABLE(pte)) {
1092
            /* Fetch the missing page */
1242
            /* Fetch the missing page */
1093
            if (!area->backend || !area->backend->page_fault) {
1243
            if (!area->backend || !area->backend->page_fault) {
1094
                page_table_unlock(AS, false);
1244
                page_table_unlock(AS, false);
1095
                mutex_unlock(&area->lock);
1245
                mutex_unlock(&area->lock);
1096
                mutex_unlock(&AS->lock);
1246
                mutex_unlock(&AS->lock);
1097
                interrupts_restore(ipl);
1247
                interrupts_restore(ipl);
1098
                return EINVAL;
1248
                return EINVAL;
1099
            }
1249
            }
1100
            if (area->backend->page_fault(area, page, PF_ACCESS_READ) != AS_PF_OK) {
1250
            if (area->backend->page_fault(area, page, PF_ACCESS_READ) != AS_PF_OK) {
1101
                page_table_unlock(AS, false);
1251
                page_table_unlock(AS, false);
1102
                mutex_unlock(&area->lock);
1252
                mutex_unlock(&area->lock);
1103
                mutex_unlock(&AS->lock);
1253
                mutex_unlock(&AS->lock);
1104
                interrupts_restore(ipl);
1254
                interrupts_restore(ipl);
1105
                return EINVAL;
1255
                return EINVAL;
1106
            }
1256
            }
1107
        }
1257
        }
1108
        ASSERT(PTE_VALID(pte));
1258
        ASSERT(PTE_VALID(pte));
1109
 
1259
 
1110
        old_frame = PTE_GET_FRAME(pte);
1260
        old_frame = PTE_GET_FRAME(pte);
1111
 
1261
 
1112
        frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
1262
        frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
1113
        memcpy((void *) PA2KA(frame), (void *)PA2KA(old_frame),
1263
        memcpy((void *) PA2KA(frame), (void *)PA2KA(old_frame),
1114
            FRAME_SIZE);
1264
            FRAME_SIZE);
1115
 
1265
 
1116
        pagemap[(page - base) / PAGE_SIZE] = frame;
1266
        pagemap[(page - base) / PAGE_SIZE] = frame;
1117
    }
1267
    }
1118
 
1268
 
1119
    page_table_unlock(AS, false);
1269
    page_table_unlock(AS, false);
1120
    mutex_unlock(&area->lock);
1270
    mutex_unlock(&area->lock);
1121
    mutex_unlock(&AS->lock);
1271
    mutex_unlock(&AS->lock);
1122
    interrupts_restore(ipl);
1272
    interrupts_restore(ipl);
1123
 
1273
 
1124
    rc = as_area_destroy(AS, address);
1274
    rc = as_area_destroy(AS, address);
1125
    if (rc < 0) {
1275
    if (rc < 0) {
1126
        free(pagemap);
1276
        free(pagemap);
1127
        return rc;
1277
        return rc;
1128
    }
1278
    }
1129
 
1279
 
1130
    area = as_area_create(AS, flags, size, base, AS_AREA_ATTR_PARTIAL,
1280
    area = as_area_create(AS, flags, size, base, AS_AREA_ATTR_PARTIAL,
1131
        &anon_backend, NULL);
1281
        &anon_backend, NULL);
1132
    if (area == NULL) {
1282
    if (area == NULL) {
1133
        free(pagemap);
1283
        free(pagemap);
1134
        return rc;
1284
        return rc;
1135
    }
1285
    }
1136
 
1286
 
1137
    mutex_lock(&AS->lock);
1287
    mutex_lock(&AS->lock);
1138
    mutex_lock(&area->lock);
1288
    mutex_lock(&area->lock);
1139
    page_table_lock(AS, false);
1289
    page_table_lock(AS, false);
1140
    for (page = base; page < base + size; page += PAGE_SIZE) {
1290
    for (page = base; page < base + size; page += PAGE_SIZE) {
1141
        frame = pagemap[(page - base) / PAGE_SIZE];
1291
        frame = pagemap[(page - base) / PAGE_SIZE];
1142
 
1292
 
1143
        page_mapping_insert(AS, page, frame, page_flags);
1293
        page_mapping_insert(AS, page, frame, page_flags);
1144
        if (!used_space_insert(area, page, 1))
1294
        if (!used_space_insert(area, page, 1))
1145
            panic("Could not insert used space.\n");
1295
            panic("Could not insert used space.\n");
1146
    }
1296
    }
1147
 
1297
 
1148
    page_table_unlock(AS, false);
1298
    page_table_unlock(AS, false);
1149
 
1299
 
1150
    area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1300
    area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1151
 
1301
 
1152
    mutex_unlock(&area->lock);
1302
    mutex_unlock(&area->lock);
1153
    mutex_unlock(&AS->lock);
1303
    mutex_unlock(&AS->lock);
1154
 
1304
 
1155
    free(pagemap);
1305
    free(pagemap);
1156
 
1306
 
1157
    return EOK;
1307
    return EOK;
1158
}
1308
}
1159
 
1309
 
1160
/** Convert address space area flags to page flags.
1310
/** Convert address space area flags to page flags.
1161
 *
1311
 *
1162
 * @param aflags Flags of some address space area.
1312
 * @param aflags    Flags of some address space area.
1163
 *
1313
 *
1164
 * @return Flags to be passed to page_mapping_insert().
1314
 * @return      Flags to be passed to page_mapping_insert().
1165
 */
1315
 */
1166
int area_flags_to_page_flags(int aflags)
1316
int area_flags_to_page_flags(int aflags)
1167
{
1317
{
1168
    int flags;
1318
    int flags;
1169
 
1319
 
1170
    flags = PAGE_USER | PAGE_PRESENT;
1320
    flags = PAGE_USER | PAGE_PRESENT;
1171
   
1321
   
1172
    if (aflags & AS_AREA_READ)
1322
    if (aflags & AS_AREA_READ)
1173
        flags |= PAGE_READ;
1323
        flags |= PAGE_READ;
1174
       
1324
       
1175
    if (aflags & AS_AREA_WRITE)
1325
    if (aflags & AS_AREA_WRITE)
1176
        flags |= PAGE_WRITE;
1326
        flags |= PAGE_WRITE;
1177
   
1327
   
1178
    if (aflags & AS_AREA_EXEC)
1328
    if (aflags & AS_AREA_EXEC)
1179
        flags |= PAGE_EXEC;
1329
        flags |= PAGE_EXEC;
1180
   
1330
   
1181
    if (aflags & AS_AREA_CACHEABLE)
1331
    if (aflags & AS_AREA_CACHEABLE)
1182
        flags |= PAGE_CACHEABLE;
1332
        flags |= PAGE_CACHEABLE;
1183
       
1333
       
1184
    return flags;
1334
    return flags;
1185
}
1335
}
1186
 
1336
 
1187
/** Compute flags for virtual address translation subsytem.
1337
/** Compute flags for virtual address translation subsytem.
1188
 *
1338
 *
1189
 * The address space area must be locked.
1339
 * The address space area must be locked.
1190
 * Interrupts must be disabled.
1340
 * Interrupts must be disabled.
1191
 *
1341
 *
1192
 * @param a Address space area.
1342
 * @param a     Address space area.
1193
 *
1343
 *
1194
 * @return Flags to be used in page_mapping_insert().
1344
 * @return      Flags to be used in page_mapping_insert().
1195
 */
1345
 */
1196
int as_area_get_flags(as_area_t *a)
1346
int as_area_get_flags(as_area_t *a)
1197
{
1347
{
1198
    return area_flags_to_page_flags(a->flags);
1348
    return area_flags_to_page_flags(a->flags);
1199
}
1349
}
1200
 
1350
 
1201
/** Create page table.
1351
/** Create page table.
1202
 *
1352
 *
1203
 * Depending on architecture, create either address space
1353
 * Depending on architecture, create either address space private or global page
1204
 * private or global page table.
1354
 * table.
1205
 *
1355
 *
1206
 * @param flags Flags saying whether the page table is for kernel address space.
1356
 * @param flags     Flags saying whether the page table is for the kernel
-
 
1357
 *          address space.
1207
 *
1358
 *
1208
 * @return First entry of the page table.
1359
 * @return      First entry of the page table.
1209
 */
1360
 */
1210
pte_t *page_table_create(int flags)
1361
pte_t *page_table_create(int flags)
1211
{
1362
{
1212
#ifdef __OBJC__
-
 
1213
    return [as_t page_table_create: flags];
-
 
1214
#else
-
 
1215
    ASSERT(as_operations);
1363
    ASSERT(as_operations);
1216
    ASSERT(as_operations->page_table_create);
1364
    ASSERT(as_operations->page_table_create);
1217
   
1365
   
1218
    return as_operations->page_table_create(flags);
1366
    return as_operations->page_table_create(flags);
1219
#endif
-
 
1220
}
1367
}
1221
 
1368
 
1222
/** Destroy page table.
1369
/** Destroy page table.
1223
 *
1370
 *
1224
 * Destroy page table in architecture specific way.
1371
 * Destroy page table in architecture specific way.
1225
 *
1372
 *
1226
 * @param page_table Physical address of PTL0.
1373
 * @param page_table    Physical address of PTL0.
1227
 */
1374
 */
1228
void page_table_destroy(pte_t *page_table)
1375
void page_table_destroy(pte_t *page_table)
1229
{
1376
{
1230
#ifdef __OBJC__
-
 
1231
    return [as_t page_table_destroy: page_table];
-
 
1232
#else
-
 
1233
    ASSERT(as_operations);
1377
    ASSERT(as_operations);
1234
    ASSERT(as_operations->page_table_destroy);
1378
    ASSERT(as_operations->page_table_destroy);
1235
   
1379
   
1236
    as_operations->page_table_destroy(page_table);
1380
    as_operations->page_table_destroy(page_table);
1237
#endif
-
 
1238
}
1381
}
1239
 
1382
 
1240
/** Lock page table.
1383
/** Lock page table.
1241
 *
1384
 *
1242
 * This function should be called before any page_mapping_insert(),
1385
 * This function should be called before any page_mapping_insert(),
1243
 * page_mapping_remove() and page_mapping_find().
1386
 * page_mapping_remove() and page_mapping_find().
1244
 *
1387
 *
1245
 * Locking order is such that address space areas must be locked
1388
 * Locking order is such that address space areas must be locked
1246
 * prior to this call. Address space can be locked prior to this
1389
 * prior to this call. Address space can be locked prior to this
1247
 * call in which case the lock argument is false.
1390
 * call in which case the lock argument is false.
1248
 *
1391
 *
1249
 * @param as Address space.
1392
 * @param as        Address space.
1250
 * @param lock If false, do not attempt to lock as->lock.
1393
 * @param lock      If false, do not attempt to lock as->lock.
1251
 */
1394
 */
1252
void page_table_lock(as_t *as, bool lock)
1395
void page_table_lock(as_t *as, bool lock)
1253
{
1396
{
1254
#ifdef __OBJC__
-
 
1255
    [as page_table_lock: lock];
-
 
1256
#else
-
 
1257
    ASSERT(as_operations);
1397
    ASSERT(as_operations);
1258
    ASSERT(as_operations->page_table_lock);
1398
    ASSERT(as_operations->page_table_lock);
1259
   
1399
   
1260
    as_operations->page_table_lock(as, lock);
1400
    as_operations->page_table_lock(as, lock);
1261
#endif
-
 
1262
}
1401
}
1263
 
1402
 
1264
/** Unlock page table.
1403
/** Unlock page table.
1265
 *
1404
 *
1266
 * @param as Address space.
1405
 * @param as        Address space.
1267
 * @param unlock If false, do not attempt to unlock as->lock.
1406
 * @param unlock    If false, do not attempt to unlock as->lock.
1268
 */
1407
 */
1269
void page_table_unlock(as_t *as, bool unlock)
1408
void page_table_unlock(as_t *as, bool unlock)
1270
{
1409
{
1271
#ifdef __OBJC__
-
 
1272
    [as page_table_unlock: unlock];
-
 
1273
#else
-
 
1274
    ASSERT(as_operations);
1410
    ASSERT(as_operations);
1275
    ASSERT(as_operations->page_table_unlock);
1411
    ASSERT(as_operations->page_table_unlock);
1276
   
1412
   
1277
    as_operations->page_table_unlock(as, unlock);
1413
    as_operations->page_table_unlock(as, unlock);
1278
#endif
-
 
1279
}
1414
}
1280
 
1415
 
1281
 
1416
 
1282
/** Find address space area and lock it.
1417
/** Find address space area and lock it.
1283
 *
1418
 *
1284
 * The address space must be locked and interrupts must be disabled.
1419
 * The address space must be locked and interrupts must be disabled.
1285
 *
1420
 *
1286
 * @param as Address space.
1421
 * @param as        Address space.
1287
 * @param va Virtual address.
1422
 * @param va        Virtual address.
1288
 *
1423
 *
1289
 * @return Locked address space area containing va on success or NULL on
1424
 * @return      Locked address space area containing va on success or
1290
 *     failure.
1425
 *          NULL on failure.
1291
 */
1426
 */
1292
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
1427
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
1293
{
1428
{
1294
    as_area_t *a;
1429
    as_area_t *a;
1295
    btree_node_t *leaf, *lnode;
1430
    btree_node_t *leaf, *lnode;
1296
    unsigned int i;
1431
    unsigned int i;
1297
   
1432
   
1298
    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1433
    a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1299
    if (a) {
1434
    if (a) {
1300
        /* va is the base address of an address space area */
1435
        /* va is the base address of an address space area */
1301
        mutex_lock(&a->lock);
1436
        mutex_lock(&a->lock);
1302
        return a;
1437
        return a;
1303
    }
1438
    }
1304
   
1439
   
1305
    /*
1440
    /*
1306
     * Search the leaf node and the righmost record of its left neighbour
1441
     * Search the leaf node and the righmost record of its left neighbour
1307
     * to find out whether this is a miss or va belongs to an address
1442
     * to find out whether this is a miss or va belongs to an address
1308
     * space area found there.
1443
     * space area found there.
1309
     */
1444
     */
1310
   
1445
   
1311
    /* First, search the leaf node itself. */
1446
    /* First, search the leaf node itself. */
1312
    for (i = 0; i < leaf->keys; i++) {
1447
    for (i = 0; i < leaf->keys; i++) {
1313
        a = (as_area_t *) leaf->value[i];
1448
        a = (as_area_t *) leaf->value[i];
1314
        mutex_lock(&a->lock);
1449
        mutex_lock(&a->lock);
1315
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1450
        if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1316
            return a;
1451
            return a;
1317
        }
1452
        }
1318
        mutex_unlock(&a->lock);
1453
        mutex_unlock(&a->lock);
1319
    }
1454
    }
1320
 
1455
 
1321
    /*
1456
    /*
1322
     * Second, locate the left neighbour and test its last record.
1457
     * Second, locate the left neighbour and test its last record.
1323
     * Because of its position in the B+tree, it must have base < va.
1458
     * Because of its position in the B+tree, it must have base < va.
1324
     */
1459
     */
1325
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
1460
    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
1326
    if (lnode) {
1461
    if (lnode) {
1327
        a = (as_area_t *) lnode->value[lnode->keys - 1];
1462
        a = (as_area_t *) lnode->value[lnode->keys - 1];
1328
        mutex_lock(&a->lock);
1463
        mutex_lock(&a->lock);
1329
        if (va < a->base + a->pages * PAGE_SIZE) {
1464
        if (va < a->base + a->pages * PAGE_SIZE) {
1330
            return a;
1465
            return a;
1331
        }
1466
        }
1332
        mutex_unlock(&a->lock);
1467
        mutex_unlock(&a->lock);
1333
    }
1468
    }
1334
 
1469
 
1335
    return NULL;
1470
    return NULL;
1336
}
1471
}
1337
 
1472
 
1338
/** Check area conflicts with other areas.
1473
/** Check area conflicts with other areas.
1339
 *
1474
 *
1340
 * The address space must be locked and interrupts must be disabled.
1475
 * The address space must be locked and interrupts must be disabled.
1341
 *
1476
 *
1342
 * @param as Address space.
1477
 * @param as        Address space.
1343
 * @param va Starting virtual address of the area being tested.
1478
 * @param va        Starting virtual address of the area being tested.
1344
 * @param size Size of the area being tested.
1479
 * @param size      Size of the area being tested.
1345
 * @param avoid_area Do not touch this area.
1480
 * @param avoid_area    Do not touch this area.
1346
 *
1481
 *
1347
 * @return True if there is no conflict, false otherwise.
1482
 * @return      True if there is no conflict, false otherwise.
1348
 */
1483
 */
-
 
1484
bool
1349
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
1485
check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
1350
              as_area_t *avoid_area)
-
 
1351
{
1486
{
1352
    as_area_t *a;
1487
    as_area_t *a;
1353
    btree_node_t *leaf, *node;
1488
    btree_node_t *leaf, *node;
1354
    unsigned int i;
1489
    unsigned int i;
1355
   
1490
   
1356
    /*
1491
    /*
1357
     * We don't want any area to have conflicts with NULL page.
1492
     * We don't want any area to have conflicts with NULL page.
1358
     */
1493
     */
1359
    if (overlaps(va, size, NULL, PAGE_SIZE))
1494
    if (overlaps(va, size, NULL, PAGE_SIZE))
1360
        return false;
1495
        return false;
1361
   
1496
   
1362
    /*
1497
    /*
1363
     * The leaf node is found in O(log n), where n is proportional to
1498
     * The leaf node is found in O(log n), where n is proportional to
1364
     * the number of address space areas belonging to as.
1499
     * the number of address space areas belonging to as.
1365
     * The check for conflicts is then attempted on the rightmost
1500
     * The check for conflicts is then attempted on the rightmost
1366
     * record in the left neighbour, the leftmost record in the right
1501
     * record in the left neighbour, the leftmost record in the right
1367
     * neighbour and all records in the leaf node itself.
1502
     * neighbour and all records in the leaf node itself.
1368
     */
1503
     */
1369
   
1504
   
1370
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1505
    if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1371
        if (a != avoid_area)
1506
        if (a != avoid_area)
1372
            return false;
1507
            return false;
1373
    }
1508
    }
1374
   
1509
   
1375
    /* First, check the two border cases. */
1510
    /* First, check the two border cases. */
1376
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1511
    if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1377
        a = (as_area_t *) node->value[node->keys - 1];
1512
        a = (as_area_t *) node->value[node->keys - 1];
1378
        mutex_lock(&a->lock);
1513
        mutex_lock(&a->lock);
1379
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1514
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380
            mutex_unlock(&a->lock);
1515
            mutex_unlock(&a->lock);
1381
            return false;
1516
            return false;
1382
        }
1517
        }
1383
        mutex_unlock(&a->lock);
1518
        mutex_unlock(&a->lock);
1384
    }
1519
    }
1385
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
1520
    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
1386
    if (node) {
1521
    if (node) {
1387
        a = (as_area_t *) node->value[0];
1522
        a = (as_area_t *) node->value[0];
1388
        mutex_lock(&a->lock);
1523
        mutex_lock(&a->lock);
1389
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1524
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1390
            mutex_unlock(&a->lock);
1525
            mutex_unlock(&a->lock);
1391
            return false;
1526
            return false;
1392
        }
1527
        }
1393
        mutex_unlock(&a->lock);
1528
        mutex_unlock(&a->lock);
1394
    }
1529
    }
1395
   
1530
   
1396
    /* Second, check the leaf node. */
1531
    /* Second, check the leaf node. */
1397
    for (i = 0; i < leaf->keys; i++) {
1532
    for (i = 0; i < leaf->keys; i++) {
1398
        a = (as_area_t *) leaf->value[i];
1533
        a = (as_area_t *) leaf->value[i];
1399
   
1534
   
1400
        if (a == avoid_area)
1535
        if (a == avoid_area)
1401
            continue;
1536
            continue;
1402
   
1537
   
1403
        mutex_lock(&a->lock);
1538
        mutex_lock(&a->lock);
1404
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1539
        if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1405
            mutex_unlock(&a->lock);
1540
            mutex_unlock(&a->lock);
1406
            return false;
1541
            return false;
1407
        }
1542
        }
1408
        mutex_unlock(&a->lock);
1543
        mutex_unlock(&a->lock);
1409
    }
1544
    }
1410
 
1545
 
1411
    /*
1546
    /*
1412
     * So far, the area does not conflict with other areas.
1547
     * So far, the area does not conflict with other areas.
1413
     * Check if it doesn't conflict with kernel address space.
1548
     * Check if it doesn't conflict with kernel address space.
1414
     */  
1549
     */  
1415
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1550
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1416
        return !overlaps(va, size,
1551
        return !overlaps(va, size,
1417
            KERNEL_ADDRESS_SPACE_START,
1552
            KERNEL_ADDRESS_SPACE_START,
1418
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
1553
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
1419
    }
1554
    }
1420
 
1555
 
1421
    return true;
1556
    return true;
1422
}
1557
}
1423
 
1558
 
1424
/** Return size of the address space area with given base.
1559
/** Return size of the address space area with given base.
1425
 *
1560
 *
1426
 * @param base      Arbitrary address insede the address space area.
1561
 * @param base      Arbitrary address insede the address space area.
1427
 *
1562
 *
1428
 * @return      Size of the address space area in bytes or zero if it
1563
 * @return      Size of the address space area in bytes or zero if it
1429
 *          does not exist.
1564
 *          does not exist.
1430
 */
1565
 */
1431
size_t as_area_get_size(uintptr_t base)
1566
size_t as_area_get_size(uintptr_t base)
1432
{
1567
{
1433
    ipl_t ipl;
1568
    ipl_t ipl;
1434
    as_area_t *src_area;
1569
    as_area_t *src_area;
1435
    size_t size;
1570
    size_t size;
1436
 
1571
 
1437
    ipl = interrupts_disable();
1572
    ipl = interrupts_disable();
1438
    src_area = find_area_and_lock(AS, base);
1573
    src_area = find_area_and_lock(AS, base);
1439
    if (src_area){
1574
    if (src_area) {
1440
        size = src_area->pages * PAGE_SIZE;
1575
        size = src_area->pages * PAGE_SIZE;
1441
        mutex_unlock(&src_area->lock);
1576
        mutex_unlock(&src_area->lock);
1442
    } else {
1577
    } else {
1443
        size = 0;
1578
        size = 0;
1444
    }
1579
    }
1445
    interrupts_restore(ipl);
1580
    interrupts_restore(ipl);
1446
    return size;
1581
    return size;
1447
}
1582
}
1448
 
1583
 
1449
/** Mark portion of address space area as used.
1584
/** Mark portion of address space area as used.
1450
 *
1585
 *
1451
 * The address space area must be already locked.
1586
 * The address space area must be already locked.
1452
 *
1587
 *
1453
 * @param a Address space area.
1588
 * @param a     Address space area.
1454
 * @param page First page to be marked.
1589
 * @param page      First page to be marked.
1455
 * @param count Number of page to be marked.
1590
 * @param count     Number of page to be marked.
1456
 *
1591
 *
1457
 * @return 0 on failure and 1 on success.
1592
 * @return      Zero on failure and non-zero on success.
1458
 */
1593
 */
1459
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1594
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1460
{
1595
{
1461
    btree_node_t *leaf, *node;
1596
    btree_node_t *leaf, *node;
1462
    count_t pages;
1597
    count_t pages;
1463
    unsigned int i;
1598
    unsigned int i;
1464
 
1599
 
1465
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1600
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1466
    ASSERT(count);
1601
    ASSERT(count);
1467
 
1602
 
1468
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1603
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1469
    if (pages) {
1604
    if (pages) {
1470
        /*
1605
        /*
1471
         * We hit the beginning of some used space.
1606
         * We hit the beginning of some used space.
1472
         */
1607
         */
1473
        return 0;
1608
        return 0;
1474
    }
1609
    }
1475
 
1610
 
1476
    if (!leaf->keys) {
1611
    if (!leaf->keys) {
1477
        btree_insert(&a->used_space, page, (void *) count, leaf);
1612
        btree_insert(&a->used_space, page, (void *) count, leaf);
1478
        return 1;
1613
        return 1;
1479
    }
1614
    }
1480
 
1615
 
1481
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1616
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1482
    if (node) {
1617
    if (node) {
1483
        uintptr_t left_pg = node->key[node->keys - 1];
1618
        uintptr_t left_pg = node->key[node->keys - 1];
1484
        uintptr_t right_pg = leaf->key[0];
1619
        uintptr_t right_pg = leaf->key[0];
1485
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1620
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1486
        count_t right_cnt = (count_t) leaf->value[0];
1621
        count_t right_cnt = (count_t) leaf->value[0];
1487
       
1622
       
1488
        /*
1623
        /*
1489
         * Examine the possibility that the interval fits
1624
         * Examine the possibility that the interval fits
1490
         * somewhere between the rightmost interval of
1625
         * somewhere between the rightmost interval of
1491
         * the left neigbour and the first interval of the leaf.
1626
         * the left neigbour and the first interval of the leaf.
1492
         */
1627
         */
1493
         
1628
         
1494
        if (page >= right_pg) {
1629
        if (page >= right_pg) {
1495
            /* Do nothing. */
1630
            /* Do nothing. */
1496
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1631
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1497
            left_cnt * PAGE_SIZE)) {
1632
            left_cnt * PAGE_SIZE)) {
1498
            /* The interval intersects with the left interval. */
1633
            /* The interval intersects with the left interval. */
1499
            return 0;
1634
            return 0;
1500
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1635
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1501
            right_cnt * PAGE_SIZE)) {
1636
            right_cnt * PAGE_SIZE)) {
1502
            /* The interval intersects with the right interval. */
1637
            /* The interval intersects with the right interval. */
1503
            return 0;          
1638
            return 0;          
1504
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1639
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1505
            (page + count * PAGE_SIZE == right_pg)) {
1640
            (page + count * PAGE_SIZE == right_pg)) {
1506
            /*
1641
            /*
1507
             * The interval can be added by merging the two already
1642
             * The interval can be added by merging the two already
1508
             * present intervals.
1643
             * present intervals.
1509
             */
1644
             */
1510
            node->value[node->keys - 1] += count + right_cnt;
1645
            node->value[node->keys - 1] += count + right_cnt;
1511
            btree_remove(&a->used_space, right_pg, leaf);
1646
            btree_remove(&a->used_space, right_pg, leaf);
1512
            return 1;
1647
            return 1;
1513
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1648
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1514
            /*
1649
            /*
1515
             * The interval can be added by simply growing the left
1650
             * The interval can be added by simply growing the left
1516
             * interval.
1651
             * interval.
1517
             */
1652
             */
1518
            node->value[node->keys - 1] += count;
1653
            node->value[node->keys - 1] += count;
1519
            return 1;
1654
            return 1;
1520
        } else if (page + count * PAGE_SIZE == right_pg) {
1655
        } else if (page + count * PAGE_SIZE == right_pg) {
1521
            /*
1656
            /*
1522
             * The interval can be addded by simply moving base of
1657
             * The interval can be addded by simply moving base of
1523
             * the right interval down and increasing its size
1658
             * the right interval down and increasing its size
1524
             * accordingly.
1659
             * accordingly.
1525
             */
1660
             */
1526
            leaf->value[0] += count;
1661
            leaf->value[0] += count;
1527
            leaf->key[0] = page;
1662
            leaf->key[0] = page;
1528
            return 1;
1663
            return 1;
1529
        } else {
1664
        } else {
1530
            /*
1665
            /*
1531
             * The interval is between both neigbouring intervals,
1666
             * The interval is between both neigbouring intervals,
1532
             * but cannot be merged with any of them.
1667
             * but cannot be merged with any of them.
1533
             */
1668
             */
1534
            btree_insert(&a->used_space, page, (void *) count,
1669
            btree_insert(&a->used_space, page, (void *) count,
1535
                leaf);
1670
                leaf);
1536
            return 1;
1671
            return 1;
1537
        }
1672
        }
1538
    } else if (page < leaf->key[0]) {
1673
    } else if (page < leaf->key[0]) {
1539
        uintptr_t right_pg = leaf->key[0];
1674
        uintptr_t right_pg = leaf->key[0];
1540
        count_t right_cnt = (count_t) leaf->value[0];
1675
        count_t right_cnt = (count_t) leaf->value[0];
1541
   
1676
   
1542
        /*
1677
        /*
1543
         * Investigate the border case in which the left neighbour does
1678
         * Investigate the border case in which the left neighbour does
1544
         * not exist but the interval fits from the left.
1679
         * not exist but the interval fits from the left.
1545
         */
1680
         */
1546
         
1681
         
1547
        if (overlaps(page, count * PAGE_SIZE, right_pg,
1682
        if (overlaps(page, count * PAGE_SIZE, right_pg,
1548
            right_cnt * PAGE_SIZE)) {
1683
            right_cnt * PAGE_SIZE)) {
1549
            /* The interval intersects with the right interval. */
1684
            /* The interval intersects with the right interval. */
1550
            return 0;
1685
            return 0;
1551
        } else if (page + count * PAGE_SIZE == right_pg) {
1686
        } else if (page + count * PAGE_SIZE == right_pg) {
1552
            /*
1687
            /*
1553
             * The interval can be added by moving the base of the
1688
             * The interval can be added by moving the base of the
1554
             * right interval down and increasing its size
1689
             * right interval down and increasing its size
1555
             * accordingly.
1690
             * accordingly.
1556
             */
1691
             */
1557
            leaf->key[0] = page;
1692
            leaf->key[0] = page;
1558
            leaf->value[0] += count;
1693
            leaf->value[0] += count;
1559
            return 1;
1694
            return 1;
1560
        } else {
1695
        } else {
1561
            /*
1696
            /*
1562
             * The interval doesn't adjoin with the right interval.
1697
             * The interval doesn't adjoin with the right interval.
1563
             * It must be added individually.
1698
             * It must be added individually.
1564
             */
1699
             */
1565
            btree_insert(&a->used_space, page, (void *) count,
1700
            btree_insert(&a->used_space, page, (void *) count,
1566
                leaf);
1701
                leaf);
1567
            return 1;
1702
            return 1;
1568
        }
1703
        }
1569
    }
1704
    }
1570
 
1705
 
1571
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1706
    node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1572
    if (node) {
1707
    if (node) {
1573
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1708
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1574
        uintptr_t right_pg = node->key[0];
1709
        uintptr_t right_pg = node->key[0];
1575
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1710
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1576
        count_t right_cnt = (count_t) node->value[0];
1711
        count_t right_cnt = (count_t) node->value[0];
1577
       
1712
       
1578
        /*
1713
        /*
1579
         * Examine the possibility that the interval fits
1714
         * Examine the possibility that the interval fits
1580
         * somewhere between the leftmost interval of
1715
         * somewhere between the leftmost interval of
1581
         * the right neigbour and the last interval of the leaf.
1716
         * the right neigbour and the last interval of the leaf.
1582
         */
1717
         */
1583
 
1718
 
1584
        if (page < left_pg) {
1719
        if (page < left_pg) {
1585
            /* Do nothing. */
1720
            /* Do nothing. */
1586
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1721
        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
1587
            left_cnt * PAGE_SIZE)) {
1722
            left_cnt * PAGE_SIZE)) {
1588
            /* The interval intersects with the left interval. */
1723
            /* The interval intersects with the left interval. */
1589
            return 0;
1724
            return 0;
1590
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1725
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1591
            right_cnt * PAGE_SIZE)) {
1726
            right_cnt * PAGE_SIZE)) {
1592
            /* The interval intersects with the right interval. */
1727
            /* The interval intersects with the right interval. */
1593
            return 0;          
1728
            return 0;          
1594
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1729
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1595
            (page + count * PAGE_SIZE == right_pg)) {
1730
            (page + count * PAGE_SIZE == right_pg)) {
1596
            /*
1731
            /*
1597
             * The interval can be added by merging the two already
1732
             * The interval can be added by merging the two already
1598
             * present intervals.
1733
             * present intervals.
1599
             * */
1734
             * */
1600
            leaf->value[leaf->keys - 1] += count + right_cnt;
1735
            leaf->value[leaf->keys - 1] += count + right_cnt;
1601
            btree_remove(&a->used_space, right_pg, node);
1736
            btree_remove(&a->used_space, right_pg, node);
1602
            return 1;
1737
            return 1;
1603
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1738
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1604
            /*
1739
            /*
1605
             * The interval can be added by simply growing the left
1740
             * The interval can be added by simply growing the left
1606
             * interval.
1741
             * interval.
1607
             * */
1742
             * */
1608
            leaf->value[leaf->keys - 1] +=  count;
1743
            leaf->value[leaf->keys - 1] +=  count;
1609
            return 1;
1744
            return 1;
1610
        } else if (page + count * PAGE_SIZE == right_pg) {
1745
        } else if (page + count * PAGE_SIZE == right_pg) {
1611
            /*
1746
            /*
1612
             * The interval can be addded by simply moving base of
1747
             * The interval can be addded by simply moving base of
1613
             * the right interval down and increasing its size
1748
             * the right interval down and increasing its size
1614
             * accordingly.
1749
             * accordingly.
1615
             */
1750
             */
1616
            node->value[0] += count;
1751
            node->value[0] += count;
1617
            node->key[0] = page;
1752
            node->key[0] = page;
1618
            return 1;
1753
            return 1;
1619
        } else {
1754
        } else {
1620
            /*
1755
            /*
1621
             * The interval is between both neigbouring intervals,
1756
             * The interval is between both neigbouring intervals,
1622
             * but cannot be merged with any of them.
1757
             * but cannot be merged with any of them.
1623
             */
1758
             */
1624
            btree_insert(&a->used_space, page, (void *) count,
1759
            btree_insert(&a->used_space, page, (void *) count,
1625
                leaf);
1760
                leaf);
1626
            return 1;
1761
            return 1;
1627
        }
1762
        }
1628
    } else if (page >= leaf->key[leaf->keys - 1]) {
1763
    } else if (page >= leaf->key[leaf->keys - 1]) {
1629
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1764
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1630
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1765
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1631
   
1766
   
1632
        /*
1767
        /*
1633
         * Investigate the border case in which the right neighbour
1768
         * Investigate the border case in which the right neighbour
1634
         * does not exist but the interval fits from the right.
1769
         * does not exist but the interval fits from the right.
1635
         */
1770
         */
1636
         
1771
         
1637
        if (overlaps(page, count * PAGE_SIZE, left_pg,
1772
        if (overlaps(page, count * PAGE_SIZE, left_pg,
1638
            left_cnt * PAGE_SIZE)) {
1773
            left_cnt * PAGE_SIZE)) {
1639
            /* The interval intersects with the left interval. */
1774
            /* The interval intersects with the left interval. */
1640
            return 0;
1775
            return 0;
1641
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1776
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
1642
            /*
1777
            /*
1643
             * The interval can be added by growing the left
1778
             * The interval can be added by growing the left
1644
             * interval.
1779
             * interval.
1645
             */
1780
             */
1646
            leaf->value[leaf->keys - 1] += count;
1781
            leaf->value[leaf->keys - 1] += count;
1647
            return 1;
1782
            return 1;
1648
        } else {
1783
        } else {
1649
            /*
1784
            /*
1650
             * The interval doesn't adjoin with the left interval.
1785
             * The interval doesn't adjoin with the left interval.
1651
             * It must be added individually.
1786
             * It must be added individually.
1652
             */
1787
             */
1653
            btree_insert(&a->used_space, page, (void *) count,
1788
            btree_insert(&a->used_space, page, (void *) count,
1654
                leaf);
1789
                leaf);
1655
            return 1;
1790
            return 1;
1656
        }
1791
        }
1657
    }
1792
    }
1658
   
1793
   
1659
    /*
1794
    /*
1660
     * Note that if the algorithm made it thus far, the interval can fit
1795
     * Note that if the algorithm made it thus far, the interval can fit
1661
     * only between two other intervals of the leaf. The two border cases
1796
     * only between two other intervals of the leaf. The two border cases
1662
     * were already resolved.
1797
     * were already resolved.
1663
     */
1798
     */
1664
    for (i = 1; i < leaf->keys; i++) {
1799
    for (i = 1; i < leaf->keys; i++) {
1665
        if (page < leaf->key[i]) {
1800
        if (page < leaf->key[i]) {
1666
            uintptr_t left_pg = leaf->key[i - 1];
1801
            uintptr_t left_pg = leaf->key[i - 1];
1667
            uintptr_t right_pg = leaf->key[i];
1802
            uintptr_t right_pg = leaf->key[i];
1668
            count_t left_cnt = (count_t) leaf->value[i - 1];
1803
            count_t left_cnt = (count_t) leaf->value[i - 1];
1669
            count_t right_cnt = (count_t) leaf->value[i];
1804
            count_t right_cnt = (count_t) leaf->value[i];
1670
 
1805
 
1671
            /*
1806
            /*
1672
             * The interval fits between left_pg and right_pg.
1807
             * The interval fits between left_pg and right_pg.
1673
             */
1808
             */
1674
 
1809
 
1675
            if (overlaps(page, count * PAGE_SIZE, left_pg,
1810
            if (overlaps(page, count * PAGE_SIZE, left_pg,
1676
                left_cnt * PAGE_SIZE)) {
1811
                left_cnt * PAGE_SIZE)) {
1677
                /*
1812
                /*
1678
                 * The interval intersects with the left
1813
                 * The interval intersects with the left
1679
                 * interval.
1814
                 * interval.
1680
                 */
1815
                 */
1681
                return 0;
1816
                return 0;
1682
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1817
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
1683
                right_cnt * PAGE_SIZE)) {
1818
                right_cnt * PAGE_SIZE)) {
1684
                /*
1819
                /*
1685
                 * The interval intersects with the right
1820
                 * The interval intersects with the right
1686
                 * interval.
1821
                 * interval.
1687
                 */
1822
                 */
1688
                return 0;          
1823
                return 0;          
1689
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1824
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1690
                (page + count * PAGE_SIZE == right_pg)) {
1825
                (page + count * PAGE_SIZE == right_pg)) {
1691
                /*
1826
                /*
1692
                 * The interval can be added by merging the two
1827
                 * The interval can be added by merging the two
1693
                 * already present intervals.
1828
                 * already present intervals.
1694
                 */
1829
                 */
1695
                leaf->value[i - 1] += count + right_cnt;
1830
                leaf->value[i - 1] += count + right_cnt;
1696
                btree_remove(&a->used_space, right_pg, leaf);
1831
                btree_remove(&a->used_space, right_pg, leaf);
1697
                return 1;
1832
                return 1;
1698
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1833
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
1699
                /*
1834
                /*
1700
                 * The interval can be added by simply growing
1835
                 * The interval can be added by simply growing
1701
                 * the left interval.
1836
                 * the left interval.
1702
                 */
1837
                 */
1703
                leaf->value[i - 1] += count;
1838
                leaf->value[i - 1] += count;
1704
                return 1;
1839
                return 1;
1705
            } else if (page + count * PAGE_SIZE == right_pg) {
1840
            } else if (page + count * PAGE_SIZE == right_pg) {
1706
                /*
1841
                /*
1707
                     * The interval can be addded by simply moving
1842
                     * The interval can be addded by simply moving
1708
                 * base of the right interval down and
1843
                 * base of the right interval down and
1709
                 * increasing its size accordingly.
1844
                 * increasing its size accordingly.
1710
                 */
1845
                 */
1711
                leaf->value[i] += count;
1846
                leaf->value[i] += count;
1712
                leaf->key[i] = page;
1847
                leaf->key[i] = page;
1713
                return 1;
1848
                return 1;
1714
            } else {
1849
            } else {
1715
                /*
1850
                /*
1716
                 * The interval is between both neigbouring
1851
                 * The interval is between both neigbouring
1717
                 * intervals, but cannot be merged with any of
1852
                 * intervals, but cannot be merged with any of
1718
                 * them.
1853
                 * them.
1719
                 */
1854
                 */
1720
                btree_insert(&a->used_space, page,
1855
                btree_insert(&a->used_space, page,
1721
                    (void *) count, leaf);
1856
                    (void *) count, leaf);
1722
                return 1;
1857
                return 1;
1723
            }
1858
            }
1724
        }
1859
        }
1725
    }
1860
    }
1726
 
1861
 
1727
    panic("Inconsistency detected while adding %" PRIc " pages of used space at "
1862
    panic("Inconsistency detected while adding %" PRIc " pages of used "
1728
        "%p.\n", count, page);
1863
        "space at %p.\n", count, page);
1729
}
1864
}
1730
 
1865
 
1731
/** Mark portion of address space area as unused.
1866
/** Mark portion of address space area as unused.
1732
 *
1867
 *
1733
 * The address space area must be already locked.
1868
 * The address space area must be already locked.
1734
 *
1869
 *
1735
 * @param a Address space area.
1870
 * @param a     Address space area.
1736
 * @param page First page to be marked.
1871
 * @param page      First page to be marked.
1737
 * @param count Number of page to be marked.
1872
 * @param count     Number of page to be marked.
1738
 *
1873
 *
1739
 * @return 0 on failure and 1 on success.
1874
 * @return      Zero on failure and non-zero on success.
1740
 */
1875
 */
1741
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1876
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1742
{
1877
{
1743
    btree_node_t *leaf, *node;
1878
    btree_node_t *leaf, *node;
1744
    count_t pages;
1879
    count_t pages;
1745
    unsigned int i;
1880
    unsigned int i;
1746
 
1881
 
1747
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1882
    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1748
    ASSERT(count);
1883
    ASSERT(count);
1749
 
1884
 
1750
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1885
    pages = (count_t) btree_search(&a->used_space, page, &leaf);
1751
    if (pages) {
1886
    if (pages) {
1752
        /*
1887
        /*
1753
         * We are lucky, page is the beginning of some interval.
1888
         * We are lucky, page is the beginning of some interval.
1754
         */
1889
         */
1755
        if (count > pages) {
1890
        if (count > pages) {
1756
            return 0;
1891
            return 0;
1757
        } else if (count == pages) {
1892
        } else if (count == pages) {
1758
            btree_remove(&a->used_space, page, leaf);
1893
            btree_remove(&a->used_space, page, leaf);
1759
            return 1;
1894
            return 1;
1760
        } else {
1895
        } else {
1761
            /*
1896
            /*
1762
             * Find the respective interval.
1897
             * Find the respective interval.
1763
             * Decrease its size and relocate its start address.
1898
             * Decrease its size and relocate its start address.
1764
             */
1899
             */
1765
            for (i = 0; i < leaf->keys; i++) {
1900
            for (i = 0; i < leaf->keys; i++) {
1766
                if (leaf->key[i] == page) {
1901
                if (leaf->key[i] == page) {
1767
                    leaf->key[i] += count * PAGE_SIZE;
1902
                    leaf->key[i] += count * PAGE_SIZE;
1768
                    leaf->value[i] -= count;
1903
                    leaf->value[i] -= count;
1769
                    return 1;
1904
                    return 1;
1770
                }
1905
                }
1771
            }
1906
            }
1772
            goto error;
1907
            goto error;
1773
        }
1908
        }
1774
    }
1909
    }
1775
 
1910
 
1776
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1911
    node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1777
    if (node && page < leaf->key[0]) {
1912
    if (node && page < leaf->key[0]) {
1778
        uintptr_t left_pg = node->key[node->keys - 1];
1913
        uintptr_t left_pg = node->key[node->keys - 1];
1779
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1914
        count_t left_cnt = (count_t) node->value[node->keys - 1];
1780
 
1915
 
1781
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1916
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1782
            count * PAGE_SIZE)) {
1917
            count * PAGE_SIZE)) {
1783
            if (page + count * PAGE_SIZE ==
1918
            if (page + count * PAGE_SIZE ==
1784
                left_pg + left_cnt * PAGE_SIZE) {
1919
                left_pg + left_cnt * PAGE_SIZE) {
1785
                /*
1920
                /*
1786
                 * The interval is contained in the rightmost
1921
                 * The interval is contained in the rightmost
1787
                 * interval of the left neighbour and can be
1922
                 * interval of the left neighbour and can be
1788
                 * removed by updating the size of the bigger
1923
                 * removed by updating the size of the bigger
1789
                 * interval.
1924
                 * interval.
1790
                 */
1925
                 */
1791
                node->value[node->keys - 1] -= count;
1926
                node->value[node->keys - 1] -= count;
1792
                return 1;
1927
                return 1;
1793
            } else if (page + count * PAGE_SIZE <
1928
            } else if (page + count * PAGE_SIZE <
1794
                left_pg + left_cnt*PAGE_SIZE) {
1929
                left_pg + left_cnt*PAGE_SIZE) {
1795
                count_t new_cnt;
1930
                count_t new_cnt;
1796
               
1931
               
1797
                /*
1932
                /*
1798
                 * The interval is contained in the rightmost
1933
                 * The interval is contained in the rightmost
1799
                 * interval of the left neighbour but its
1934
                 * interval of the left neighbour but its
1800
                 * removal requires both updating the size of
1935
                 * removal requires both updating the size of
1801
                 * the original interval and also inserting a
1936
                 * the original interval and also inserting a
1802
                 * new interval.
1937
                 * new interval.
1803
                 */
1938
                 */
1804
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1939
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1805
                    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1940
                    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1806
                node->value[node->keys - 1] -= count + new_cnt;
1941
                node->value[node->keys - 1] -= count + new_cnt;
1807
                btree_insert(&a->used_space, page +
1942
                btree_insert(&a->used_space, page +
1808
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
1943
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
1809
                return 1;
1944
                return 1;
1810
            }
1945
            }
1811
        }
1946
        }
1812
        return 0;
1947
        return 0;
1813
    } else if (page < leaf->key[0]) {
1948
    } else if (page < leaf->key[0]) {
1814
        return 0;
1949
        return 0;
1815
    }
1950
    }
1816
   
1951
   
1817
    if (page > leaf->key[leaf->keys - 1]) {
1952
    if (page > leaf->key[leaf->keys - 1]) {
1818
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1953
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
1819
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1954
        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1820
 
1955
 
1821
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1956
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1822
            count * PAGE_SIZE)) {
1957
            count * PAGE_SIZE)) {
1823
            if (page + count * PAGE_SIZE ==
1958
            if (page + count * PAGE_SIZE ==
1824
                left_pg + left_cnt * PAGE_SIZE) {
1959
                left_pg + left_cnt * PAGE_SIZE) {
1825
                /*
1960
                /*
1826
                 * The interval is contained in the rightmost
1961
                 * The interval is contained in the rightmost
1827
                 * interval of the leaf and can be removed by
1962
                 * interval of the leaf and can be removed by
1828
                 * updating the size of the bigger interval.
1963
                 * updating the size of the bigger interval.
1829
                 */
1964
                 */
1830
                leaf->value[leaf->keys - 1] -= count;
1965
                leaf->value[leaf->keys - 1] -= count;
1831
                return 1;
1966
                return 1;
1832
            } else if (page + count * PAGE_SIZE < left_pg +
1967
            } else if (page + count * PAGE_SIZE < left_pg +
1833
                left_cnt * PAGE_SIZE) {
1968
                left_cnt * PAGE_SIZE) {
1834
                count_t new_cnt;
1969
                count_t new_cnt;
1835
               
1970
               
1836
                /*
1971
                /*
1837
                 * The interval is contained in the rightmost
1972
                 * The interval is contained in the rightmost
1838
                 * interval of the leaf but its removal
1973
                 * interval of the leaf but its removal
1839
                 * requires both updating the size of the
1974
                 * requires both updating the size of the
1840
                 * original interval and also inserting a new
1975
                 * original interval and also inserting a new
1841
                 * interval.
1976
                 * interval.
1842
                 */
1977
                 */
1843
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1978
                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1844
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1979
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1845
                leaf->value[leaf->keys - 1] -= count + new_cnt;
1980
                leaf->value[leaf->keys - 1] -= count + new_cnt;
1846
                btree_insert(&a->used_space, page +
1981
                btree_insert(&a->used_space, page +
1847
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
1982
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
1848
                return 1;
1983
                return 1;
1849
            }
1984
            }
1850
        }
1985
        }
1851
        return 0;
1986
        return 0;
1852
    }  
1987
    }  
1853
   
1988
   
1854
    /*
1989
    /*
1855
     * The border cases have been already resolved.
1990
     * The border cases have been already resolved.
1856
     * Now the interval can be only between intervals of the leaf.
1991
     * Now the interval can be only between intervals of the leaf.
1857
     */
1992
     */
1858
    for (i = 1; i < leaf->keys - 1; i++) {
1993
    for (i = 1; i < leaf->keys - 1; i++) {
1859
        if (page < leaf->key[i]) {
1994
        if (page < leaf->key[i]) {
1860
            uintptr_t left_pg = leaf->key[i - 1];
1995
            uintptr_t left_pg = leaf->key[i - 1];
1861
            count_t left_cnt = (count_t) leaf->value[i - 1];
1996
            count_t left_cnt = (count_t) leaf->value[i - 1];
1862
 
1997
 
1863
            /*
1998
            /*
1864
             * Now the interval is between intervals corresponding
1999
             * Now the interval is between intervals corresponding
1865
             * to (i - 1) and i.
2000
             * to (i - 1) and i.
1866
             */
2001
             */
1867
            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
2002
            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1868
                count * PAGE_SIZE)) {
2003
                count * PAGE_SIZE)) {
1869
                if (page + count * PAGE_SIZE ==
2004
                if (page + count * PAGE_SIZE ==
1870
                    left_pg + left_cnt*PAGE_SIZE) {
2005
                    left_pg + left_cnt*PAGE_SIZE) {
1871
                    /*
2006
                    /*
1872
                     * The interval is contained in the
2007
                     * The interval is contained in the
1873
                     * interval (i - 1) of the leaf and can
2008
                     * interval (i - 1) of the leaf and can
1874
                     * be removed by updating the size of
2009
                     * be removed by updating the size of
1875
                     * the bigger interval.
2010
                     * the bigger interval.
1876
                     */
2011
                     */
1877
                    leaf->value[i - 1] -= count;
2012
                    leaf->value[i - 1] -= count;
1878
                    return 1;
2013
                    return 1;
1879
                } else if (page + count * PAGE_SIZE <
2014
                } else if (page + count * PAGE_SIZE <
1880
                    left_pg + left_cnt * PAGE_SIZE) {
2015
                    left_pg + left_cnt * PAGE_SIZE) {
1881
                    count_t new_cnt;
2016
                    count_t new_cnt;
1882
               
2017
               
1883
                    /*
2018
                    /*
1884
                     * The interval is contained in the
2019
                     * The interval is contained in the
1885
                     * interval (i - 1) of the leaf but its
2020
                     * interval (i - 1) of the leaf but its
1886
                     * removal requires both updating the
2021
                     * removal requires both updating the
1887
                     * size of the original interval and
2022
                     * size of the original interval and
1888
                     * also inserting a new interval.
2023
                     * also inserting a new interval.
1889
                     */
2024
                     */
1890
                    new_cnt = ((left_pg +
2025
                    new_cnt = ((left_pg +
1891
                        left_cnt * PAGE_SIZE) -
2026
                        left_cnt * PAGE_SIZE) -
1892
                        (page + count * PAGE_SIZE)) >>
2027
                        (page + count * PAGE_SIZE)) >>
1893
                        PAGE_WIDTH;
2028
                        PAGE_WIDTH;
1894
                    leaf->value[i - 1] -= count + new_cnt;
2029
                    leaf->value[i - 1] -= count + new_cnt;
1895
                    btree_insert(&a->used_space, page +
2030
                    btree_insert(&a->used_space, page +
1896
                        count * PAGE_SIZE, (void *) new_cnt,
2031
                        count * PAGE_SIZE, (void *) new_cnt,
1897
                        leaf);
2032
                        leaf);
1898
                    return 1;
2033
                    return 1;
1899
                }
2034
                }
1900
            }
2035
            }
1901
            return 0;
2036
            return 0;
1902
        }
2037
        }
1903
    }
2038
    }
1904
 
2039
 
1905
error:
2040
error:
1906
    panic("Inconsistency detected while removing %" PRIc " pages of used space "
2041
    panic("Inconsistency detected while removing %" PRIc " pages of used "
1907
        "from %p.\n", count, page);
2042
        "space from %p.\n", count, page);
1908
}
2043
}
1909
 
2044
 
1910
/** Remove reference to address space area share info.
2045
/** Remove reference to address space area share info.
1911
 *
2046
 *
1912
 * If the reference count drops to 0, the sh_info is deallocated.
2047
 * If the reference count drops to 0, the sh_info is deallocated.
1913
 *
2048
 *
1914
 * @param sh_info Pointer to address space area share info.
2049
 * @param sh_info   Pointer to address space area share info.
1915
 */
2050
 */
1916
void sh_info_remove_reference(share_info_t *sh_info)
2051
void sh_info_remove_reference(share_info_t *sh_info)
1917
{
2052
{
1918
    bool dealloc = false;
2053
    bool dealloc = false;
1919
 
2054
 
1920
    mutex_lock(&sh_info->lock);
2055
    mutex_lock(&sh_info->lock);
1921
    ASSERT(sh_info->refcount);
2056
    ASSERT(sh_info->refcount);
1922
    if (--sh_info->refcount == 0) {
2057
    if (--sh_info->refcount == 0) {
1923
        dealloc = true;
2058
        dealloc = true;
1924
        link_t *cur;
2059
        link_t *cur;
1925
       
2060
       
1926
        /*
2061
        /*
1927
         * Now walk carefully the pagemap B+tree and free/remove
2062
         * Now walk carefully the pagemap B+tree and free/remove
1928
         * reference from all frames found there.
2063
         * reference from all frames found there.
1929
         */
2064
         */
1930
        for (cur = sh_info->pagemap.leaf_head.next;
2065
        for (cur = sh_info->pagemap.leaf_head.next;
1931
            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
2066
            cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1932
            btree_node_t *node;
2067
            btree_node_t *node;
1933
            unsigned int i;
2068
            unsigned int i;
1934
           
2069
           
1935
            node = list_get_instance(cur, btree_node_t, leaf_link);
2070
            node = list_get_instance(cur, btree_node_t, leaf_link);
1936
            for (i = 0; i < node->keys; i++)
2071
            for (i = 0; i < node->keys; i++)
1937
                frame_free((uintptr_t) node->value[i]);
2072
                frame_free((uintptr_t) node->value[i]);
1938
        }
2073
        }
1939
       
2074
       
1940
    }
2075
    }
1941
    mutex_unlock(&sh_info->lock);
2076
    mutex_unlock(&sh_info->lock);
1942
   
2077
   
1943
    if (dealloc) {
2078
    if (dealloc) {
1944
        btree_destroy(&sh_info->pagemap);
2079
        btree_destroy(&sh_info->pagemap);
1945
        free(sh_info);
2080
        free(sh_info);
1946
    }
2081
    }
1947
}
2082
}
1948
 
2083
 
1949
/*
2084
/*
1950
 * Address space related syscalls.
2085
 * Address space related syscalls.
1951
 */
2086
 */
1952
 
2087
 
1953
/** Wrapper for as_area_create(). */
2088
/** Wrapper for as_area_create(). */
1954
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
2089
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1955
{
2090
{
1956
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
2091
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1957
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
2092
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
1958
        return (unative_t) address;
2093
        return (unative_t) address;
1959
    else
2094
    else
1960
        return (unative_t) -1;
2095
        return (unative_t) -1;
1961
}
2096
}
1962
 
2097
 
1963
/** Wrapper for as_area_resize(). */
2098
/** Wrapper for as_area_resize(). */
1964
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
2099
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1965
{
2100
{
1966
    return (unative_t) as_area_resize(AS, address, size, 0);
2101
    return (unative_t) as_area_resize(AS, address, size, 0);
1967
}
2102
}
1968
 
2103
 
-
 
2104
/** Wrapper for as_area_change_flags(). */
-
 
2105
unative_t sys_as_area_change_flags(uintptr_t address, int flags)
-
 
2106
{
-
 
2107
    return (unative_t) as_area_change_flags(AS, flags, address);
-
 
2108
}
-
 
2109
 
1969
/** Wrapper for as_area_destroy(). */
2110
/** Wrapper for as_area_destroy(). */
1970
unative_t sys_as_area_destroy(uintptr_t address)
2111
unative_t sys_as_area_destroy(uintptr_t address)
1971
{
2112
{
1972
    return (unative_t) as_area_destroy(AS, address);
2113
    return (unative_t) as_area_destroy(AS, address);
1973
}
2114
}
1974
 
2115
 
1975
/** Print out information about address space.
2116
/** Print out information about address space.
1976
 *
2117
 *
1977
 * @param as Address space.
2118
 * @param as        Address space.
1978
 */
2119
 */
1979
void as_print(as_t *as)
2120
void as_print(as_t *as)
1980
{
2121
{
1981
    ipl_t ipl;
2122
    ipl_t ipl;
1982
   
2123
   
1983
    ipl = interrupts_disable();
2124
    ipl = interrupts_disable();
1984
    mutex_lock(&as->lock);
2125
    mutex_lock(&as->lock);
1985
   
2126
   
1986
    /* print out info about address space areas */
2127
    /* print out info about address space areas */
1987
    link_t *cur;
2128
    link_t *cur;
1988
    for (cur = as->as_area_btree.leaf_head.next;
2129
    for (cur = as->as_area_btree.leaf_head.next;
1989
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
2130
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1990
        btree_node_t *node;
2131
        btree_node_t *node;
1991
       
2132
       
1992
        node = list_get_instance(cur, btree_node_t, leaf_link);
2133
        node = list_get_instance(cur, btree_node_t, leaf_link);
1993
       
2134
       
1994
        unsigned int i;
2135
        unsigned int i;
1995
        for (i = 0; i < node->keys; i++) {
2136
        for (i = 0; i < node->keys; i++) {
1996
            as_area_t *area = node->value[i];
2137
            as_area_t *area = node->value[i];
1997
       
2138
       
1998
            mutex_lock(&area->lock);
2139
            mutex_lock(&area->lock);
1999
            printf("as_area: %p, base=%p, pages=%" PRIc " (%p - %p)\n",
2140
            printf("as_area: %p, base=%p, pages=%" PRIc
2000
                area, area->base, area->pages, area->base,
2141
                " (%p - %p)\n", area, area->base, area->pages,
2001
                area->base + FRAMES2SIZE(area->pages));
2142
                area->base, area->base + FRAMES2SIZE(area->pages));
2002
            mutex_unlock(&area->lock);
2143
            mutex_unlock(&area->lock);
2003
        }
2144
        }
2004
    }
2145
    }
2005
   
2146
   
2006
    mutex_unlock(&as->lock);
2147
    mutex_unlock(&as->lock);
2007
    interrupts_restore(ipl);
2148
    interrupts_restore(ipl);
2008
}
2149
}
2009
 
2150
 
2010
/** @}
2151
/** @}
2011
 */
2152
 */
2012
 
2153