Subversion Repositories HelenOS

Rev Author Line No. Line
703 jermar 1
/*
2071 jermar 2
 * Copyright (c) 2001-2006 Jakub Jermar
703 jermar 3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 *
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
28
 
1757 jermar 29
/** @addtogroup genericmm
1702 cejka 30
 * @{
31
 */
32
 
1248 jermar 33
/**
1702 cejka 34
 * @file
1248 jermar 35
 * @brief	Address space related functions.
36
 *
703 jermar 37
 * This file contains address space manipulation functions.
38
 * Roughly speaking, this is a higher-level client of
39
 * Virtual Address Translation (VAT) subsystem.
1248 jermar 40
 *
41
 * Functionality provided by this file allows one to
1757 jermar 42
 * create address spaces and create, resize and share
1248 jermar 43
 * address space areas.
44
 *
45
 * @see page.c
46
 *
703 jermar 47
 */
48
 
49
#include <mm/as.h>
756 jermar 50
#include <arch/mm/as.h>
703 jermar 51
#include <mm/page.h>
52
#include <mm/frame.h>
814 palkovsky 53
#include <mm/slab.h>
703 jermar 54
#include <mm/tlb.h>
55
#include <arch/mm/page.h>
56
#include <genarch/mm/page_pt.h>
1108 jermar 57
#include <genarch/mm/page_ht.h>
727 jermar 58
#include <mm/asid.h>
703 jermar 59
#include <arch/mm/asid.h>
2183 jermar 60
#include <preemption.h>
703 jermar 61
#include <synch/spinlock.h>
1380 jermar 62
#include <synch/mutex.h>
788 jermar 63
#include <adt/list.h>
1147 jermar 64
#include <adt/btree.h>
1235 jermar 65
#include <proc/task.h>
1288 jermar 66
#include <proc/thread.h>
1235 jermar 67
#include <arch/asm.h>
703 jermar 68
#include <panic.h>
69
#include <debug.h>
1235 jermar 70
#include <print.h>
703 jermar 71
#include <memstr.h>
1070 jermar 72
#include <macros.h>
703 jermar 73
#include <arch.h>
1235 jermar 74
#include <errno.h>
75
#include <config.h>
1387 jermar 76
#include <align.h>
1235 jermar 77
#include <arch/types.h>
1288 jermar 78
#include <syscall/copy.h>
79
#include <arch/interrupt.h>
703 jermar 80
 
2009 jermar 81
#ifdef CONFIG_VIRT_IDX_DCACHE
82
#include <arch/mm/cache.h>
83
#endif /* CONFIG_VIRT_IDX_DCACHE */
84
 
2125 decky 85
#ifndef __OBJC__
1757 jermar 86
/**
87
 * Each architecture decides what functions will be used to carry out
88
 * address space operations such as creating or locking page tables.
89
 */
756 jermar 90
as_operations_t *as_operations = NULL;
703 jermar 91
 
1890 jermar 92
/**
93
 * Slab for as_t objects.
94
 */
95
static slab_cache_t *as_slab;
2126 decky 96
#endif
1890 jermar 97
 
2087 jermar 98
/**
2170 jermar 99
 * This lock serializes access to the ASID subsystem.
100
 * It protects:
101
 * - inactive_as_with_asid_head list
102
 * - as->asid for each as of the as_t type
103
 * - asids_allocated counter
2087 jermar 104
 */
2170 jermar 105
SPINLOCK_INITIALIZE(asidlock);
823 jermar 106
 
107
/**
108
 * This list contains address spaces that are not active on any
109
 * processor and that have valid ASID.
110
 */
111
LIST_INITIALIZE(inactive_as_with_asid_head);
112
 
757 jermar 113
/** Kernel address space. */
114
as_t *AS_KERNEL = NULL;
115
 
1235 jermar 116
static int area_flags_to_page_flags(int aflags);
1780 jermar 117
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
2087 jermar 118
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
119
    as_area_t *avoid_area);
1409 jermar 120
static void sh_info_remove_reference(share_info_t *sh_info);
703 jermar 121
 
2126 decky 122
#ifndef __OBJC__
1891 jermar 123
static int as_constructor(void *obj, int flags)
124
{
125
	as_t *as = (as_t *) obj;
126
	int rc;
127
 
128
	link_initialize(&as->inactive_as_with_asid_link);
129
	mutex_initialize(&as->lock);	
130
 
131
	rc = as_constructor_arch(as, flags);
132
 
133
	return rc;
134
}
135
 
136
static int as_destructor(void *obj)
137
{
138
	as_t *as = (as_t *) obj;
139
 
140
	return as_destructor_arch(as);
141
}
2126 decky 142
#endif
1891 jermar 143
 
756 jermar 144
/** Initialize address space subsystem. */
145
void as_init(void)
146
{
147
	as_arch_init();
2126 decky 148
 
149
#ifndef __OBJC__
1891 jermar 150
	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
2087 jermar 151
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
2126 decky 152
#endif
1890 jermar 153
 
789 palkovsky 154
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
1383 decky 155
	if (!AS_KERNEL)
156
		panic("can't create kernel address space\n");
157
 
756 jermar 158
}
159
 
757 jermar 160
/** Create address space.
161
 *
162
 * @param flags Flags that influence the way in which the address space is created.
163
 */
756 jermar 164
as_t *as_create(int flags)
703 jermar 165
{
166
	as_t *as;
167
 
2126 decky 168
#ifdef __OBJC__
169
	as = [as_t new];
170
	link_initialize(&as->inactive_as_with_asid_link);
171
	mutex_initialize(&as->lock);	
172
	(void) as_constructor_arch(as, flags);
173
#else
1890 jermar 174
	as = (as_t *) slab_alloc(as_slab, 0);
2126 decky 175
#endif
1891 jermar 176
	(void) as_create_arch(as, 0);
177
 
1147 jermar 178
	btree_create(&as->as_area_btree);
822 palkovsky 179
 
180
	if (flags & FLAG_AS_KERNEL)
181
		as->asid = ASID_KERNEL;
182
	else
183
		as->asid = ASID_INVALID;
184
 
2183 jermar 185
	atomic_set(&as->refcount, 0);
1415 jermar 186
	as->cpu_refcount = 0;
2089 decky 187
#ifdef AS_PAGE_TABLE
2106 jermar 188
	as->genarch.page_table = page_table_create(flags);
2089 decky 189
#else
190
	page_table_create(flags);
191
#endif
703 jermar 192
 
193
	return as;
194
}
195
 
1468 jermar 196
/** Destroy address space.
197
 *
2087 jermar 198
 * When there are no tasks referencing this address space (i.e. its refcount is
199
 * zero), the address space can be destroyed.
2183 jermar 200
 *
201
 * We know that we don't hold any spinlock.
1468 jermar 202
 */
203
void as_destroy(as_t *as)
973 palkovsky 204
{
1468 jermar 205
	ipl_t ipl;
1594 jermar 206
	bool cond;
2183 jermar 207
	DEADLOCK_PROBE_INIT(p_asidlock);
973 palkovsky 208
 
2183 jermar 209
	ASSERT(atomic_get(&as->refcount) == 0);
1468 jermar 210
 
211
	/*
212
	 * Since there is no reference to this address space,
213
	 * it is safe not to lock its mutex.
214
	 */
2170 jermar 215
 
2183 jermar 216
	/*
217
	 * We need to avoid deadlock between TLB shootdown and asidlock.
218
	 * We therefore try to take the asidlock conditionally and if we don't succeed,
219
	 * we enable interrupts and try again. This is done while preemption is
220
	 * disabled to prevent nested context switches. We also depend on the
221
	 * fact that so far no spinlocks are held.
222
	 */
223
	preemption_disable();
224
	ipl = interrupts_read();
225
retry:
226
	interrupts_disable();
227
	if (!spinlock_trylock(&asidlock)) {
228
		interrupts_enable();
229
		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
230
		goto retry;
231
	}
232
	preemption_enable();	/* Interrupts disabled, enable preemption */
1587 jermar 233
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
1594 jermar 234
		if (as != AS && as->cpu_refcount == 0)
1587 jermar 235
			list_remove(&as->inactive_as_with_asid_link);
1468 jermar 236
		asid_put(as->asid);
237
	}
2170 jermar 238
	spinlock_unlock(&asidlock);
1468 jermar 239
 
240
	/*
241
	 * Destroy address space areas of the address space.
1954 jermar 242
	 * The B+tree must be walked carefully because it is
1594 jermar 243
	 * also being destroyed.
1468 jermar 244
	 */	
1594 jermar 245
	for (cond = true; cond; ) {
1468 jermar 246
		btree_node_t *node;
1594 jermar 247
 
248
		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
2087 jermar 249
		node = list_get_instance(as->as_area_btree.leaf_head.next,
250
		    btree_node_t, leaf_link);
1594 jermar 251
 
252
		if ((cond = node->keys)) {
253
			as_area_destroy(as, node->key[0]);
254
		}
1468 jermar 255
	}
1495 jermar 256
 
1483 jermar 257
	btree_destroy(&as->as_area_btree);
2089 decky 258
#ifdef AS_PAGE_TABLE
2106 jermar 259
	page_table_destroy(as->genarch.page_table);
2089 decky 260
#else
261
	page_table_destroy(NULL);
262
#endif
1468 jermar 263
 
264
	interrupts_restore(ipl);
2126 decky 265
 
266
#ifdef __OBJC__
267
	[as free];
268
#else
1890 jermar 269
	slab_free(as_slab, as);
2126 decky 270
#endif
973 palkovsky 271
}
272
 
703 jermar 273
/** Create address space area of common attributes.
274
 *
275
 * The created address space area is added to the target address space.
276
 *
277
 * @param as Target address space.
1239 jermar 278
 * @param flags Flags of the area memory.
1048 jermar 279
 * @param size Size of area.
703 jermar 280
 * @param base Base address of area.
1239 jermar 281
 * @param attrs Attributes of the area.
1409 jermar 282
 * @param backend Address space area backend. NULL if no backend is used.
283
 * @param backend_data NULL or a pointer to an array holding two void *.
703 jermar 284
 *
285
 * @return Address space area on success or NULL on failure.
286
 */
2069 jermar 287
as_area_t *
288
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
1424 jermar 289
	       mem_backend_t *backend, mem_backend_data_t *backend_data)
703 jermar 290
{
291
	ipl_t ipl;
292
	as_area_t *a;
293
 
294
	if (base % PAGE_SIZE)
1048 jermar 295
		return NULL;
296
 
1233 jermar 297
	if (!size)
298
		return NULL;
299
 
1048 jermar 300
	/* Writeable executable areas are not supported. */
301
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
302
		return NULL;
703 jermar 303
 
304
	ipl = interrupts_disable();
1380 jermar 305
	mutex_lock(&as->lock);
703 jermar 306
 
1048 jermar 307
	if (!check_area_conflicts(as, base, size, NULL)) {
1380 jermar 308
		mutex_unlock(&as->lock);
1048 jermar 309
		interrupts_restore(ipl);
310
		return NULL;
311
	}
703 jermar 312
 
822 palkovsky 313
	a = (as_area_t *) malloc(sizeof(as_area_t), 0);
703 jermar 314
 
1380 jermar 315
	mutex_initialize(&a->lock);
822 palkovsky 316
 
1424 jermar 317
	a->as = as;
1026 jermar 318
	a->flags = flags;
1239 jermar 319
	a->attributes = attrs;
1048 jermar 320
	a->pages = SIZE2FRAMES(size);
822 palkovsky 321
	a->base = base;
1409 jermar 322
	a->sh_info = NULL;
323
	a->backend = backend;
1424 jermar 324
	if (backend_data)
325
		a->backend_data = *backend_data;
326
	else
2087 jermar 327
		memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
328
		    0);
1424 jermar 329
 
1387 jermar 330
	btree_create(&a->used_space);
822 palkovsky 331
 
1147 jermar 332
	btree_insert(&as->as_area_btree, base, (void *) a, NULL);
822 palkovsky 333
 
1380 jermar 334
	mutex_unlock(&as->lock);
703 jermar 335
	interrupts_restore(ipl);
704 jermar 336
 
703 jermar 337
	return a;
338
}
339
 
1235 jermar 340
/** Find address space area and change it.
341
 *
342
 * @param as Address space.
2087 jermar 343
 * @param address Virtual address belonging to the area to be changed. Must be
344
 *     page-aligned.
1235 jermar 345
 * @param size New size of the virtual memory block starting at address. 
346
 * @param flags Flags influencing the remap operation. Currently unused.
347
 *
1306 jermar 348
 * @return Zero on success or a value from @ref errno.h otherwise.
1235 jermar 349
 */ 
1780 jermar 350
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
1235 jermar 351
{
1306 jermar 352
	as_area_t *area;
1235 jermar 353
	ipl_t ipl;
354
	size_t pages;
355
 
356
	ipl = interrupts_disable();
1380 jermar 357
	mutex_lock(&as->lock);
1235 jermar 358
 
359
	/*
360
	 * Locate the area.
361
	 */
362
	area = find_area_and_lock(as, address);
363
	if (!area) {
1380 jermar 364
		mutex_unlock(&as->lock);
1235 jermar 365
		interrupts_restore(ipl);
1306 jermar 366
		return ENOENT;
1235 jermar 367
	}
368
 
1424 jermar 369
	if (area->backend == &phys_backend) {
1235 jermar 370
		/*
371
		 * Remapping of address space areas associated
372
		 * with memory mapped devices is not supported.
373
		 */
1380 jermar 374
		mutex_unlock(&area->lock);
375
		mutex_unlock(&as->lock);
1235 jermar 376
		interrupts_restore(ipl);
1306 jermar 377
		return ENOTSUP;
1235 jermar 378
	}
1409 jermar 379
	if (area->sh_info) {
380
		/*
381
		 * Remapping of shared address space areas 
382
		 * is not supported.
383
		 */
384
		mutex_unlock(&area->lock);
385
		mutex_unlock(&as->lock);
386
		interrupts_restore(ipl);
387
		return ENOTSUP;
388
	}
1235 jermar 389
 
390
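	/*
	 * The new page count is measured from the area base, i.e. it covers
	 * the range from area->base up to address + size.
	 */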
	pages = SIZE2FRAMES((address - area->base) + size);
391
	if (!pages) {
392
		/*
393
		 * Zero size address space areas are not allowed.
394
		 */
1380 jermar 395
		mutex_unlock(&area->lock);
396
		mutex_unlock(&as->lock);
1235 jermar 397
		interrupts_restore(ipl);
1306 jermar 398
		return EPERM;
1235 jermar 399
	}
400
 
401
	if (pages < area->pages) {
1403 jermar 402
		bool cond;
1780 jermar 403
		uintptr_t start_free = area->base + pages*PAGE_SIZE;
1235 jermar 404
 
405
		/*
406
		 * Shrinking the area.
407
		 * No need to check for overlaps.
408
		 */
1403 jermar 409
 
410
		/*
1436 jermar 411
		 * Start TLB shootdown sequence.
412
		 */
2087 jermar 413
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
414
		    pages * PAGE_SIZE, area->pages - pages);
1436 jermar 415
 
416
		/*
1403 jermar 417
		 * Remove frames belonging to used space starting from
418
		 * the highest addresses downwards until an overlap with
419
		 * the resized address space area is found. Note that this
420
		 * is also the right way to remove part of the used_space
421
		 * B+tree leaf list.
422
		 */		
423
		for (cond = true; cond;) {
424
			btree_node_t *node;
425
 
426
			ASSERT(!list_empty(&area->used_space.leaf_head));
2087 jermar 427
			node = 
428
			    list_get_instance(area->used_space.leaf_head.prev,
429
			    btree_node_t, leaf_link);
1403 jermar 430
			if ((cond = (bool) node->keys)) {
1780 jermar 431
				uintptr_t b = node->key[node->keys - 1];
2087 jermar 432
				count_t c =
433
				    (count_t) node->value[node->keys - 1];
2745 decky 434
				unsigned int i = 0;
1235 jermar 435
 
2087 jermar 436
				if (overlaps(b, c * PAGE_SIZE, area->base,
2133 jermar 437
				    pages * PAGE_SIZE)) {
1403 jermar 438
 
2087 jermar 439
					if (b + c * PAGE_SIZE <= start_free) {
1403 jermar 440
						/*
2087 jermar 441
						 * The whole interval fits
442
						 * completely in the resized
443
						 * address space area.
1403 jermar 444
						 */
445
						break;
446
					}
447
 
448
					/*
2087 jermar 449
					 * Part of the interval corresponding
450
					 * to b and c overlaps with the resized
451
					 * address space area.
1403 jermar 452
					 */
453
 
454
					cond = false;	/* we are almost done */
455
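					/*
					 * Number of pages of this interval
					 * that remain inside the shrunk area.
					 */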
					i = (start_free - b) >> PAGE_WIDTH;
3057 decky 456
					if (!used_space_remove(area, start_free, c - i))
457
						panic("Could not remove used space.\n");
1403 jermar 458
				} else {
459
					/*
2087 jermar 460
					 * The interval of used space can be
461
					 * completely removed.
1403 jermar 462
					 */
463
					if (!used_space_remove(area, b, c))
3057 decky 464
						panic("Could not remove used space.\n");
1403 jermar 465
				}
466
 
467
				for (; i < c; i++) {
468
					pte_t *pte;
469
 
470
					page_table_lock(as, false);
2087 jermar 471
					pte = page_mapping_find(as, b +
472
					    i * PAGE_SIZE);
473
					ASSERT(pte && PTE_VALID(pte) &&
474
					    PTE_PRESENT(pte));
475
					if (area->backend &&
476
					    area->backend->frame_free) {
1424 jermar 477
						area->backend->frame_free(area,
2087 jermar 478
						    b + i * PAGE_SIZE,
479
						    PTE_GET_FRAME(pte));
1409 jermar 480
					}
2087 jermar 481
					page_mapping_remove(as, b +
482
					    i * PAGE_SIZE);
1403 jermar 483
					page_table_unlock(as, false);
484
				}
1235 jermar 485
			}
486
		}
1436 jermar 487
 
1235 jermar 488
		/*
1436 jermar 489
		 * Finish TLB shootdown sequence.
1235 jermar 490
		 */
2183 jermar 491
 
2087 jermar 492
		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
493
		    area->pages - pages);
1889 jermar 494
		/*
495
		 * Invalidate software translation caches (e.g. TSB on sparc64).
496
		 */
2087 jermar 497
		as_invalidate_translation_cache(as, area->base +
498
		    pages * PAGE_SIZE, area->pages - pages);
2183 jermar 499
		tlb_shootdown_finalize();
500
 
1235 jermar 501
	} else {
502
		/*
503
		 * Growing the area.
504
		 * Check for overlaps with other address space areas.
505
		 */
2087 jermar 506
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
507
		    area)) {
1380 jermar 508
			mutex_unlock(&area->lock);
509
			mutex_unlock(&as->lock);		
1235 jermar 510
			interrupts_restore(ipl);
1306 jermar 511
			return EADDRNOTAVAIL;
1235 jermar 512
		}
513
	} 
514
 
515
	area->pages = pages;
516
 
1380 jermar 517
	mutex_unlock(&area->lock);
518
	mutex_unlock(&as->lock);
1235 jermar 519
	interrupts_restore(ipl);
520
 
1306 jermar 521
	return 0;
1235 jermar 522
}
523
 
1306 jermar 524
/** Destroy address space area.
525
 *
526
 * @param as Address space.
527
 * @param address Address within the area to be deleted.
528
 *
529
 * @return Zero on success or a value from @ref errno.h on failure. 
530
 */
1780 jermar 531
int as_area_destroy(as_t *as, uintptr_t address)
1306 jermar 532
{
533
	as_area_t *area;
1780 jermar 534
	uintptr_t base;
1495 jermar 535
	link_t *cur;
1306 jermar 536
	ipl_t ipl;
537
 
538
	ipl = interrupts_disable();
1380 jermar 539
	mutex_lock(&as->lock);
1306 jermar 540
 
541
	area = find_area_and_lock(as, address);
542
	if (!area) {
1380 jermar 543
		mutex_unlock(&as->lock);
1306 jermar 544
		interrupts_restore(ipl);
545
		return ENOENT;
546
	}
547
 
1403 jermar 548
	base = area->base;
549
 
1411 jermar 550
	/*
1436 jermar 551
	 * Start TLB shootdown sequence.
552
	 */
1889 jermar 553
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
1436 jermar 554
 
555
	/*
1411 jermar 556
	 * Visit only the pages mapped by used_space B+tree.
557
	 */
2087 jermar 558
	for (cur = area->used_space.leaf_head.next;
559
	    cur != &area->used_space.leaf_head; cur = cur->next) {
1411 jermar 560
		btree_node_t *node;
2745 decky 561
		unsigned int i;
1403 jermar 562
 
1495 jermar 563
		node = list_get_instance(cur, btree_node_t, leaf_link);
564
		for (i = 0; i < node->keys; i++) {
1780 jermar 565
			uintptr_t b = node->key[i];
1495 jermar 566
			count_t j;
1411 jermar 567
			pte_t *pte;
1403 jermar 568
 
1495 jermar 569
			for (j = 0; j < (count_t) node->value[i]; j++) {
1411 jermar 570
				page_table_lock(as, false);
2087 jermar 571
				pte = page_mapping_find(as, b + j * PAGE_SIZE);
572
				ASSERT(pte && PTE_VALID(pte) &&
573
				    PTE_PRESENT(pte));
574
				if (area->backend &&
575
				    area->backend->frame_free) {
576
					area->backend->frame_free(area,	b +
2133 jermar 577
					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
1403 jermar 578
				}
2087 jermar 579
				page_mapping_remove(as, b + j * PAGE_SIZE);				
1411 jermar 580
				page_table_unlock(as, false);
1306 jermar 581
			}
582
		}
583
	}
1403 jermar 584
 
1306 jermar 585
	/*
1436 jermar 586
	 * Finish TLB shootdown sequence.
1306 jermar 587
	 */
2183 jermar 588
 
1889 jermar 589
	tlb_invalidate_pages(as->asid, area->base, area->pages);
590
	/*
2087 jermar 591
	 * Invalidate potential software translation caches (e.g. TSB on
592
	 * sparc64).
1889 jermar 593
	 */
594
	as_invalidate_translation_cache(as, area->base, area->pages);
2183 jermar 595
	tlb_shootdown_finalize();
1889 jermar 596
 
1436 jermar 597
	btree_destroy(&area->used_space);
1306 jermar 598
 
1309 jermar 599
	area->attributes |= AS_AREA_ATTR_PARTIAL;
1409 jermar 600
 
601
	if (area->sh_info)
602
		sh_info_remove_reference(area->sh_info);
603
 
1380 jermar 604
	mutex_unlock(&area->lock);
1306 jermar 605
 
606
	/*
607
	 * Remove the empty area from address space.
608
	 */
1889 jermar 609
	btree_remove(&as->as_area_btree, base, NULL);
1306 jermar 610
 
1309 jermar 611
	free(area);
612
 
1889 jermar 613
	mutex_unlock(&as->lock);
1306 jermar 614
	interrupts_restore(ipl);
615
	return 0;
616
}
617
 
1413 jermar 618
/** Share address space area with another or the same address space.
1235 jermar 619
 *
1424 jermar 620
 * Address space area mapping is shared with a new address space area.
621
 * If the source address space area has not been shared so far,
622
 * a new sh_info is created. The new address space area simply gets the
623
 * sh_info of the source area. The process of duplicating the
624
 * mapping is done through the backend share function.
1413 jermar 625
 * 
1417 jermar 626
 * @param src_as Pointer to source address space.
1239 jermar 627
 * @param src_base Base address of the source address space area.
1417 jermar 628
 * @param acc_size Expected size of the source area.
1428 palkovsky 629
 * @param dst_as Pointer to destination address space.
1417 jermar 630
 * @param dst_base Target base address.
631
 * @param dst_flags_mask Destination address space area flags mask.
1235 jermar 632
 *
2007 jermar 633
 * @return Zero on success or ENOENT if there is no such task or if there is no
634
 * such address space area, EPERM if there was a problem in accepting the area
635
 * or ENOMEM if there was a problem in allocating destination address space
636
 * area. ENOTSUP is returned if the address space area backend does not support
2141 jermar 637
 * sharing.
1235 jermar 638
 */
1780 jermar 639
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
2647 jermar 640
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
1235 jermar 641
{
642
	ipl_t ipl;
1239 jermar 643
	int src_flags;
644
	size_t src_size;
645
	as_area_t *src_area, *dst_area;
1413 jermar 646
	share_info_t *sh_info;
1424 jermar 647
	mem_backend_t *src_backend;
648
	mem_backend_data_t src_backend_data;
1434 palkovsky 649
 
1235 jermar 650
	ipl = interrupts_disable();
1380 jermar 651
	mutex_lock(&src_as->lock);
1329 palkovsky 652
	src_area = find_area_and_lock(src_as, src_base);
1239 jermar 653
	if (!src_area) {
1238 jermar 654
		/*
655
		 * Could not find the source address space area.
656
		 */
1380 jermar 657
		mutex_unlock(&src_as->lock);
1238 jermar 658
		interrupts_restore(ipl);
659
		return ENOENT;
660
	}
2007 jermar 661
 
1424 jermar 662
	if (!src_area->backend || !src_area->backend->share) {
1413 jermar 663
		/*
1851 jermar 664
		 * There is no backend or the backend does not
1424 jermar 665
		 * know how to share the area.
1413 jermar 666
		 */
667
		mutex_unlock(&src_area->lock);
668
		mutex_unlock(&src_as->lock);
669
		interrupts_restore(ipl);
670
		return ENOTSUP;
671
	}
672
 
1239 jermar 673
	src_size = src_area->pages * PAGE_SIZE;
674
	src_flags = src_area->flags;
1424 jermar 675
	src_backend = src_area->backend;
676
	src_backend_data = src_area->backend_data;
1544 palkovsky 677
 
678
	/* Share the cacheable flag from the original mapping */
679
	if (src_flags & AS_AREA_CACHEABLE)
680
		dst_flags_mask |= AS_AREA_CACHEABLE;
681
 
2087 jermar 682
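	/*
	 * Refuse to share if the sizes do not match or if the destination
	 * would gain access rights that the source area does not have.
	 */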
	if (src_size != acc_size ||
683
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
1413 jermar 684
		mutex_unlock(&src_area->lock);
685
		mutex_unlock(&src_as->lock);
1235 jermar 686
		interrupts_restore(ipl);
687
		return EPERM;
688
	}
1413 jermar 689
 
1235 jermar 690
	/*
1413 jermar 691
	 * Now we are committed to sharing the area.
1954 jermar 692
	 * First, prepare the area for sharing.
1413 jermar 693
	 * Then it will be safe to unlock it.
694
	 */
695
	sh_info = src_area->sh_info;
696
	if (!sh_info) {
697
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
698
		mutex_initialize(&sh_info->lock);
699
		sh_info->refcount = 2;
700
		btree_create(&sh_info->pagemap);
701
		src_area->sh_info = sh_info;
2647 jermar 702
		/*
703
		 * Call the backend to setup sharing.
704
		 */
705
		src_area->backend->share(src_area);
1413 jermar 706
	} else {
707
		mutex_lock(&sh_info->lock);
708
		sh_info->refcount++;
709
		mutex_unlock(&sh_info->lock);
710
	}
711
 
712
	mutex_unlock(&src_area->lock);
713
	mutex_unlock(&src_as->lock);
714
 
715
	/*
1239 jermar 716
	 * Create copy of the source address space area.
717
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
718
	 * attribute set which prevents race condition with
719
	 * preliminary as_page_fault() calls.
1417 jermar 720
	 * The flags of the source area are masked against dst_flags_mask
721
	 * to support sharing in less privileged mode.
1235 jermar 722
	 */
1461 palkovsky 723
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
2087 jermar 724
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
1239 jermar 725
	if (!dst_area) {
1235 jermar 726
		/*
727
		 * Destination address space area could not be created.
728
		 */
1413 jermar 729
		sh_info_remove_reference(sh_info);
730
 
1235 jermar 731
		interrupts_restore(ipl);
732
		return ENOMEM;
733
	}
2009 jermar 734
 
1235 jermar 735
	/*
1239 jermar 736
	 * Now the destination address space area has been
737
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
1413 jermar 738
	 * attribute and set the sh_info.
1239 jermar 739
	 */	
2009 jermar 740
	mutex_lock(&dst_as->lock);	
1380 jermar 741
	mutex_lock(&dst_area->lock);
1239 jermar 742
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1413 jermar 743
	dst_area->sh_info = sh_info;
1380 jermar 744
	mutex_unlock(&dst_area->lock);
2009 jermar 745
	mutex_unlock(&dst_as->lock);	
746
 
1235 jermar 747
	interrupts_restore(ipl);
748
 
749
	return 0;
750
}
751
 
1423 jermar 752
/** Check access mode for address space area.
753
 *
754
 * The address space area must be locked prior to this call.
755
 *
756
 * @param area Address space area.
757
 * @param access Access mode.
758
 *
759
 * @return False if access violates area's permissions, true otherwise.
760
 */
761
bool as_area_check_access(as_area_t *area, pf_access_t access)
762
{
763
	int flagmap[] = {
764
		[PF_ACCESS_READ] = AS_AREA_READ,
765
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
766
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
767
	};
768
 
769
	if (!(area->flags & flagmap[access]))
770
		return false;
771
 
772
	return true;
773
}
774
 
703 jermar 775
/** Handle page fault within the current address space.
776
 *
1409 jermar 777
 * This is the high-level page fault handler. It decides
778
 * whether the page fault can be resolved by any backend
779
 * and if so, it invokes the backend to resolve the page
780
 * fault.
781
 *
703 jermar 782
 * Interrupts are assumed disabled.
783
 *
784
 * @param page Faulting page.
1411 jermar 785
 * @param access Access mode that caused the fault (i.e. read/write/exec).
1288 jermar 786
 * @param istate Pointer to interrupted state.
703 jermar 787
 *
1409 jermar 788
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
789
 * 	   fault was caused by copy_to_uspace() or copy_from_uspace().
703 jermar 790
 */
1780 jermar 791
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
703 jermar 792
{
1044 jermar 793
	pte_t *pte;
977 jermar 794
	as_area_t *area;
703 jermar 795
 
1380 jermar 796
	if (!THREAD)
1409 jermar 797
		return AS_PF_FAULT;
1380 jermar 798
 
703 jermar 799
	ASSERT(AS);
1044 jermar 800
 
1380 jermar 801
	mutex_lock(&AS->lock);
977 jermar 802
	area = find_area_and_lock(AS, page);	
703 jermar 803
	if (!area) {
804
		/*
805
		 * No area contained a mapping for 'page'.
806
		 * Signal page fault to low-level handler.
807
		 */
1380 jermar 808
		mutex_unlock(&AS->lock);
1288 jermar 809
		goto page_fault;
703 jermar 810
	}
811
 
1239 jermar 812
	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
813
		/*
814
		 * The address space area is not fully initialized.
815
		 * Avoid possible race by returning error.
816
		 */
1380 jermar 817
		mutex_unlock(&area->lock);
818
		mutex_unlock(&AS->lock);
1288 jermar 819
		goto page_fault;		
1239 jermar 820
	}
821
 
1424 jermar 822
	if (!area->backend || !area->backend->page_fault) {
1409 jermar 823
		/*
824
		 * The address space area is not backed by any backend
825
		 * or the backend cannot handle page faults.
826
		 */
827
		mutex_unlock(&area->lock);
828
		mutex_unlock(&AS->lock);
829
		goto page_fault;		
830
	}
1179 jermar 831
 
1044 jermar 832
	page_table_lock(AS, false);
833
 
703 jermar 834
	/*
1044 jermar 835
	 * To avoid race condition between two page faults
836
	 * on the same address, we need to make sure
837
	 * the mapping has not already been inserted.
838
	 */
839
	if ((pte = page_mapping_find(AS, page))) {
840
		if (PTE_PRESENT(pte)) {
1423 jermar 841
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
2087 jermar 842
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
843
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
1423 jermar 844
				page_table_unlock(AS, false);
845
				mutex_unlock(&area->lock);
846
				mutex_unlock(&AS->lock);
847
				return AS_PF_OK;
848
			}
1044 jermar 849
		}
850
	}
1409 jermar 851
 
1044 jermar 852
	/*
1409 jermar 853
	 * Resort to the backend page fault handler.
703 jermar 854
	 */
1424 jermar 855
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
1409 jermar 856
		page_table_unlock(AS, false);
857
		mutex_unlock(&area->lock);
858
		mutex_unlock(&AS->lock);
859
		goto page_fault;
860
	}
703 jermar 861
 
1044 jermar 862
	page_table_unlock(AS, false);
1380 jermar 863
	mutex_unlock(&area->lock);
864
	mutex_unlock(&AS->lock);
1288 jermar 865
	return AS_PF_OK;
866
 
867
page_fault:
868
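	/*
	 * If the fault was caused by copy_from_uspace() or copy_to_uspace(),
	 * resume execution at the respective failover address instead of
	 * reporting a fatal fault.
	 */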
	if (THREAD->in_copy_from_uspace) {
869
		THREAD->in_copy_from_uspace = false;
2087 jermar 870
		istate_set_retaddr(istate,
871
		    (uintptr_t) &memcpy_from_uspace_failover_address);
1288 jermar 872
	} else if (THREAD->in_copy_to_uspace) {
873
		THREAD->in_copy_to_uspace = false;
2087 jermar 874
		istate_set_retaddr(istate,
875
		    (uintptr_t) &memcpy_to_uspace_failover_address);
1288 jermar 876
	} else {
877
		return AS_PF_FAULT;
878
	}
879
 
880
	return AS_PF_DEFER;
703 jermar 881
}
882
 
823 jermar 883
/** Switch address spaces.
703 jermar 884
 *
1380 jermar 885
 * Note that this function cannot sleep as it is essentially a part of
2170 jermar 886
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
887
 * thing which is forbidden in this context is locking the address space.
1380 jermar 888
 *
2183 jermar 889
 * When this function is entered, no spinlocks may be held.
890
 *
823 jermar 891
 * @param old Old address space or NULL.
892
 * @param new New address space.
703 jermar 893
 */
2106 jermar 894
void as_switch(as_t *old_as, as_t *new_as)
703 jermar 895
{
2183 jermar 896
	DEADLOCK_PROBE_INIT(p_asidlock);
897
	preemption_disable();
898
retry:
899
	(void) interrupts_disable();
900
	if (!spinlock_trylock(&asidlock)) {
901
		/* 
902
		 * Avoid deadlock with TLB shootdown.
903
		 * We can enable interrupts here because
904
		 * preemption is disabled. We should not be
905
		 * holding any other lock.
906
		 */
907
		(void) interrupts_enable();
908
		DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
909
		goto retry;
910
	}
911
	preemption_enable();
703 jermar 912
 
913
	/*
823 jermar 914
	 * First, take care of the old address space.
915
	 */	
2106 jermar 916
	if (old_as) {
917
		ASSERT(old_as->cpu_refcount);
918
		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
823 jermar 919
			/*
920
			 * The old address space is no longer active on
921
			 * any processor. It can be appended to the
922
			 * list of inactive address spaces with assigned
923
			 * ASID.
924
			 */
2141 jermar 925
			ASSERT(old_as->asid != ASID_INVALID);
926
			list_append(&old_as->inactive_as_with_asid_link,
927
			    &inactive_as_with_asid_head);
823 jermar 928
		}
1890 jermar 929
 
930
		/*
931
		 * Perform architecture-specific tasks when the address space
932
		 * is being removed from the CPU.
933
		 */
2106 jermar 934
		as_deinstall_arch(old_as);
823 jermar 935
	}
936
 
937
	/*
938
	 * Second, prepare the new address space.
939
	 */
2106 jermar 940
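	/*
	 * If the new address space was not active on any processor, either
	 * remove it from the list of inactive address spaces (it already has
	 * an ASID) or allocate a fresh ASID for it.
	 */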
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
2170 jermar 941
		if (new_as->asid != ASID_INVALID)
2106 jermar 942
			list_remove(&new_as->inactive_as_with_asid_link);
2170 jermar 943
		else
944
			new_as->asid = asid_get();
823 jermar 945
	}
2106 jermar 946
#ifdef AS_PAGE_TABLE
947
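	/* Install the page table root (PTL0) of the new address space. */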
	SET_PTL0_ADDRESS(new_as->genarch.page_table);
948
#endif
823 jermar 949
 
950
	/*
703 jermar 951
	 * Perform architecture-specific steps.
727 jermar 952
	 * (e.g. write ASID to hardware register etc.)
703 jermar 953
	 */
2106 jermar 954
	as_install_arch(new_as);
2170 jermar 955
 
956
	spinlock_unlock(&asidlock);
703 jermar 957
 
2106 jermar 958
	AS = new_as;
703 jermar 959
}
754 jermar 960
 
1235 jermar 961
/** Convert address space area flags to page flags.
754 jermar 962
 *
1235 jermar 963
 * @param aflags Flags of some address space area.
754 jermar 964
 *
1235 jermar 965
 * @return Flags to be passed to page_mapping_insert().
754 jermar 966
 */
1235 jermar 967
int area_flags_to_page_flags(int aflags)
754 jermar 968
{
969
	int flags;
970
 
1178 jermar 971
	flags = PAGE_USER | PAGE_PRESENT;
754 jermar 972
 
1235 jermar 973
	if (aflags & AS_AREA_READ)
1026 jermar 974
		flags |= PAGE_READ;
975
 
1235 jermar 976
	if (aflags & AS_AREA_WRITE)
1026 jermar 977
		flags |= PAGE_WRITE;
978
 
1235 jermar 979
	if (aflags & AS_AREA_EXEC)
1026 jermar 980
		flags |= PAGE_EXEC;
981
 
1424 jermar 982
	if (aflags & AS_AREA_CACHEABLE)
1178 jermar 983
		flags |= PAGE_CACHEABLE;
984
 
754 jermar 985
	return flags;
986
}
756 jermar 987
 
1235 jermar 988
/** Compute flags for virtual address translation subsystem.
989
 *
990
 * The address space area must be locked.
991
 * Interrupts must be disabled.
992
 *
993
 * @param a Address space area.
994
 *
995
 * @return Flags to be used in page_mapping_insert().
996
 */
1409 jermar 997
int as_area_get_flags(as_area_t *a)
1235 jermar 998
{
999
	return area_flags_to_page_flags(a->flags);
1000
}
1001
 
756 jermar 1002
/** Create page table.
1003
 *
1004
 * Depending on architecture, create either address space
1005
 * private or global page table.
1006
 *
1007
 * @param flags Flags saying whether the page table is for the kernel address space.
1008
 *
1009
 * @return First entry of the page table.
1010
 */
1011
pte_t *page_table_create(int flags)
1012
{
2125 decky 1013
#ifdef __OBJC__
1014
	return [as_t page_table_create: flags];
1015
#else
1016
	ASSERT(as_operations);
1017
	ASSERT(as_operations->page_table_create);
1018
 
1019
	return as_operations->page_table_create(flags);
1020
#endif
756 jermar 1021
}
977 jermar 1022
 
1468 jermar 1023
/** Destroy page table.
1024
 *
1025
 * Destroy page table in architecture specific way.
1026
 *
1027
 * @param page_table Physical address of PTL0.
1028
 */
1029
void page_table_destroy(pte_t *page_table)
1030
{
2125 decky 1031
#ifdef __OBJC__
1032
	return [as_t page_table_destroy: page_table];
1033
#else
1034
	ASSERT(as_operations);
1035
	ASSERT(as_operations->page_table_destroy);
1036
 
1037
	as_operations->page_table_destroy(page_table);
1038
#endif
1468 jermar 1039
}
1040
 
1044 jermar 1041
/** Lock page table.
1042
 *
1043
 * This function should be called before any page_mapping_insert(),
1044
 * page_mapping_remove() and page_mapping_find().
1045
 * 
1046
 * Locking order is such that address space areas must be locked
1047
 * prior to this call. Address space can be locked prior to this
1048
 * call in which case the lock argument is false.
1049
 *
1050
 * @param as Address space.
1248 jermar 1051
 * @param lock If false, do not attempt to lock as->lock.
1044 jermar 1052
 */
1053
void page_table_lock(as_t *as, bool lock)
1054
{
2125 decky 1055
#ifdef __OBJC__
1056
	[as page_table_lock: lock];
1057
#else
1044 jermar 1058
	ASSERT(as_operations);
1059
	ASSERT(as_operations->page_table_lock);
2125 decky 1060
 
1044 jermar 1061
	as_operations->page_table_lock(as, lock);
2125 decky 1062
#endif
1044 jermar 1063
}
1064
 
1065
/** Unlock page table.
1066
 *
1067
 * @param as Address space.
1248 jermar 1068
 * @param unlock If false, do not attempt to unlock as->lock.
1044 jermar 1069
 */
1070
void page_table_unlock(as_t *as, bool unlock)
1071
{
2125 decky 1072
#ifdef __OBJC__
1073
	[as page_table_unlock: unlock];
1074
#else
1044 jermar 1075
	ASSERT(as_operations);
1076
	ASSERT(as_operations->page_table_unlock);
2125 decky 1077
 
1044 jermar 1078
	as_operations->page_table_unlock(as, unlock);
2125 decky 1079
#endif
1044 jermar 1080
}
1081
 
977 jermar 1082
 
1083
/** Find address space area and lock it.
1084
 *
1085
 * The address space must be locked and interrupts must be disabled.
1086
 *
1087
 * @param as Address space.
1088
 * @param va Virtual address.
1089
 *
2087 jermar 1090
 * @return Locked address space area containing va on success or NULL on
1091
 *     failure.
977 jermar 1092
 */
1780 jermar 1093
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
977 jermar 1094
{
1095
	as_area_t *a;
1147 jermar 1096
	btree_node_t *leaf, *lnode;
2745 decky 1097
	unsigned int i;
977 jermar 1098
 
1147 jermar 1099
	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1100
	if (a) {
1101
		/* va is the base address of an address space area */
1380 jermar 1102
		mutex_lock(&a->lock);
1147 jermar 1103
		return a;
1104
	}
1105
 
1106
	/*
1150 jermar 1107
	 * Search the leaf node and the rightmost record of its left neighbour
1147 jermar 1108
	 * to find out whether this is a miss or va belongs to an address
1109
	 * space area found there.
1110
	 */
1111
 
1112
	/* First, search the leaf node itself. */
1113
	for (i = 0; i < leaf->keys; i++) {
1114
		a = (as_area_t *) leaf->value[i];
1380 jermar 1115
		mutex_lock(&a->lock);
1147 jermar 1116
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1117
			return a;
1118
		}
1380 jermar 1119
		mutex_unlock(&a->lock);
1147 jermar 1120
	}
977 jermar 1121
 
1147 jermar 1122
	/*
1150 jermar 1123
	 * Second, locate the left neighbour and test its last record.
1148 jermar 1124
	 * Because of its position in the B+tree, it must have base < va.
1147 jermar 1125
	 */
2087 jermar 1126
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
1127
	if (lnode) {
1147 jermar 1128
		a = (as_area_t *) lnode->value[lnode->keys - 1];
1380 jermar 1129
		mutex_lock(&a->lock);
1147 jermar 1130
		if (va < a->base + a->pages * PAGE_SIZE) {
1048 jermar 1131
			return a;
1147 jermar 1132
		}
1380 jermar 1133
		mutex_unlock(&a->lock);
977 jermar 1134
	}
1135
 
1136
	return NULL;
1137
}
1048 jermar 1138
 
1139
/** Check area conflicts with other areas.
1140
 *
1141
 * The address space must be locked and interrupts must be disabled.
1142
 *
1143
 * @param as Address space.
1144
 * @param va Starting virtual address of the area being tested.
1145
 * @param size Size of the area being tested.
1146
 * @param avoid_area Do not touch this area. 
1147
 *
1148
 * @return True if there is no conflict, false otherwise.
1149
 */
2087 jermar 1150
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
1151
			  as_area_t *avoid_area)
1048 jermar 1152
{
1153
	as_area_t *a;
1147 jermar 1154
	btree_node_t *leaf, *node;
2745 decky 1155
	unsigned int i;
1048 jermar 1156
 
1070 jermar 1157
	/*
1158
	 * We don't want any area to have conflicts with NULL page.
1159
	 */
1160
	if (overlaps(va, size, NULL, PAGE_SIZE))
1161
		return false;
1162
 
1147 jermar 1163
	/*
1164
	 * The leaf node is found in O(log n), where n is proportional to
1165
	 * the number of address space areas belonging to as.
1166
	 * The check for conflicts is then attempted on the rightmost
1150 jermar 1167
	 * record in the left neighbour, the leftmost record in the right
1168
	 * neighbour and all records in the leaf node itself.
1147 jermar 1169
	 */
1048 jermar 1170
 
1147 jermar 1171
	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1172
		if (a != avoid_area)
1173
			return false;
1174
	}
1175
 
1176
	/* First, check the two border cases. */
1150 jermar 1177
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1147 jermar 1178
		a = (as_area_t *) node->value[node->keys - 1];
1380 jermar 1179
		mutex_lock(&a->lock);
1147 jermar 1180
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1181
			mutex_unlock(&a->lock);
1147 jermar 1182
			return false;
1183
		}
1380 jermar 1184
		mutex_unlock(&a->lock);
1147 jermar 1185
	}
2087 jermar 1186
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
1187
	if (node) {
1147 jermar 1188
		a = (as_area_t *) node->value[0];
1380 jermar 1189
		mutex_lock(&a->lock);
1147 jermar 1190
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1191
			mutex_unlock(&a->lock);
1147 jermar 1192
			return false;
1193
		}
1380 jermar 1194
		mutex_unlock(&a->lock);
1147 jermar 1195
	}
1196
 
1197
	/* Second, check the leaf node. */
1198
	for (i = 0; i < leaf->keys; i++) {
1199
		a = (as_area_t *) leaf->value[i];
1200
 
1048 jermar 1201
		if (a == avoid_area)
1202
			continue;
1147 jermar 1203
 
1380 jermar 1204
		mutex_lock(&a->lock);
1147 jermar 1205
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1206
			mutex_unlock(&a->lock);
1147 jermar 1207
			return false;
1208
		}
1380 jermar 1209
		mutex_unlock(&a->lock);
1048 jermar 1210
	}
1211
 
1070 jermar 1212
	/*
1213
	 * So far, the area does not conflict with other areas.
1214
	 * Check whether it conflicts with the kernel address space.
1215
	 */	 
1216
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1217
		return !overlaps(va, size, 
2087 jermar 1218
		    KERNEL_ADDRESS_SPACE_START,
1219
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
1070 jermar 1220
	}
1221
 
1048 jermar 1222
	return true;
1223
}
1235 jermar 1224
 
2556 jermar 1225
/** Return size of the address space area with given base.
1226
 *
1227
 * @param base		Arbitrary address inside the address space area.
1228
 *
1229
 * @return		Size of the address space area in bytes or zero if it
1230
 *			does not exist.
1231
 */
1232
size_t as_area_get_size(uintptr_t base)
1329 palkovsky 1233
{
1234
	ipl_t ipl;
1235
	as_area_t *src_area;
1236
	size_t size;
1237
 
1238
	ipl = interrupts_disable();
1239
	src_area = find_area_and_lock(AS, base);
1240
	if (src_area) {
1241
		size = src_area->pages * PAGE_SIZE;
1380 jermar 1242
		mutex_unlock(&src_area->lock);
1329 palkovsky 1243
	} else {
1244
		size = 0;
1245
	}
1246
	interrupts_restore(ipl);
1247
	return size;
1248
}
1249
 
1387 jermar 1250
/** Mark portion of address space area as used.
1251
 *
1252
 * The address space area must be already locked.
1253
 *
1254
 * @param a Address space area.
1255
 * @param page First page to be marked.
1256
 * @param count Number of pages to be marked.
1257
 *
1258
 * @return 0 on failure and 1 on success.
1259
 */
1780 jermar 1260
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1261
{
1262
	btree_node_t *leaf, *node;
1263
	count_t pages;
2745 decky 1264
	unsigned int i;
1387 jermar 1265
 
1266
	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1267
	ASSERT(count);
1268
 
1269
	pages = (count_t) btree_search(&a->used_space, page, &leaf);
1270
	if (pages) {
1271
		/*
1272
		 * We hit the beginning of some used space.
1273
		 */
1274
		return 0;
1275
	}
1276
 
1437 jermar 1277
	if (!leaf->keys) {
1278
		btree_insert(&a->used_space, page, (void *) count, leaf);
1279
		return 1;
1280
	}
1281
 
1387 jermar 1282
	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1283
	if (node) {
2087 jermar 1284
		uintptr_t left_pg = node->key[node->keys - 1];
1285
		uintptr_t right_pg = leaf->key[0];
1286
		count_t left_cnt = (count_t) node->value[node->keys - 1];
1287
		count_t right_cnt = (count_t) leaf->value[0];
1387 jermar 1288
 
1289
		/*
1290
		 * Examine the possibility that the interval fits
1291
		 * somewhere between the rightmost interval of
1292
		 * the left neighbour and the first interval of the leaf.
1293
		 */
1294
 
1295
		if (page >= right_pg) {
1296
			/* Do nothing. */
2087 jermar 1297
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
1298
		    left_cnt * PAGE_SIZE)) {
1387 jermar 1299
			/* The interval intersects with the left interval. */
1300
			return 0;
2087 jermar 1301
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
1302
		    right_cnt * PAGE_SIZE)) {
1387 jermar 1303
			/* The interval intersects with the right interval. */
1304
			return 0;			
2087 jermar 1305
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1306
		    (page + count * PAGE_SIZE == right_pg)) {
1307
			/*
1308
			 * The interval can be added by merging the two already
1309
			 * present intervals.
1310
			 */
1403 jermar 1311
			node->value[node->keys - 1] += count + right_cnt;
1387 jermar 1312
			btree_remove(&a->used_space, right_pg, leaf);
1313
			return 1; 
2087 jermar 1314
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
1315
			/* 
1316
			 * The interval can be added by simply growing the left
1317
			 * interval.
1318
			 */
1403 jermar 1319
			node->value[node->keys - 1] += count;
1387 jermar 1320
			return 1;
2087 jermar 1321
		} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1322
			/*
2087 jermar 1323
			 * The interval can be added by simply moving the base of
1324
			 * the right interval down and increasing its size
1325
			 * accordingly.
1387 jermar 1326
			 */
1403 jermar 1327
			leaf->value[0] += count;
1387 jermar 1328
			leaf->key[0] = page;
1329
			return 1;
1330
		} else {
1331
			/*
1332
			 * The interval is between both neighbouring intervals,
1333
			 * but cannot be merged with any of them.
1334
			 */
2087 jermar 1335
			btree_insert(&a->used_space, page, (void *) count,
1336
			    leaf);
1387 jermar 1337
			return 1;
1338
		}
1339
	} else if (page < leaf->key[0]) {
1780 jermar 1340
		uintptr_t right_pg = leaf->key[0];
1387 jermar 1341
		count_t right_cnt = (count_t) leaf->value[0];
1342
 
1343
		/*
2087 jermar 1344
		 * Investigate the border case in which the left neighbour does
1345
		 * not exist but the interval fits from the left.
1387 jermar 1346
		 */
1347
 
2087 jermar 1348
		if (overlaps(page, count * PAGE_SIZE, right_pg,
1349
		    right_cnt * PAGE_SIZE)) {
1387 jermar 1350
			/* The interval intersects with the right interval. */
1351
			return 0;
2087 jermar 1352
		} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1353
			/*
2087 jermar 1354
			 * The interval can be added by moving the base of the
1355
			 * right interval down and increasing its size
1356
			 * accordingly.
1387 jermar 1357
			 */
1358
			leaf->key[0] = page;
1403 jermar 1359
			leaf->value[0] += count;
1387 jermar 1360
			return 1;
1361
		} else {
1362
			/*
1363
			 * The interval doesn't adjoin with the right interval.
1364
			 * It must be added individually.
1365
			 */
2087 jermar 1366
			btree_insert(&a->used_space, page, (void *) count,
1367
			    leaf);
1387 jermar 1368
			return 1;
1369
		}
1370
	}
1371
 
1372
	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1373
	if (node) {
2087 jermar 1374
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
1375
		uintptr_t right_pg = node->key[0];
1376
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1377
		count_t right_cnt = (count_t) node->value[0];
1387 jermar 1378
 
1379
		/*
1380
		 * Examine the possibility that the interval fits
1381
		 * somewhere between the leftmost interval of
1382
		 * the right neighbour and the last interval of the leaf.
1383
		 */
1384
 
1385
		if (page < left_pg) {
1386
			/* Do nothing. */
2087 jermar 1387
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
1388
		    left_cnt * PAGE_SIZE)) {
1387 jermar 1389
			/* The interval intersects with the left interval. */
1390
			return 0;
2087 jermar 1391
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
1392
		    right_cnt * PAGE_SIZE)) {
1387 jermar 1393
			/* The interval intersects with the right interval. */
1394
			return 0;			
2087 jermar 1395
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1396
		    (page + count * PAGE_SIZE == right_pg)) {
1397
			/*
1398
			 * The interval can be added by merging the two already
1399
			 * present intervals.
1400
			 */
1403 jermar 1401
			leaf->value[leaf->keys - 1] += count + right_cnt;
1387 jermar 1402
			btree_remove(&a->used_space, right_pg, node);
1403
			return 1; 
2087 jermar 1404
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
1405
			/*
1406
			 * The interval can be added by simply growing the left
1407
			 * interval.
1408
			 */
1403 jermar 1409
			leaf->value[leaf->keys - 1] +=  count;
1387 jermar 1410
			return 1;
2087 jermar 1411
		} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1412
			/*
2087 jermar 1413
			 * The interval can be added by simply moving the base of
1414
			 * the right interval down and increasing its size
1415
			 * accordingly.
1387 jermar 1416
			 */
1403 jermar 1417
			node->value[0] += count;
1387 jermar 1418
			node->key[0] = page;
1419
			return 1;
1420
		} else {
1421
			/*
1422
			 * The interval is between both neighbouring intervals,
1423
			 * but cannot be merged with any of them.
1424
			 */
2087 jermar 1425
			btree_insert(&a->used_space, page, (void *) count,
1426
			    leaf);
1387 jermar 1427
			return 1;
1428
		}
1429
	} else if (page >= leaf->key[leaf->keys - 1]) {
1780 jermar 1430
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 jermar 1431
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1432
 
1433
		/*
2087 jermar 1434
		 * Investigate the border case in which the right neighbour
1435
		 * does not exist but the interval fits from the right.
1387 jermar 1436
		 */
1437
 
2087 jermar 1438
		if (overlaps(page, count * PAGE_SIZE, left_pg,
1439
		    left_cnt * PAGE_SIZE)) {
1403 jermar 1440
			/* The interval intersects with the left interval. */
1387 jermar 1441
			return 0;
2087 jermar 1442
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
1443
			/*
1444
			 * The interval can be added by growing the left
1445
			 * interval.
1446
			 */
1403 jermar 1447
			leaf->value[leaf->keys - 1] += count;
1387 jermar 1448
			return 1;
1449
		} else {
1450
			/*
1451
			 * The interval doesn't adjoin with the left interval.
1452
			 * It must be added individually.
1453
			 */
2087 jermar 1454
			btree_insert(&a->used_space, page, (void *) count,
1455
			    leaf);
1387 jermar 1456
			return 1;
1457
		}
1458
	}
1459
 
1460
	/*
2087 jermar 1461
	 * Note that if the algorithm made it thus far, the interval can fit
1462
	 * only between two other intervals of the leaf. The two border cases
1463
	 * were already resolved.
1387 jermar 1464
	 */
1465
	for (i = 1; i < leaf->keys; i++) {
1466
		if (page < leaf->key[i]) {
2087 jermar 1467
			uintptr_t left_pg = leaf->key[i - 1];
1468
			uintptr_t right_pg = leaf->key[i];
1469
			count_t left_cnt = (count_t) leaf->value[i - 1];
1470
			count_t right_cnt = (count_t) leaf->value[i];
1387 jermar 1471
 
1472
			/*
1473
			 * The interval fits between left_pg and right_pg.
1474
			 */
1475
 
2087 jermar 1476
			if (overlaps(page, count * PAGE_SIZE, left_pg,
1477
			    left_cnt * PAGE_SIZE)) {
1478
				/*
1479
				 * The interval intersects with the left
1480
				 * interval.
1481
				 */
1387 jermar 1482
				return 0;
2087 jermar 1483
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
1484
			    right_cnt * PAGE_SIZE)) {
1485
				/*
1486
				 * The interval intersects with the right
1487
				 * interval.
1488
				 */
1387 jermar 1489
				return 0;			
2087 jermar 1490
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1491
			    (page + count * PAGE_SIZE == right_pg)) {
1492
				/*
1493
				 * The interval can be added by merging the two
1494
				 * already present intervals.
1495
				 */
1403 jermar 1496
				leaf->value[i - 1] += count + right_cnt;
1387 jermar 1497
				btree_remove(&a->used_space, right_pg, leaf);
1498
				return 1; 
2087 jermar 1499
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
1500
				/*
1501
				 * The interval can be added by simply growing
1502
				 * the left interval.
1503
				 */
1403 jermar 1504
				leaf->value[i - 1] += count;
1387 jermar 1505
				return 1;
2087 jermar 1506
			} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1507
				/*
2087 jermar 1508
				 * The interval can be added by simply moving the
1509
				 * base of the right interval down and
1510
				 * increasing its size accordingly.
1387 jermar 1511
				 */
1403 jermar 1512
				leaf->value[i] += count;
1387 jermar 1513
				leaf->key[i] = page;
1514
				return 1;
1515
			} else {
1516
				/*
2087 jermar 1517
				 * The interval is between both neighbouring
1518
				 * intervals, but cannot be merged with any of
1519
				 * them.
1387 jermar 1520
				 */
2087 jermar 1521
				btree_insert(&a->used_space, page,
1522
				    (void *) count, leaf);
1387 jermar 1523
				return 1;
1524
			}
1525
		}
1526
	}
1527
 
3057 decky 1528
	panic("Inconsistency detected while adding %" PRIc " pages of used space at "
2087 jermar 1529
	    "%p.\n", count, page);
1387 jermar 1530
}
1531
 
1532
/** Mark portion of address space area as unused.
1533
 *
1534
 * The address space area must be already locked.
1535
 *
1536
 * @param a Address space area.
1537
 * @param page First page to be marked.
1538
 * @param count Number of pages to be marked.
1539
 *
1540
 * @return 0 on failure and 1 on success.
1541
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	unsigned int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count * PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour and can be
				 * removed by updating the size of the bigger
				 * interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE <
			    left_pg + left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour but its
				 * removal requires both updating the size of
				 * the original interval and also inserting a
				 * new interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf and can be removed by
				 * updating the size of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE < left_pg +
			    left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf but its removal
				 * requires both updating the size of the
				 * original interval and also inserting a new
				 * interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have already been resolved.
	 * Now the interval can only be between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding
			 * to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
			    count * PAGE_SIZE)) {
				if (page + count * PAGE_SIZE ==
				    left_pg + left_cnt * PAGE_SIZE) {
					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf and can
					 * be removed by updating the size of
					 * the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count * PAGE_SIZE <
				    left_pg + left_cnt * PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf but its
					 * removal requires both updating the
					 * size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg +
					    left_cnt * PAGE_SIZE) -
					    (page + count * PAGE_SIZE)) >>
					    PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page +
					    count * PAGE_SIZE, (void *) new_cnt,
					    leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %" PRIc " pages of used "
	    "space from %p.\n", count, page);
}
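
/*
 * Worked example for the split case above (illustration only; assumes
 * 4 KiB pages, i.e. PAGE_WIDTH == 12, and made-up addresses). Removing
 * [0x3000, 2 pages) from the middle of the interval [0x1000, 8 pages):
 *
 *	new_cnt = ((0x1000 + 8 * 0x1000) - (0x3000 + 2 * 0x1000)) >> 12
 *	        = (0x9000 - 0x5000) >> 12 = 4
 *
 * The original interval shrinks to [0x1000, 2 pages), since
 * 8 - (2 + 4) = 2, and the new interval [0x5000, 4 pages) is inserted
 * right after the removed hole.
 */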

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now carefully walk the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next;
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			unsigned int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}
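
/*
 * Sketch of the matching reference acquisition (illustration only, not part
 * of the original file): a new holder of the share info would take a
 * reference roughly like this, under the same lock that protects the
 * decrement above.
 */
#if 0
	mutex_lock(&sh_info->lock);
	sh_info->refcount++;
	mutex_unlock(&sh_info->lock);
#endif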

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}
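
/*
 * Caller-side sketch (illustration only; a direct call stands in for the
 * actual syscall path, and the address and size values are made up).
 * sys_as_area_create() returns the area base on success and (unative_t) -1
 * on failure. Note that sys_as_area_resize() currently ignores its flags
 * argument and passes 0 to as_area_resize().
 */
#if 0
	uintptr_t addr = 0x20000000;	/* hypothetical base address */
	size_t size = 4 * PAGE_SIZE;

	unative_t rc = sys_as_area_create(addr, size,
	    AS_AREA_READ | AS_AREA_WRITE);
	if (rc == (unative_t) -1) {
		/* Creation failed. */
	} else {
		void *base = (void *) rc;	/* Area created at base. */
	}
#endif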

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* Print out info about address space areas. */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		unsigned int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%" PRIc
			    " (%p - %p)\n", area, area->base, area->pages,
			    area->base, area->base + FRAMES2SIZE(area->pages));
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}
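
/*
 * Example of the output produced by as_print() (values are made up for
 * illustration; with 4 KiB pages, 16 pages span 0x10000 bytes):
 *
 *	as_area: 0xfc1b2e40, base=0x70000000, pages=16 (0x70000000 - 0x70010000)
 */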

/** @}
 */