/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */
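
/*
 * Usage sketch (illustrative only, not compiled): a typical life cycle
 * of an address space as driven by the interface below. Here, base
 * stands for an arbitrary page-aligned userspace address.
 *
 *	as_t *as = as_create(0);
 *	as_area_t *a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE,
 *	    4 * PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *	...			// page faults populate the area lazily
 *	as_destroy(as);		// legal once as->refcount dropped to zero
 */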

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

#ifndef __OBJC__
/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;
#endif

/**
 * This lock serializes access to the ASID subsystem.
 * It protects:
 * - inactive_as_with_asid_head list
 * - as->asid for each as of the as_t type
 * - asids_allocated counter
 */
SPINLOCK_INITIALIZE(asidlock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

#ifndef __OBJC__
static int as_constructor(void *obj, int flags)
{
	as_t *as = (as_t *) obj;
	int rc;

	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);

	rc = as_constructor_arch(as, flags);

	return rc;
}

static int as_destructor(void *obj)
{
	as_t *as = (as_t *) obj;

	return as_destructor_arch(as);
}
#endif

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();

#ifndef __OBJC__
	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
#endif

	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}
 
/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is
 *     created.
 */
as_t *as_create(int flags)
{
	as_t *as;

#ifdef __OBJC__
	as = [as_t new];
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	(void) as_constructor_arch(as, flags);
#else
	as = (as_t *) slab_alloc(as_slab, 0);
#endif
	(void) as_create_arch(as, 0);

	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
	as->genarch.page_table = page_table_create(flags);
#else
	page_table_create(flags);
#endif

	return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this address space,
	 * it is safe not to lock its mutex.
	 */

	ipl = interrupts_disable();
	spinlock_lock(&asidlock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&asidlock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next,
		    btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
	page_table_destroy(as->genarch.page_table);
#else
	page_table_destroy(NULL);
#endif

	interrupts_restore(ipl);

#ifdef __OBJC__
	[as free];
#else
	slab_free(as_slab, as);
#endif
}
253
 
703 jermar 254
/** Create address space area of common attributes.
255
 *
256
 * The created address space area is added to the target address space.
257
 *
258
 * @param as Target address space.
1239 jermar 259
 * @param flags Flags of the area memory.
1048 jermar 260
 * @param size Size of area.
703 jermar 261
 * @param base Base address of area.
1239 jermar 262
 * @param attrs Attributes of the area.
1409 jermar 263
 * @param backend Address space area backend. NULL if no backend is used.
264
 * @param backend_data NULL or a pointer to an array holding two void *.
703 jermar 265
 *
266
 * @return Address space area on success or NULL on failure.
267
 */
2069 jermar 268
as_area_t *
269
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
1424 jermar 270
	       mem_backend_t *backend, mem_backend_data_t *backend_data)
703 jermar 271
{
272
	ipl_t ipl;
273
	as_area_t *a;
274
 
275
	if (base % PAGE_SIZE)
1048 jermar 276
		return NULL;
277
 
1233 jermar 278
	if (!size)
279
		return NULL;
280
 
1048 jermar 281
	/* Writeable executable areas are not supported. */
282
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
283
		return NULL;
703 jermar 284
 
285
	ipl = interrupts_disable();
1380 jermar 286
	mutex_lock(&as->lock);
703 jermar 287
 
1048 jermar 288
	if (!check_area_conflicts(as, base, size, NULL)) {
1380 jermar 289
		mutex_unlock(&as->lock);
1048 jermar 290
		interrupts_restore(ipl);
291
		return NULL;
292
	}
703 jermar 293
 
822 palkovsky 294
	a = (as_area_t *) malloc(sizeof(as_area_t), 0);
703 jermar 295
 
1380 jermar 296
	mutex_initialize(&a->lock);
822 palkovsky 297
 
1424 jermar 298
	a->as = as;
1026 jermar 299
	a->flags = flags;
1239 jermar 300
	a->attributes = attrs;
1048 jermar 301
	a->pages = SIZE2FRAMES(size);
822 palkovsky 302
	a->base = base;
1409 jermar 303
	a->sh_info = NULL;
304
	a->backend = backend;
1424 jermar 305
	if (backend_data)
306
		a->backend_data = *backend_data;
307
	else
2087 jermar 308
		memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
309
		    0);
1424 jermar 310
 
1387 jermar 311
	btree_create(&a->used_space);
822 palkovsky 312
 
1147 jermar 313
	btree_insert(&as->as_area_btree, base, (void *) a, NULL);
822 palkovsky 314
 
1380 jermar 315
	mutex_unlock(&as->lock);
703 jermar 316
	interrupts_restore(ipl);
704 jermar 317
 
703 jermar 318
	return a;
319
}
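
/*
 * Example (illustrative only): create a four-page anonymous, cacheable,
 * read/write area at the page-aligned address base. Passing NULL for
 * backend_data simply zero-fills a->backend_data.
 *
 *	as_area_t *a = as_area_create(as,
 *	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *	    4 * PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *	if (!a) {
 *		// base not page-aligned, size zero, write+exec requested,
 *		// or the new area would conflict with an existing one
 *	}
 */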

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		uintptr_t start_free = area->base + pages * PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
		    pages * PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node =
			    list_get_instance(area->used_space.leaf_head.prev,
			    btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				uintptr_t b = node->key[node->keys - 1];
				count_t c =
				    (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c * PAGE_SIZE, area->base,
				    pages * PAGE_SIZE)) {

					if (b + c * PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits
						 * completely in the resized
						 * address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding
					 * to b and c overlaps with the resized
					 * address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free,
					    c - i))
						panic("Could not remove used "
						    "space.\n");
				} else {
					/*
					 * The interval of used space can be
					 * completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used "
						    "space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b +
					    i * PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) &&
					    PTE_PRESENT(pte));
					if (area->backend &&
					    area->backend->frame_free) {
						area->backend->frame_free(area,
						    b + i * PAGE_SIZE,
						    PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b +
					    i * PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
		    area->pages - pages);
		tlb_shootdown_finalize();

		/*
		 * Invalidate software translation caches (e.g. TSB on sparc64).
		 */
		as_invalidate_translation_cache(as, area->base +
		    pages * PAGE_SIZE, area->pages - pages);
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
		    area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
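
/*
 * Example (illustrative only): shrink the area based at base to a single
 * page. Frames backing the truncated pages are returned via the backend's
 * frame_free() and the respective TLB entries are shot down.
 *
 *	int rc = as_area_resize(AS, base, PAGE_SIZE, 0);
 *	// rc is 0, ENOENT, ENOTSUP (device or shared area), EPERM or
 *	// EADDRNOTAVAIL, as in the cases handled above
 */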

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
	as_area_t *area;
	uintptr_t base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next;
	    cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			uintptr_t b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				if (area->backend &&
				    area->backend->frame_free) {
					area->backend->frame_free(area, b +
					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j * PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */
	tlb_invalidate_pages(as->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	/*
	 * Invalidate potential software translation caches (e.g. TSB on
	 * sparc64).
	 */
	as_invalidate_translation_cache(as, area->base, area->pages);

	btree_destroy(&area->used_space);

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}
600
 
1413 jermar 601
/** Share address space area with another or the same address space.
1235 jermar 602
 *
1424 jermar 603
 * Address space area mapping is shared with a new address space area.
604
 * If the source address space area has not been shared so far,
605
 * a new sh_info is created. The new address space area simply gets the
606
 * sh_info of the source area. The process of duplicating the
607
 * mapping is done through the backend share function.
1413 jermar 608
 * 
1417 jermar 609
 * @param src_as Pointer to source address space.
1239 jermar 610
 * @param src_base Base address of the source address space area.
1417 jermar 611
 * @param acc_size Expected size of the source area.
1428 palkovsky 612
 * @param dst_as Pointer to destination address space.
1417 jermar 613
 * @param dst_base Target base address.
614
 * @param dst_flags_mask Destination address space area flags mask.
1235 jermar 615
 *
2007 jermar 616
 * @return Zero on success or ENOENT if there is no such task or if there is no
617
 * such address space area, EPERM if there was a problem in accepting the area
618
 * or ENOMEM if there was a problem in allocating destination address space
619
 * area. ENOTSUP is returned if the address space area backend does not support
2141 jermar 620
 * sharing.
1235 jermar 621
 */
1780 jermar 622
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
623
		  as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
1235 jermar 624
{
625
	ipl_t ipl;
1239 jermar 626
	int src_flags;
627
	size_t src_size;
628
	as_area_t *src_area, *dst_area;
1413 jermar 629
	share_info_t *sh_info;
1424 jermar 630
	mem_backend_t *src_backend;
631
	mem_backend_data_t src_backend_data;
1434 palkovsky 632
 
1235 jermar 633
	ipl = interrupts_disable();
1380 jermar 634
	mutex_lock(&src_as->lock);
1329 palkovsky 635
	src_area = find_area_and_lock(src_as, src_base);
1239 jermar 636
	if (!src_area) {
1238 jermar 637
		/*
638
		 * Could not find the source address space area.
639
		 */
1380 jermar 640
		mutex_unlock(&src_as->lock);
1238 jermar 641
		interrupts_restore(ipl);
642
		return ENOENT;
643
	}
2007 jermar 644
 
1424 jermar 645
	if (!src_area->backend || !src_area->backend->share) {
1413 jermar 646
		/*
1851 jermar 647
		 * There is no backend or the backend does not
1424 jermar 648
		 * know how to share the area.
1413 jermar 649
		 */
650
		mutex_unlock(&src_area->lock);
651
		mutex_unlock(&src_as->lock);
652
		interrupts_restore(ipl);
653
		return ENOTSUP;
654
	}
655
 
1239 jermar 656
	src_size = src_area->pages * PAGE_SIZE;
657
	src_flags = src_area->flags;
1424 jermar 658
	src_backend = src_area->backend;
659
	src_backend_data = src_area->backend_data;
1544 palkovsky 660
 
661
	/* Share the cacheable flag from the original mapping */
662
	if (src_flags & AS_AREA_CACHEABLE)
663
		dst_flags_mask |= AS_AREA_CACHEABLE;
664
 
2087 jermar 665
	if (src_size != acc_size ||
666
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
1413 jermar 667
		mutex_unlock(&src_area->lock);
668
		mutex_unlock(&src_as->lock);
1235 jermar 669
		interrupts_restore(ipl);
670
		return EPERM;
671
	}
1413 jermar 672
 
1235 jermar 673
	/*
1413 jermar 674
	 * Now we are committed to sharing the area.
1954 jermar 675
	 * First, prepare the area for sharing.
1413 jermar 676
	 * Then it will be safe to unlock it.
677
	 */
678
	sh_info = src_area->sh_info;
679
	if (!sh_info) {
680
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
681
		mutex_initialize(&sh_info->lock);
682
		sh_info->refcount = 2;
683
		btree_create(&sh_info->pagemap);
684
		src_area->sh_info = sh_info;
685
	} else {
686
		mutex_lock(&sh_info->lock);
687
		sh_info->refcount++;
688
		mutex_unlock(&sh_info->lock);
689
	}
690
 
1424 jermar 691
	src_area->backend->share(src_area);
1413 jermar 692
 
693
	mutex_unlock(&src_area->lock);
694
	mutex_unlock(&src_as->lock);
695
 
696
	/*
1239 jermar 697
	 * Create copy of the source address space area.
698
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
699
	 * attribute set which prevents race condition with
700
	 * preliminary as_page_fault() calls.
1417 jermar 701
	 * The flags of the source area are masked against dst_flags_mask
702
	 * to support sharing in less privileged mode.
1235 jermar 703
	 */
1461 palkovsky 704
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
2087 jermar 705
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
1239 jermar 706
	if (!dst_area) {
1235 jermar 707
		/*
708
		 * Destination address space area could not be created.
709
		 */
1413 jermar 710
		sh_info_remove_reference(sh_info);
711
 
1235 jermar 712
		interrupts_restore(ipl);
713
		return ENOMEM;
714
	}
2009 jermar 715
 
1235 jermar 716
	/*
1239 jermar 717
	 * Now the destination address space area has been
718
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
1413 jermar 719
	 * attribute and set the sh_info.
1239 jermar 720
	 */	
2009 jermar 721
	mutex_lock(&dst_as->lock);	
1380 jermar 722
	mutex_lock(&dst_area->lock);
1239 jermar 723
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1413 jermar 724
	dst_area->sh_info = sh_info;
1380 jermar 725
	mutex_unlock(&dst_area->lock);
2009 jermar 726
	mutex_unlock(&dst_as->lock);	
727
 
1235 jermar 728
	interrupts_restore(ipl);
729
 
730
	return 0;
731
}
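
/*
 * Example (illustrative only, names are placeholders): share the area
 * based at src_base of the current address space read-only into dst_as,
 * e.g. when answering an IPC memory sharing request. The source area's
 * backend must implement share().
 *
 *	int rc = as_area_share(AS, src_base, a_size, dst_as, dst_base,
 *	    AS_AREA_READ);
 *	// rc is 0, ENOENT, ENOTSUP, EPERM or ENOMEM as documented above
 */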

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *     fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
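
/*
 * Sketch of a caller (illustrative only): an architecture-specific
 * low-level fault handler is expected to do roughly the following.
 *
 *	if (as_page_fault(ALIGN_DOWN(badva, PAGE_SIZE), PF_ACCESS_READ,
 *	    istate) == AS_PF_FAULT) {
 *		// unresolved fault, e.g. panic or kill the task
 *	}
 *	// AS_PF_DEFER means a failing copy_from_uspace()/copy_to_uspace()
 *	// was redirected to its failover address; nothing more to do.
 */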

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
 * thing which is forbidden in this context is locking the address space.
 *
 * @param old_as Old address space or NULL.
 * @param new_as New address space.
 */
void as_switch(as_t *old_as, as_t *new_as)
{
	spinlock_lock(&asidlock);

	/*
	 * First, take care of the old address space.
	 */
	if (old_as) {
		ASSERT(old_as->cpu_refcount);
		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old_as->asid != ASID_INVALID);
			list_append(&old_as->inactive_as_with_asid_link,
			    &inactive_as_with_asid_head);
		}

		/*
		 * Perform architecture-specific tasks when the address space
		 * is being removed from the CPU.
		 */
		as_deinstall_arch(old_as);
	}

	/*
	 * Second, prepare the new address space.
	 */
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
		if (new_as->asid != ASID_INVALID)
			list_remove(&new_as->inactive_as_with_asid_link);
		else
			new_as->asid = asid_get();
	}
#ifdef AS_PAGE_TABLE
	SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new_as);

	spinlock_unlock(&asidlock);

	AS = new_as;
}
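
/*
 * Illustrative call site (simplified): the scheduler switches address
 * spaces when the thread being scheduled belongs to a different task.
 *
 *	as_t *new_as = THREAD->task->as;
 *	if (AS != new_as)
 *		as_switch(AS, new_as);	// also updates AS
 */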

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}
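
/*
 * Worked example: for a readable, writable and cacheable area,
 *
 *	area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE |
 *	    AS_AREA_CACHEABLE)
 *
 * yields PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE |
 * PAGE_CACHEABLE. PAGE_USER and PAGE_PRESENT are always set.
 */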

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
#ifdef __OBJC__
	return [as_t page_table_create: flags];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
#endif
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
#ifdef __OBJC__
	return [as_t page_table_destroy: page_table];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
#endif
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
#ifdef __OBJC__
	[as page_table_lock: lock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
#endif
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
#ifdef __OBJC__
	[as page_table_unlock: unlock];
#else
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
#endif
}
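
/*
 * Example of the locking order described above, as practiced by
 * as_page_fault() and as_area_resize(): the address space (and area)
 * locks are taken first, hence the false arguments.
 *
 *	page_table_lock(as, false);
 *	pte = page_mapping_find(as, page);
 *	// ... examine or modify the mapping ...
 *	page_table_unlock(as, false);
 */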

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 *     failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
	if (lnode) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
	if (node) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check if it doesn't conflict with kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
		    KERNEL_ADDRESS_SPACE_START,
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = node->key[node->keys - 1];
		uintptr_t right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		uintptr_t right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does
		 * not exist but the interval fits from the left.
		 */

		if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the
			 * right interval down and increasing its size
			 * accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		uintptr_t right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
		count_t right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour
		 * does not exist but the interval fits from the right.
		 */

		if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
			/*
			 * The interval can be added by growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit
	 * only between two other intervals of the leaf. The two border cases
	 * were already resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			uintptr_t right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1];
			count_t right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count * PAGE_SIZE, left_pg,
			    left_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the left
				 * interval.
				 */
				return 0;
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
			    right_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the right
				 * interval.
				 */
				return 0;
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
			    (page + count * PAGE_SIZE == right_pg)) {
				/*
				 * The interval can be added by merging the two
				 * already present intervals.
				 */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval can be added by simply growing
				 * the left interval.
				 */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count * PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving
				 * base of the right interval down and
				 * increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring
				 * intervals, but cannot be merged with any of
				 * them.
				 */
				btree_insert(&a->used_space, page,
				    (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at "
	    "%p.\n", count, page);
}
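
/*
 * Worked example of the merging performed above: with existing used-space
 * intervals [P, P + 2 * PAGE_SIZE) and [P + 3 * PAGE_SIZE,
 * P + 4 * PAGE_SIZE), the call
 *
 *	used_space_insert(a, P + 2 * PAGE_SIZE, 1);
 *
 * fills the one-page gap and coalesces all three pieces into a single
 * B+tree record describing [P, P + 4 * PAGE_SIZE).
 */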

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count * PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour and can be
				 * removed by updating the size of the bigger
				 * interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE <
			    left_pg + left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour but its
				 * removal requires both updating the size of
				 * the original interval and also inserting a
				 * new interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf and can be removed by
				 * updating the size of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE < left_pg +
			    left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf but its removal
				 * requires both updating the size of the
				 * original interval and also inserting a new
				 * interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding
			 * to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
			    count * PAGE_SIZE)) {
				if (page + count * PAGE_SIZE ==
				    left_pg + left_cnt * PAGE_SIZE) {
					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf and can
					 * be removed by updating the size of
					 * the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count * PAGE_SIZE <
				    left_pg + left_cnt * PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf but its
					 * removal requires both updating the
					 * size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg +
					    left_cnt * PAGE_SIZE) -
					    (page + count * PAGE_SIZE)) >>
					    PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page +
					    count * PAGE_SIZE, (void *) new_cnt,
					    leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space "
	    "from %p.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next;
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}
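
/*
 * Userspace view (illustrative only): these syscalls let a task map and
 * unmap anonymous memory along the following lines, with __SYSCALL*() and
 * SYS_AS_AREA_* standing for the libc syscall stubs and syscall numbers.
 *
 *	ptr = __SYSCALL3(SYS_AS_AREA_CREATE, addr, size,
 *	    AS_AREA_READ | AS_AREA_WRITE);
 *	...
 *	__SYSCALL1(SYS_AS_AREA_DESTROY, addr);
 */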

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* print out info about address space areas */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
			    area, area->base, area->pages, area->base,
			    area->base + area->pages * PAGE_SIZE);
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}

/** @}
 */