Rev Author Line No. Line
703 jermar 1
/*
2071 jermar 2
 * Copyright (c) 2001-2006 Jakub Jermar
703 jermar 3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 *
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
28
 
1757 jermar 29
/** @addtogroup genericmm
1702 cejka 30
 * @{
31
 */
32
 
1248 jermar 33
/**
1702 cejka 34
 * @file
1248 jermar 35
 * @brief	Address space related functions.
36
 *
703 jermar 37
 * This file contains address space manipulation functions.
38
 * Roughly speaking, this is a higher-level client of
39
 * the Virtual Address Translation (VAT) subsystem.
1248 jermar 40
 *
41
 * Functionality provided by this file allows one to
1757 jermar 42
 * create address spaces and create, resize and share
1248 jermar 43
 * address space areas.
44
 *
45
 * @see page.c
46
 *
703 jermar 47
 */
48
 
49
#include <mm/as.h>
756 jermar 50
#include <arch/mm/as.h>
703 jermar 51
#include <mm/page.h>
52
#include <mm/frame.h>
814 palkovsky 53
#include <mm/slab.h>
703 jermar 54
#include <mm/tlb.h>
55
#include <arch/mm/page.h>
56
#include <genarch/mm/page_pt.h>
1108 jermar 57
#include <genarch/mm/page_ht.h>
727 jermar 58
#include <mm/asid.h>
703 jermar 59
#include <arch/mm/asid.h>
60
#include <synch/spinlock.h>
1380 jermar 61
#include <synch/mutex.h>
788 jermar 62
#include <adt/list.h>
1147 jermar 63
#include <adt/btree.h>
1235 jermar 64
#include <proc/task.h>
1288 jermar 65
#include <proc/thread.h>
1235 jermar 66
#include <arch/asm.h>
703 jermar 67
#include <panic.h>
68
#include <debug.h>
1235 jermar 69
#include <print.h>
703 jermar 70
#include <memstr.h>
1070 jermar 71
#include <macros.h>
703 jermar 72
#include <arch.h>
1235 jermar 73
#include <errno.h>
74
#include <config.h>
1387 jermar 75
#include <align.h>
1235 jermar 76
#include <arch/types.h>
1288 jermar 77
#include <syscall/copy.h>
78
#include <arch/interrupt.h>
703 jermar 79
 
2009 jermar 80
#ifdef CONFIG_VIRT_IDX_DCACHE
81
#include <arch/mm/cache.h>
82
#endif /* CONFIG_VIRT_IDX_DCACHE */
83
 
1757 jermar 84
/**
85
 * Each architecture decides what functions will be used to carry out
86
 * address space operations such as creating or locking page tables.
87
 */
756 jermar 88
as_operations_t *as_operations = NULL;
703 jermar 89
 
1890 jermar 90
/**
91
 * Slab for as_t objects.
92
 */
93
static slab_cache_t *as_slab;
94
 
2087 jermar 95
/**
96
 * This lock protects inactive_as_with_asid_head list. It must be acquired
97
 * before as_t mutex.
98
 */
1415 jermar 99
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
823 jermar 100
 
101
/**
102
 * This list contains address spaces that are not active on any
103
 * processor and that have valid ASID.
104
 */
105
LIST_INITIALIZE(inactive_as_with_asid_head);
106
 
757 jermar 107
/** Kernel address space. */
108
as_t *AS_KERNEL = NULL;
109
 
1235 jermar 110
static int area_flags_to_page_flags(int aflags);
1780 jermar 111
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
2087 jermar 112
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
113
    as_area_t *avoid_area);
1409 jermar 114
static void sh_info_remove_reference(share_info_t *sh_info);
703 jermar 115
 
1891 jermar 116
static int as_constructor(void *obj, int flags)
117
{
118
	as_t *as = (as_t *) obj;
119
	int rc;
120
 
121
	link_initialize(&as->inactive_as_with_asid_link);
122
	mutex_initialize(&as->lock);	
123
 
124
	rc = as_constructor_arch(as, flags);
125
 
126
	return rc;
127
}
128
 
129
static int as_destructor(void *obj)
130
{
131
	as_t *as = (as_t *) obj;
132
 
133
	return as_destructor_arch(as);
134
}
135
 
756 jermar 136
/** Initialize address space subsystem. */
137
void as_init(void)
138
{
139
	as_arch_init();
1890 jermar 140
 
1891 jermar 141
	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
2087 jermar 142
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
1890 jermar 143
 
789 palkovsky 144
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
1383 decky 145
	if (!AS_KERNEL)
146
		panic("can't create kernel address space\n");
147
 
756 jermar 148
}
149
 
757 jermar 150
/** Create address space.
151
 *
152
 * @param flags Flags that influence the way in which the address space is created.
153
 */
756 jermar 154
as_t *as_create(int flags)
703 jermar 155
{
156
	as_t *as;
157
 
1890 jermar 158
	as = (as_t *) slab_alloc(as_slab, 0);
1891 jermar 159
	(void) as_create_arch(as, 0);
160
 
1147 jermar 161
	btree_create(&as->as_area_btree);
822 palkovsky 162
 
163
	if (flags & FLAG_AS_KERNEL)
164
		as->asid = ASID_KERNEL;
165
	else
166
		as->asid = ASID_INVALID;
167
 
1468 jermar 168
	as->refcount = 0;
1415 jermar 169
	as->cpu_refcount = 0;
2089 decky 170
#ifdef AS_PAGE_TABLE
2106 jermar 171
	as->genarch.page_table = page_table_create(flags);
2089 decky 172
#else
173
	page_table_create(flags);
174
#endif
703 jermar 175
 
176
	return as;
177
}
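
/*
 * Illustrative sketch only (the real call sites live elsewhere, e.g. in
 * task management code): creating a userspace address space and tearing
 * it down once nothing references it.
 *
 *	as_t *as = as_create(0);
 *	...
 *	ASSERT(as->refcount == 0);
 *	as_destroy(as);
 */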
178
 
1468 jermar 179
/** Destroy address space.
180
 *
2087 jermar 181
 * When there are no tasks referencing this address space (i.e. its refcount is
182
 * zero), the address space can be destroyed.
1468 jermar 183
 */
184
void as_destroy(as_t *as)
973 palkovsky 185
{
1468 jermar 186
	ipl_t ipl;
1594 jermar 187
	bool cond;
973 palkovsky 188
 
1468 jermar 189
	ASSERT(as->refcount == 0);
190
 
191
	/*
192
	 * Since there is no reference to this area,
193
	 * it is safe not to lock its mutex.
194
	 */
195
	ipl = interrupts_disable();
196
	spinlock_lock(&inactive_as_with_asid_lock);
1587 jermar 197
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
1594 jermar 198
		if (as != AS && as->cpu_refcount == 0)
1587 jermar 199
			list_remove(&as->inactive_as_with_asid_link);
1468 jermar 200
		asid_put(as->asid);
201
	}
202
	spinlock_unlock(&inactive_as_with_asid_lock);
203
 
204
	/*
205
	 * Destroy address space areas of the address space.
1954 jermar 206
	 * The B+tree must be walked carefully because it is
1594 jermar 207
	 * also being destroyed.
1468 jermar 208
	 */	
1594 jermar 209
	for (cond = true; cond; ) {
1468 jermar 210
		btree_node_t *node;
1594 jermar 211
 
212
		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
2087 jermar 213
		node = list_get_instance(as->as_area_btree.leaf_head.next,
214
		    btree_node_t, leaf_link);
1594 jermar 215
 
216
		if ((cond = node->keys)) {
217
			as_area_destroy(as, node->key[0]);
218
		}
1468 jermar 219
	}
1495 jermar 220
 
1483 jermar 221
	btree_destroy(&as->as_area_btree);
2089 decky 222
#ifdef AS_PAGE_TABLE
2106 jermar 223
	page_table_destroy(as->genarch.page_table);
2089 decky 224
#else
225
	page_table_destroy(NULL);
226
#endif
1468 jermar 227
 
228
	interrupts_restore(ipl);
229
 
1890 jermar 230
	slab_free(as_slab, as);
973 palkovsky 231
}
232
 
703 jermar 233
/** Create address space area of common attributes.
234
 *
235
 * The created address space area is added to the target address space.
236
 *
237
 * @param as Target address space.
1239 jermar 238
 * @param flags Flags of the area memory.
1048 jermar 239
 * @param size Size of area.
703 jermar 240
 * @param base Base address of area.
1239 jermar 241
 * @param attrs Attributes of the area.
1409 jermar 242
 * @param backend Address space area backend. NULL if no backend is used.
243
 * @param backend_data NULL or a pointer to an array holding two void *.
703 jermar 244
 *
245
 * @return Address space area on success or NULL on failure.
246
 */
2069 jermar 247
as_area_t *
248
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
1424 jermar 249
	       mem_backend_t *backend, mem_backend_data_t *backend_data)
703 jermar 250
{
251
	ipl_t ipl;
252
	as_area_t *a;
253
 
254
	if (base % PAGE_SIZE)
1048 jermar 255
		return NULL;
256
 
1233 jermar 257
	if (!size)
258
		return NULL;
259
 
1048 jermar 260
	/* Writeable executable areas are not supported. */
261
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
262
		return NULL;
703 jermar 263
 
264
	ipl = interrupts_disable();
1380 jermar 265
	mutex_lock(&as->lock);
703 jermar 266
 
1048 jermar 267
	if (!check_area_conflicts(as, base, size, NULL)) {
1380 jermar 268
		mutex_unlock(&as->lock);
1048 jermar 269
		interrupts_restore(ipl);
270
		return NULL;
271
	}
703 jermar 272
 
822 palkovsky 273
	a = (as_area_t *) malloc(sizeof(as_area_t), 0);
703 jermar 274
 
1380 jermar 275
	mutex_initialize(&a->lock);
822 palkovsky 276
 
1424 jermar 277
	a->as = as;
1026 jermar 278
	a->flags = flags;
1239 jermar 279
	a->attributes = attrs;
1048 jermar 280
	a->pages = SIZE2FRAMES(size);
822 palkovsky 281
	a->base = base;
1409 jermar 282
	a->sh_info = NULL;
283
	a->backend = backend;
1424 jermar 284
	if (backend_data)
285
		a->backend_data = *backend_data;
286
	else
2087 jermar 287
		memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
288
		    0);
1424 jermar 289
 
1387 jermar 290
	btree_create(&a->used_space);
822 palkovsky 291
 
1147 jermar 292
	btree_insert(&as->as_area_btree, base, (void *) a, NULL);
822 palkovsky 293
 
1380 jermar 294
	mutex_unlock(&as->lock);
703 jermar 295
	interrupts_restore(ipl);
704 jermar 296
 
703 jermar 297
	return a;
298
}
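
/*
 * Illustrative sketch only: creating an anonymous, cacheable, read/write
 * area of four pages in address space 'as'. The base address 0x10000000
 * is made up; compare with sys_as_area_create() near the end of this
 * file, which performs a similar call for the current address space.
 *
 *	as_area_t *a = as_area_create(as,
 *	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *	    4 * PAGE_SIZE, 0x10000000, AS_AREA_ATTR_NONE, &anon_backend,
 *	    NULL);
 */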
299
 
1235 jermar 300
/** Find address space area and change it.
301
 *
302
 * @param as Address space.
2087 jermar 303
 * @param address Virtual address belonging to the area to be changed. Must be
304
 *     page-aligned.
1235 jermar 305
 * @param size New size of the virtual memory block starting at address. 
306
 * @param flags Flags influencing the remap operation. Currently unused.
307
 *
1306 jermar 308
 * @return Zero on success or a value from @ref errno.h otherwise.
1235 jermar 309
 */ 
1780 jermar 310
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
1235 jermar 311
{
1306 jermar 312
	as_area_t *area;
1235 jermar 313
	ipl_t ipl;
314
	size_t pages;
315
 
316
	ipl = interrupts_disable();
1380 jermar 317
	mutex_lock(&as->lock);
1235 jermar 318
 
319
	/*
320
	 * Locate the area.
321
	 */
322
	area = find_area_and_lock(as, address);
323
	if (!area) {
1380 jermar 324
		mutex_unlock(&as->lock);
1235 jermar 325
		interrupts_restore(ipl);
1306 jermar 326
		return ENOENT;
1235 jermar 327
	}
328
 
1424 jermar 329
	if (area->backend == &phys_backend) {
1235 jermar 330
		/*
331
		 * Remapping of address space areas associated
332
		 * with memory mapped devices is not supported.
333
		 */
1380 jermar 334
		mutex_unlock(&area->lock);
335
		mutex_unlock(&as->lock);
1235 jermar 336
		interrupts_restore(ipl);
1306 jermar 337
		return ENOTSUP;
1235 jermar 338
	}
1409 jermar 339
	if (area->sh_info) {
340
		/*
341
		 * Remapping of shared address space areas 
342
		 * is not supported.
343
		 */
344
		mutex_unlock(&area->lock);
345
		mutex_unlock(&as->lock);
346
		interrupts_restore(ipl);
347
		return ENOTSUP;
348
	}
1235 jermar 349
 
350
	pages = SIZE2FRAMES((address - area->base) + size);
351
	if (!pages) {
352
		/*
353
		 * Zero size address space areas are not allowed.
354
		 */
1380 jermar 355
		mutex_unlock(&area->lock);
356
		mutex_unlock(&as->lock);
1235 jermar 357
		interrupts_restore(ipl);
1306 jermar 358
		return EPERM;
1235 jermar 359
	}
360
 
361
	if (pages < area->pages) {
1403 jermar 362
		bool cond;
1780 jermar 363
		uintptr_t start_free = area->base + pages*PAGE_SIZE;
1235 jermar 364
 
365
		/*
366
		 * Shrinking the area.
367
		 * No need to check for overlaps.
368
		 */
1403 jermar 369
 
370
		/*
1436 jermar 371
		 * Start TLB shootdown sequence.
372
		 */
2087 jermar 373
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
374
		    pages * PAGE_SIZE, area->pages - pages);
1436 jermar 375
 
376
		/*
1403 jermar 377
		 * Remove frames belonging to used space starting from
378
		 * the highest addresses downwards until an overlap with
379
		 * the resized address space area is found. Note that this
380
		 * is also the right way to remove part of the used_space
381
		 * B+tree leaf list.
382
		 */		
383
		for (cond = true; cond;) {
384
			btree_node_t *node;
385
 
386
			ASSERT(!list_empty(&area->used_space.leaf_head));
2087 jermar 387
			node = 
388
			    list_get_instance(area->used_space.leaf_head.prev,
389
			    btree_node_t, leaf_link);
1403 jermar 390
			if ((cond = (bool) node->keys)) {
1780 jermar 391
				uintptr_t b = node->key[node->keys - 1];
2087 jermar 392
				count_t c =
393
				    (count_t) node->value[node->keys - 1];
1403 jermar 394
				int i = 0;
1235 jermar 395
 
2087 jermar 396
				if (overlaps(b, c * PAGE_SIZE, area->base,
397
				    pages*PAGE_SIZE)) {
1403 jermar 398
 
2087 jermar 399
					if (b + c * PAGE_SIZE <= start_free) {
1403 jermar 400
						/*
2087 jermar 401
						 * The whole interval fits
402
						 * completely in the resized
403
						 * address space area.
1403 jermar 404
						 */
405
						break;
406
					}
407
 
408
					/*
2087 jermar 409
					 * Part of the interval corresponding
410
					 * to b and c overlaps with the resized
411
					 * address space area.
1403 jermar 412
					 */
413
 
414
					cond = false;	/* we are almost done */
415
					i = (start_free - b) >> PAGE_WIDTH;
2087 jermar 416
					if (!used_space_remove(area, start_free,
417
					    c - i))
418
						panic("Could not remove used "
419
						    "space.\n");
1403 jermar 420
				} else {
421
					/*
2087 jermar 422
					 * The interval of used space can be
423
					 * completely removed.
1403 jermar 424
					 */
425
					if (!used_space_remove(area, b, c))
2087 jermar 426
						panic("Could not remove used "
427
						    "space.\n");
1403 jermar 428
				}
429
 
430
				for (; i < c; i++) {
431
					pte_t *pte;
432
 
433
					page_table_lock(as, false);
2087 jermar 434
					pte = page_mapping_find(as, b +
435
					    i * PAGE_SIZE);
436
					ASSERT(pte && PTE_VALID(pte) &&
437
					    PTE_PRESENT(pte));
438
					if (area->backend &&
439
					    area->backend->frame_free) {
1424 jermar 440
						area->backend->frame_free(area,
2087 jermar 441
						    b + i * PAGE_SIZE,
442
						    PTE_GET_FRAME(pte));
1409 jermar 443
					}
2087 jermar 444
					page_mapping_remove(as, b +
445
					    i * PAGE_SIZE);
1403 jermar 446
					page_table_unlock(as, false);
447
				}
1235 jermar 448
			}
449
		}
1436 jermar 450
 
1235 jermar 451
		/*
1436 jermar 452
		 * Finish TLB shootdown sequence.
1235 jermar 453
		 */
2087 jermar 454
		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
455
		    area->pages - pages);
1235 jermar 456
		tlb_shootdown_finalize();
1889 jermar 457
 
458
		/*
459
		 * Invalidate software translation caches (e.g. TSB on sparc64).
460
		 */
2087 jermar 461
		as_invalidate_translation_cache(as, area->base +
462
		    pages * PAGE_SIZE, area->pages - pages);
1235 jermar 463
	} else {
464
		/*
465
		 * Growing the area.
466
		 * Check for overlaps with other address space areas.
467
		 */
2087 jermar 468
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
469
		    area)) {
1380 jermar 470
			mutex_unlock(&area->lock);
471
			mutex_unlock(&as->lock);		
1235 jermar 472
			interrupts_restore(ipl);
1306 jermar 473
			return EADDRNOTAVAIL;
1235 jermar 474
		}
475
	} 
476
 
477
	area->pages = pages;
478
 
1380 jermar 479
	mutex_unlock(&area->lock);
480
	mutex_unlock(&as->lock);
1235 jermar 481
	interrupts_restore(ipl);
482
 
1306 jermar 483
	return 0;
1235 jermar 484
}
485
 
1306 jermar 486
/** Destroy address space area.
487
 *
488
 * @param as Address space.
489
 * @param address Address within the area to be deleted.
490
 *
491
 * @return Zero on success or a value from @ref errno.h on failure. 
492
 */
1780 jermar 493
int as_area_destroy(as_t *as, uintptr_t address)
1306 jermar 494
{
495
	as_area_t *area;
1780 jermar 496
	uintptr_t base;
1495 jermar 497
	link_t *cur;
1306 jermar 498
	ipl_t ipl;
499
 
500
	ipl = interrupts_disable();
1380 jermar 501
	mutex_lock(&as->lock);
1306 jermar 502
 
503
	area = find_area_and_lock(as, address);
504
	if (!area) {
1380 jermar 505
		mutex_unlock(&as->lock);
1306 jermar 506
		interrupts_restore(ipl);
507
		return ENOENT;
508
	}
509
 
1403 jermar 510
	base = area->base;
511
 
1411 jermar 512
	/*
1436 jermar 513
	 * Start TLB shootdown sequence.
514
	 */
1889 jermar 515
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
1436 jermar 516
 
517
	/*
1411 jermar 518
	 * Visit only the pages mapped by used_space B+tree.
519
	 */
2087 jermar 520
	for (cur = area->used_space.leaf_head.next;
521
	    cur != &area->used_space.leaf_head; cur = cur->next) {
1411 jermar 522
		btree_node_t *node;
1495 jermar 523
		int i;
1403 jermar 524
 
1495 jermar 525
		node = list_get_instance(cur, btree_node_t, leaf_link);
526
		for (i = 0; i < node->keys; i++) {
1780 jermar 527
			uintptr_t b = node->key[i];
1495 jermar 528
			count_t j;
1411 jermar 529
			pte_t *pte;
1403 jermar 530
 
1495 jermar 531
			for (j = 0; j < (count_t) node->value[i]; j++) {
1411 jermar 532
				page_table_lock(as, false);
2087 jermar 533
				pte = page_mapping_find(as, b + j * PAGE_SIZE);
534
				ASSERT(pte && PTE_VALID(pte) &&
535
				    PTE_PRESENT(pte));
536
				if (area->backend &&
537
				    area->backend->frame_free) {
538
					area->backend->frame_free(area,	b +
539
					j * PAGE_SIZE, PTE_GET_FRAME(pte));
1403 jermar 540
				}
2087 jermar 541
				page_mapping_remove(as, b + j * PAGE_SIZE);				
1411 jermar 542
				page_table_unlock(as, false);
1306 jermar 543
			}
544
		}
545
	}
1403 jermar 546
 
1306 jermar 547
	/*
1436 jermar 548
	 * Finish TLB shootdown sequence.
1306 jermar 549
	 */
1889 jermar 550
	tlb_invalidate_pages(as->asid, area->base, area->pages);
1306 jermar 551
	tlb_shootdown_finalize();
1436 jermar 552
 
1889 jermar 553
	/*
2087 jermar 554
	 * Invalidate potential software translation caches (e.g. TSB on
555
	 * sparc64).
1889 jermar 556
	 */
557
	as_invalidate_translation_cache(as, area->base, area->pages);
558
 
1436 jermar 559
	btree_destroy(&area->used_space);
1306 jermar 560
 
1309 jermar 561
	area->attributes |= AS_AREA_ATTR_PARTIAL;
1409 jermar 562
 
563
	if (area->sh_info)
564
		sh_info_remove_reference(area->sh_info);
565
 
1380 jermar 566
	mutex_unlock(&area->lock);
1306 jermar 567
 
568
	/*
569
	 * Remove the empty area from address space.
570
	 */
1889 jermar 571
	btree_remove(&as->as_area_btree, base, NULL);
1306 jermar 572
 
1309 jermar 573
	free(area);
574
 
1889 jermar 575
	mutex_unlock(&as->lock);
1306 jermar 576
	interrupts_restore(ipl);
577
	return 0;
578
}
579
 
1413 jermar 580
/** Share address space area with another or the same address space.
1235 jermar 581
 *
1424 jermar 582
 * Address space area mapping is shared with a new address space area.
583
 * If the source address space area has not been shared so far,
584
 * a new sh_info is created. The new address space area simply gets the
585
 * sh_info of the source area. The process of duplicating the
586
 * mapping is done through the backend share function.
1413 jermar 587
 * 
1417 jermar 588
 * @param src_as Pointer to source address space.
1239 jermar 589
 * @param src_base Base address of the source address space area.
1417 jermar 590
 * @param acc_size Expected size of the source area.
1428 palkovsky 591
 * @param dst_as Pointer to destination address space.
1417 jermar 592
 * @param dst_base Target base address.
593
 * @param dst_flags_mask Destination address space area flags mask.
1235 jermar 594
 *
2007 jermar 595
 * @return Zero on success or ENOENT if there is no such task or if there is no
596
 * such address space area, EPERM if there was a problem in accepting the area
597
 * or ENOMEM if there was a problem in allocating destination address space
598
 * area. ENOTSUP is returned if the address space area backend does not support
2015 jermar 599
 * sharing or if the kernel detects an attempt to create an illegal address
600
 * alias.
1235 jermar 601
 */
1780 jermar 602
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
603
		  as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
1235 jermar 604
{
605
	ipl_t ipl;
1239 jermar 606
	int src_flags;
607
	size_t src_size;
608
	as_area_t *src_area, *dst_area;
1413 jermar 609
	share_info_t *sh_info;
1424 jermar 610
	mem_backend_t *src_backend;
611
	mem_backend_data_t src_backend_data;
1434 palkovsky 612
 
1235 jermar 613
	ipl = interrupts_disable();
1380 jermar 614
	mutex_lock(&src_as->lock);
1329 palkovsky 615
	src_area = find_area_and_lock(src_as, src_base);
1239 jermar 616
	if (!src_area) {
1238 jermar 617
		/*
618
		 * Could not find the source address space area.
619
		 */
1380 jermar 620
		mutex_unlock(&src_as->lock);
1238 jermar 621
		interrupts_restore(ipl);
622
		return ENOENT;
623
	}
2007 jermar 624
 
1424 jermar 625
	if (!src_area->backend || !src_area->backend->share) {
1413 jermar 626
		/*
1851 jermar 627
		 * There is no backend or the backend does not
1424 jermar 628
		 * know how to share the area.
1413 jermar 629
		 */
630
		mutex_unlock(&src_area->lock);
631
		mutex_unlock(&src_as->lock);
632
		interrupts_restore(ipl);
633
		return ENOTSUP;
634
	}
635
 
1239 jermar 636
	src_size = src_area->pages * PAGE_SIZE;
637
	src_flags = src_area->flags;
1424 jermar 638
	src_backend = src_area->backend;
639
	src_backend_data = src_area->backend_data;
1544 palkovsky 640
 
641
	/* Share the cacheable flag from the original mapping */
642
	if (src_flags & AS_AREA_CACHEABLE)
643
		dst_flags_mask |= AS_AREA_CACHEABLE;
644
 
2087 jermar 645
	if (src_size != acc_size ||
646
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
1413 jermar 647
		mutex_unlock(&src_area->lock);
648
		mutex_unlock(&src_as->lock);
1235 jermar 649
		interrupts_restore(ipl);
650
		return EPERM;
651
	}
1413 jermar 652
 
2015 jermar 653
#ifdef CONFIG_VIRT_IDX_DCACHE
654
	if (!(dst_flags_mask & AS_AREA_EXEC)) {
655
		if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
656
			/*
657
			 * Refuse to create an illegal address alias.
658
			 */
659
			mutex_unlock(&src_area->lock);
660
			mutex_unlock(&src_as->lock);
661
			interrupts_restore(ipl);
662
			return ENOTSUP;
663
		}
664
	}
665
#endif /* CONFIG_VIRT_IDX_DCACHE */
666
 
1235 jermar 667
	/*
1413 jermar 668
	 * Now we are committed to sharing the area.
1954 jermar 669
	 * First, prepare the area for sharing.
1413 jermar 670
	 * Then it will be safe to unlock it.
671
	 */
672
	sh_info = src_area->sh_info;
673
	if (!sh_info) {
674
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
675
		mutex_initialize(&sh_info->lock);
676
		sh_info->refcount = 2;
677
		btree_create(&sh_info->pagemap);
678
		src_area->sh_info = sh_info;
679
	} else {
680
		mutex_lock(&sh_info->lock);
681
		sh_info->refcount++;
682
		mutex_unlock(&sh_info->lock);
683
	}
684
 
1424 jermar 685
	src_area->backend->share(src_area);
1413 jermar 686
 
687
	mutex_unlock(&src_area->lock);
688
	mutex_unlock(&src_as->lock);
689
 
690
	/*
1239 jermar 691
	 * Create copy of the source address space area.
692
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
693
	 * attribute set which prevents race condition with
694
	 * preliminary as_page_fault() calls.
1417 jermar 695
	 * The flags of the source area are masked against dst_flags_mask
696
	 * to support sharing in less privileged mode.
1235 jermar 697
	 */
1461 palkovsky 698
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
2087 jermar 699
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
1239 jermar 700
	if (!dst_area) {
1235 jermar 701
		/*
702
		 * Destination address space area could not be created.
703
		 */
1413 jermar 704
		sh_info_remove_reference(sh_info);
705
 
1235 jermar 706
		interrupts_restore(ipl);
707
		return ENOMEM;
708
	}
2009 jermar 709
 
1235 jermar 710
	/*
1239 jermar 711
	 * Now the destination address space area has been
712
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
1413 jermar 713
	 * attribute and set the sh_info.
1239 jermar 714
	 */	
2009 jermar 715
	mutex_lock(&dst_as->lock);	
1380 jermar 716
	mutex_lock(&dst_area->lock);
1239 jermar 717
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
1413 jermar 718
	dst_area->sh_info = sh_info;
1380 jermar 719
	mutex_unlock(&dst_area->lock);
2009 jermar 720
	mutex_unlock(&dst_as->lock);	
721
 
1235 jermar 722
	interrupts_restore(ipl);
723
 
724
	return 0;
725
}
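
/*
 * Illustrative sketch only (real callers are elsewhere in the kernel,
 * e.g. in the IPC memory sharing path): duplicating the mapping of the
 * area based at src_base in src_as into dst_as at dst_base, read-only.
 *
 *	rc = as_area_share(src_as, src_base, size, dst_as, dst_base,
 *	    AS_AREA_READ);
 */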
726
 
1423 jermar 727
/** Check access mode for address space area.
728
 *
729
 * The address space area must be locked prior to this call.
730
 *
731
 * @param area Address space area.
732
 * @param access Access mode.
733
 *
734
 * @return False if access violates area's permissions, true otherwise.
735
 */
736
bool as_area_check_access(as_area_t *area, pf_access_t access)
737
{
738
	int flagmap[] = {
739
		[PF_ACCESS_READ] = AS_AREA_READ,
740
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
741
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
742
	};
743
 
744
	if (!(area->flags & flagmap[access]))
745
		return false;
746
 
747
	return true;
748
}
749
 
703 jermar 750
/** Handle page fault within the current address space.
751
 *
1409 jermar 752
 * This is the high-level page fault handler. It decides
753
 * whether the page fault can be resolved by any backend
754
 * and if so, it invokes the backend to resolve the page
755
 * fault.
756
 *
703 jermar 757
 * Interrupts are assumed disabled.
758
 *
759
 * @param page Faulting page.
1411 jermar 760
 * @param access Access mode that caused the fault (i.e. read/write/exec).
1288 jermar 761
 * @param istate Pointer to interrupted state.
703 jermar 762
 *
1409 jermar 763
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
764
 * 	   fault was caused by copy_to_uspace() or copy_from_uspace().
703 jermar 765
 */
1780 jermar 766
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
703 jermar 767
{
1044 jermar 768
	pte_t *pte;
977 jermar 769
	as_area_t *area;
703 jermar 770
 
1380 jermar 771
	if (!THREAD)
1409 jermar 772
		return AS_PF_FAULT;
1380 jermar 773
 
703 jermar 774
	ASSERT(AS);
1044 jermar 775
 
1380 jermar 776
	mutex_lock(&AS->lock);
977 jermar 777
	area = find_area_and_lock(AS, page);	
703 jermar 778
	if (!area) {
779
		/*
780
		 * No area contained mapping for 'page'.
781
		 * Signal page fault to low-level handler.
782
		 */
1380 jermar 783
		mutex_unlock(&AS->lock);
1288 jermar 784
		goto page_fault;
703 jermar 785
	}
786
 
1239 jermar 787
	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
788
		/*
789
		 * The address space area is not fully initialized.
790
		 * Avoid possible race by returning error.
791
		 */
1380 jermar 792
		mutex_unlock(&area->lock);
793
		mutex_unlock(&AS->lock);
1288 jermar 794
		goto page_fault;		
1239 jermar 795
	}
796
 
1424 jermar 797
	if (!area->backend || !area->backend->page_fault) {
1409 jermar 798
		/*
799
		 * The address space area is not backed by any backend
800
		 * or the backend cannot handle page faults.
801
		 */
802
		mutex_unlock(&area->lock);
803
		mutex_unlock(&AS->lock);
804
		goto page_fault;		
805
	}
1179 jermar 806
 
1044 jermar 807
	page_table_lock(AS, false);
808
 
703 jermar 809
	/*
1044 jermar 810
	 * To avoid race condition between two page faults
811
	 * on the same address, we need to make sure
812
	 * the mapping has not been already inserted.
813
	 */
814
	if ((pte = page_mapping_find(AS, page))) {
815
		if (PTE_PRESENT(pte)) {
1423 jermar 816
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
2087 jermar 817
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
818
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
1423 jermar 819
				page_table_unlock(AS, false);
820
				mutex_unlock(&area->lock);
821
				mutex_unlock(&AS->lock);
822
				return AS_PF_OK;
823
			}
1044 jermar 824
		}
825
	}
1409 jermar 826
 
1044 jermar 827
	/*
1409 jermar 828
	 * Resort to the backend page fault handler.
703 jermar 829
	 */
1424 jermar 830
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
1409 jermar 831
		page_table_unlock(AS, false);
832
		mutex_unlock(&area->lock);
833
		mutex_unlock(&AS->lock);
834
		goto page_fault;
835
	}
703 jermar 836
 
1044 jermar 837
	page_table_unlock(AS, false);
1380 jermar 838
	mutex_unlock(&area->lock);
839
	mutex_unlock(&AS->lock);
1288 jermar 840
	return AS_PF_OK;
841
 
842
page_fault:
843
	if (THREAD->in_copy_from_uspace) {
844
		THREAD->in_copy_from_uspace = false;
2087 jermar 845
		istate_set_retaddr(istate,
846
		    (uintptr_t) &memcpy_from_uspace_failover_address);
1288 jermar 847
	} else if (THREAD->in_copy_to_uspace) {
848
		THREAD->in_copy_to_uspace = false;
2087 jermar 849
		istate_set_retaddr(istate,
850
		    (uintptr_t) &memcpy_to_uspace_failover_address);
1288 jermar 851
	} else {
852
		return AS_PF_FAULT;
853
	}
854
 
855
	return AS_PF_DEFER;
703 jermar 856
}
857
 
823 jermar 858
/** Switch address spaces.
703 jermar 859
 *
1380 jermar 860
 * Note that this function cannot sleep as it is essentially a part of
1415 jermar 861
 * scheduling. Sleeping here would lead to deadlock on wakeup.
1380 jermar 862
 *
823 jermar 863
 * @param old_as Old address space or NULL.
864
 * @param new_as New address space.
703 jermar 865
 */
2106 jermar 866
void as_switch(as_t *old_as, as_t *new_as)
703 jermar 867
{
868
	ipl_t ipl;
823 jermar 869
	bool needs_asid = false;
703 jermar 870
 
871
	ipl = interrupts_disable();
1415 jermar 872
	spinlock_lock(&inactive_as_with_asid_lock);
703 jermar 873
 
874
	/*
823 jermar 875
	 * First, take care of the old address space.
876
	 */	
2106 jermar 877
	if (old_as) {
878
		mutex_lock_active(&old_as->lock);
879
		ASSERT(old_as->cpu_refcount);
880
		if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
823 jermar 881
			/*
882
			 * The old address space is no longer active on
883
			 * any processor. It can be appended to the
884
			 * list of inactive address spaces with assigned
885
			 * ASID.
886
			 */
2106 jermar 887
			 ASSERT(old_as->asid != ASID_INVALID);
888
			 list_append(&old_as->inactive_as_with_asid_link,
2087 jermar 889
			     &inactive_as_with_asid_head);
823 jermar 890
		}
2106 jermar 891
		mutex_unlock(&old_as->lock);
1890 jermar 892
 
893
		/*
894
		 * Perform architecture-specific tasks when the address space
895
		 * is being removed from the CPU.
896
		 */
2106 jermar 897
		as_deinstall_arch(old_as);
823 jermar 898
	}
899
 
900
	/*
901
	 * Second, prepare the new address space.
902
	 */
2106 jermar 903
	mutex_lock_active(&new_as->lock);
904
	if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
905
		if (new_as->asid != ASID_INVALID) {
906
			list_remove(&new_as->inactive_as_with_asid_link);
2087 jermar 907
		} else {
908
			/*
2106 jermar 909
			 * Defer call to asid_get() until new_as->lock is released.
2087 jermar 910
			 */
911
			needs_asid = true;
912
		}
823 jermar 913
	}
2106 jermar 914
#ifdef AS_PAGE_TABLE
915
	SET_PTL0_ADDRESS(new_as->genarch.page_table);
916
#endif
917
	mutex_unlock(&new_as->lock);
823 jermar 918
 
919
	if (needs_asid) {
920
		/*
921
		 * Allocation of new ASID was deferred
922
		 * until now in order to avoid deadlock.
923
		 */
924
		asid_t asid;
925
 
926
		asid = asid_get();
2106 jermar 927
		mutex_lock_active(&new_as->lock);
928
		new_as->asid = asid;
929
		mutex_unlock(&new_as->lock);
823 jermar 930
	}
1415 jermar 931
	spinlock_unlock(&inactive_as_with_asid_lock);
823 jermar 932
	interrupts_restore(ipl);
933
 
934
	/*
703 jermar 935
	 * Perform architecture-specific steps.
727 jermar 936
	 * (e.g. write ASID to hardware register etc.)
703 jermar 937
	 */
2106 jermar 938
	as_install_arch(new_as);
703 jermar 939
 
2106 jermar 940
	AS = new_as;
703 jermar 941
}
754 jermar 942
 
1235 jermar 943
/** Convert address space area flags to page flags.
754 jermar 944
 *
1235 jermar 945
 * @param aflags Flags of some address space area.
754 jermar 946
 *
1235 jermar 947
 * @return Flags to be passed to page_mapping_insert().
754 jermar 948
 */
1235 jermar 949
int area_flags_to_page_flags(int aflags)
754 jermar 950
{
951
	int flags;
952
 
1178 jermar 953
	flags = PAGE_USER | PAGE_PRESENT;
754 jermar 954
 
1235 jermar 955
	if (aflags & AS_AREA_READ)
1026 jermar 956
		flags |= PAGE_READ;
957
 
1235 jermar 958
	if (aflags & AS_AREA_WRITE)
1026 jermar 959
		flags |= PAGE_WRITE;
960
 
1235 jermar 961
	if (aflags & AS_AREA_EXEC)
1026 jermar 962
		flags |= PAGE_EXEC;
963
 
1424 jermar 964
	if (aflags & AS_AREA_CACHEABLE)
1178 jermar 965
		flags |= PAGE_CACHEABLE;
966
 
754 jermar 967
	return flags;
968
}
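
/*
 * Informative example: for aflags equal to AS_AREA_READ | AS_AREA_WRITE |
 * AS_AREA_CACHEABLE, the function above returns PAGE_USER | PAGE_PRESENT |
 * PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE.
 */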
756 jermar 969
 
1235 jermar 970
/** Compute flags for virtual address translation subsystem.
971
 *
972
 * The address space area must be locked.
973
 * Interrupts must be disabled.
974
 *
975
 * @param a Address space area.
976
 *
977
 * @return Flags to be used in page_mapping_insert().
978
 */
1409 jermar 979
int as_area_get_flags(as_area_t *a)
1235 jermar 980
{
981
	return area_flags_to_page_flags(a->flags);
982
}
983
 
756 jermar 984
/** Create page table.
985
 *
986
 * Depending on architecture, create either address space
987
 * private or global page table.
988
 *
989
 * @param flags Flags saying whether the page table is for kernel address space.
990
 *
991
 * @return First entry of the page table.
992
 */
993
pte_t *page_table_create(int flags)
994
{
995
        ASSERT(as_operations);
996
        ASSERT(as_operations->page_table_create);
997
 
998
        return as_operations->page_table_create(flags);
999
}
977 jermar 1000
 
1468 jermar 1001
/** Destroy page table.
1002
 *
1003
 * Destroy page table in architecture specific way.
1004
 *
1005
 * @param page_table Physical address of PTL0.
1006
 */
1007
void page_table_destroy(pte_t *page_table)
1008
{
1009
        ASSERT(as_operations);
1010
        ASSERT(as_operations->page_table_destroy);
1011
 
1012
        as_operations->page_table_destroy(page_table);
1013
}
1014
 
1044 jermar 1015
/** Lock page table.
1016
 *
1017
 * This function should be called before any page_mapping_insert(),
1018
 * page_mapping_remove() and page_mapping_find().
1019
 * 
1020
 * Locking order is such that address space areas must be locked
1021
 * prior to this call. Address space can be locked prior to this
1022
 * call in which case the lock argument is false.
1023
 *
1024
 * @param as Address space.
1248 jermar 1025
 * @param lock If false, do not attempt to lock as->lock.
1044 jermar 1026
 */
1027
void page_table_lock(as_t *as, bool lock)
1028
{
1029
	ASSERT(as_operations);
1030
	ASSERT(as_operations->page_table_lock);
1031
 
1032
	as_operations->page_table_lock(as, lock);
1033
}
1034
 
1035
/** Unlock page table.
1036
 *
1037
 * @param as Address space.
1248 jermar 1038
 * @param unlock If false, do not attempt to unlock as->lock.
1044 jermar 1039
 */
1040
void page_table_unlock(as_t *as, bool unlock)
1041
{
1042
	ASSERT(as_operations);
1043
	ASSERT(as_operations->page_table_unlock);
1044
 
1045
	as_operations->page_table_unlock(as, unlock);
1046
}
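
/*
 * Typical locked lookup, as done by as_page_fault() and as_area_destroy()
 * in this file (sketch):
 *
 *	page_table_lock(as, false);
 *	pte = page_mapping_find(as, page);
 *	...
 *	page_table_unlock(as, false);
 */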
1047
 
977 jermar 1048
 
1049
/** Find address space area and lock it.
1050
 *
1051
 * The address space must be locked and interrupts must be disabled.
1052
 *
1053
 * @param as Address space.
1054
 * @param va Virtual address.
1055
 *
2087 jermar 1056
 * @return Locked address space area containing va on success or NULL on
1057
 *     failure.
977 jermar 1058
 */
1780 jermar 1059
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
977 jermar 1060
{
1061
	as_area_t *a;
1147 jermar 1062
	btree_node_t *leaf, *lnode;
1063
	int i;
977 jermar 1064
 
1147 jermar 1065
	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
1066
	if (a) {
1067
		/* va is the base address of an address space area */
1380 jermar 1068
		mutex_lock(&a->lock);
1147 jermar 1069
		return a;
1070
	}
1071
 
1072
	/*
1150 jermar 1073
	 * Search the leaf node and the rightmost record of its left neighbour
1147 jermar 1074
	 * to find out whether this is a miss or va belongs to an address
1075
	 * space area found there.
1076
	 */
1077
 
1078
	/* First, search the leaf node itself. */
1079
	for (i = 0; i < leaf->keys; i++) {
1080
		a = (as_area_t *) leaf->value[i];
1380 jermar 1081
		mutex_lock(&a->lock);
1147 jermar 1082
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
1083
			return a;
1084
		}
1380 jermar 1085
		mutex_unlock(&a->lock);
1147 jermar 1086
	}
977 jermar 1087
 
1147 jermar 1088
	/*
1150 jermar 1089
	 * Second, locate the left neighbour and test its last record.
1148 jermar 1090
	 * Because of its position in the B+tree, it must have base < va.
1147 jermar 1091
	 */
2087 jermar 1092
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
1093
	if (lnode) {
1147 jermar 1094
		a = (as_area_t *) lnode->value[lnode->keys - 1];
1380 jermar 1095
		mutex_lock(&a->lock);
1147 jermar 1096
		if (va < a->base + a->pages * PAGE_SIZE) {
1048 jermar 1097
			return a;
1147 jermar 1098
		}
1380 jermar 1099
		mutex_unlock(&a->lock);
977 jermar 1100
	}
1101
 
1102
	return NULL;
1103
}
1048 jermar 1104
 
1105
/** Check area conflicts with other areas.
1106
 *
1107
 * The address space must be locked and interrupts must be disabled.
1108
 *
1109
 * @param as Address space.
1110
 * @param va Starting virtual address of the area being tested.
1111
 * @param size Size of the area being tested.
1112
 * @param avoid_area Do not touch this area. 
1113
 *
1114
 * @return True if there is no conflict, false otherwise.
1115
 */
2087 jermar 1116
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
1117
			  as_area_t *avoid_area)
1048 jermar 1118
{
1119
	as_area_t *a;
1147 jermar 1120
	btree_node_t *leaf, *node;
1121
	int i;
1048 jermar 1122
 
1070 jermar 1123
	/*
1124
	 * We don't want any area to have conflicts with NULL page.
1125
	 */
1126
	if (overlaps(va, size, NULL, PAGE_SIZE))
1127
		return false;
1128
 
1147 jermar 1129
	/*
1130
	 * The leaf node is found in O(log n), where n is proportional to
1131
	 * the number of address space areas belonging to as.
1132
	 * The check for conflicts is then attempted on the rightmost
1150 jermar 1133
	 * record in the left neighbour, the leftmost record in the right
1134
	 * neighbour and all records in the leaf node itself.
1147 jermar 1135
	 */
1048 jermar 1136
 
1147 jermar 1137
	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
1138
		if (a != avoid_area)
1139
			return false;
1140
	}
1141
 
1142
	/* First, check the two border cases. */
1150 jermar 1143
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1147 jermar 1144
		a = (as_area_t *) node->value[node->keys - 1];
1380 jermar 1145
		mutex_lock(&a->lock);
1147 jermar 1146
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1147
			mutex_unlock(&a->lock);
1147 jermar 1148
			return false;
1149
		}
1380 jermar 1150
		mutex_unlock(&a->lock);
1147 jermar 1151
	}
2087 jermar 1152
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
1153
	if (node) {
1147 jermar 1154
		a = (as_area_t *) node->value[0];
1380 jermar 1155
		mutex_lock(&a->lock);
1147 jermar 1156
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1157
			mutex_unlock(&a->lock);
1147 jermar 1158
			return false;
1159
		}
1380 jermar 1160
		mutex_unlock(&a->lock);
1147 jermar 1161
	}
1162
 
1163
	/* Second, check the leaf node. */
1164
	for (i = 0; i < leaf->keys; i++) {
1165
		a = (as_area_t *) leaf->value[i];
1166
 
1048 jermar 1167
		if (a == avoid_area)
1168
			continue;
1147 jermar 1169
 
1380 jermar 1170
		mutex_lock(&a->lock);
1147 jermar 1171
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
1380 jermar 1172
			mutex_unlock(&a->lock);
1147 jermar 1173
			return false;
1174
		}
1380 jermar 1175
		mutex_unlock(&a->lock);
1048 jermar 1176
	}
1177
 
1070 jermar 1178
	/*
1179
	 * So far, the area does not conflict with other areas.
1180
	 * Check if it doesn't conflict with kernel address space.
1181
	 */	 
1182
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
1183
		return !overlaps(va, size, 
2087 jermar 1184
		    KERNEL_ADDRESS_SPACE_START,
1185
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
1070 jermar 1186
	}
1187
 
1048 jermar 1188
	return true;
1189
}
1235 jermar 1190
 
1380 jermar 1191
/** Return size of the address space area with given base.  */
1780 jermar 1192
size_t as_get_size(uintptr_t base)
1329 palkovsky 1193
{
1194
	ipl_t ipl;
1195
	as_area_t *src_area;
1196
	size_t size;
1197
 
1198
	ipl = interrupts_disable();
1199
	src_area = find_area_and_lock(AS, base);
1200
	if (src_area) {
1201
		size = src_area->pages * PAGE_SIZE;
1380 jermar 1202
		mutex_unlock(&src_area->lock);
1329 palkovsky 1203
	} else {
1204
		size = 0;
1205
	}
1206
	interrupts_restore(ipl);
1207
	return size;
1208
}
1209
 
1387 jermar 1210
/** Mark portion of address space area as used.
1211
 *
1212
 * The address space area must be already locked.
1213
 *
1214
 * @param a Address space area.
1215
 * @param page First page to be marked.
1216
 * @param count Number of pages to be marked.
1217
 *
1218
 * @return 0 on failure and 1 on success.
1219
 */
1780 jermar 1220
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1221
{
1222
	btree_node_t *leaf, *node;
1223
	count_t pages;
1224
	int i;
1225
 
1226
	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1227
	ASSERT(count);
1228
 
1229
	pages = (count_t) btree_search(&a->used_space, page, &leaf);
1230
	if (pages) {
1231
		/*
1232
		 * We hit the beginning of some used space.
1233
		 */
1234
		return 0;
1235
	}
1236
 
1437 jermar 1237
	if (!leaf->keys) {
1238
		btree_insert(&a->used_space, page, (void *) count, leaf);
1239
		return 1;
1240
	}
1241
 
1387 jermar 1242
	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1243
	if (node) {
2087 jermar 1244
		uintptr_t left_pg = node->key[node->keys - 1];
1245
		uintptr_t right_pg = leaf->key[0];
1246
		count_t left_cnt = (count_t) node->value[node->keys - 1];
1247
		count_t right_cnt = (count_t) leaf->value[0];
1387 jermar 1248
 
1249
		/*
1250
		 * Examine the possibility that the interval fits
1251
		 * somewhere between the rightmost interval of
1252
		 * the left neighbour and the first interval of the leaf.
1253
		 */
1254
 
1255
		if (page >= right_pg) {
1256
			/* Do nothing. */
2087 jermar 1257
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
1258
		    left_cnt * PAGE_SIZE)) {
1387 jermar 1259
			/* The interval intersects with the left interval. */
1260
			return 0;
2087 jermar 1261
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
1262
		    right_cnt * PAGE_SIZE)) {
1387 jermar 1263
			/* The interval intersects with the right interval. */
1264
			return 0;			
2087 jermar 1265
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1266
		    (page + count * PAGE_SIZE == right_pg)) {
1267
			/*
1268
			 * The interval can be added by merging the two already
1269
			 * present intervals.
1270
			 */
1403 jermar 1271
			node->value[node->keys - 1] += count + right_cnt;
1387 jermar 1272
			btree_remove(&a->used_space, right_pg, leaf);
1273
			return 1; 
2087 jermar 1274
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
1275
			/* 
1276
			 * The interval can be added by simply growing the left
1277
			 * interval.
1278
			 */
1403 jermar 1279
			node->value[node->keys - 1] += count;
1387 jermar 1280
			return 1;
2087 jermar 1281
		} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1282
			/*
2087 jermar 1283
			 * The interval can be added by simply moving the base of
1284
			 * the right interval down and increasing its size
1285
			 * accordingly.
1387 jermar 1286
			 */
1403 jermar 1287
			leaf->value[0] += count;
1387 jermar 1288
			leaf->key[0] = page;
1289
			return 1;
1290
		} else {
1291
			/*
1292
			 * The interval is between both neighbouring intervals,
1293
			 * but cannot be merged with any of them.
1294
			 */
2087 jermar 1295
			btree_insert(&a->used_space, page, (void *) count,
1296
			    leaf);
1387 jermar 1297
			return 1;
1298
		}
1299
	} else if (page < leaf->key[0]) {
1780 jermar 1300
		uintptr_t right_pg = leaf->key[0];
1387 jermar 1301
		count_t right_cnt = (count_t) leaf->value[0];
1302
 
1303
		/*
2087 jermar 1304
		 * Investigate the border case in which the left neighbour does
1305
		 * not exist but the interval fits from the left.
1387 jermar 1306
		 */
1307
 
2087 jermar 1308
		if (overlaps(page, count * PAGE_SIZE, right_pg,
1309
		    right_cnt * PAGE_SIZE)) {
1387 jermar 1310
			/* The interval intersects with the right interval. */
1311
			return 0;
2087 jermar 1312
		} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1313
			/*
2087 jermar 1314
			 * The interval can be added by moving the base of the
1315
			 * right interval down and increasing its size
1316
			 * accordingly.
1387 jermar 1317
			 */
1318
			leaf->key[0] = page;
1403 jermar 1319
			leaf->value[0] += count;
1387 jermar 1320
			return 1;
1321
		} else {
1322
			/*
1323
			 * The interval doesn't adjoin with the right interval.
1324
			 * It must be added individually.
1325
			 */
2087 jermar 1326
			btree_insert(&a->used_space, page, (void *) count,
1327
			    leaf);
1387 jermar 1328
			return 1;
1329
		}
1330
	}
1331
 
1332
	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
1333
	if (node) {
2087 jermar 1334
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
1335
		uintptr_t right_pg = node->key[0];
1336
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1337
		count_t right_cnt = (count_t) node->value[0];
1387 jermar 1338
 
1339
		/*
1340
		 * Examine the possibility that the interval fits
1341
		 * somewhere between the leftmost interval of
1342
		 * the right neighbour and the last interval of the leaf.
1343
		 */
1344
 
1345
		if (page < left_pg) {
1346
			/* Do nothing. */
2087 jermar 1347
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
1348
		    left_cnt * PAGE_SIZE)) {
1387 jermar 1349
			/* The interval intersects with the left interval. */
1350
			return 0;
2087 jermar 1351
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
1352
		    right_cnt * PAGE_SIZE)) {
1387 jermar 1353
			/* The interval intersects with the right interval. */
1354
			return 0;			
2087 jermar 1355
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1356
		    (page + count * PAGE_SIZE == right_pg)) {
1357
			/*
1358
			 * The interval can be added by merging the two already
1359
			 * present intervals.
1360
			 */
1403 jermar 1361
			leaf->value[leaf->keys - 1] += count + right_cnt;
1387 jermar 1362
			btree_remove(&a->used_space, right_pg, node);
1363
			return 1; 
2087 jermar 1364
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
1365
			/*
1366
			 * The interval can be added by simply growing the left
1367
			 * interval.
1368
			 */
1403 jermar 1369
			leaf->value[leaf->keys - 1] +=  count;
1387 jermar 1370
			return 1;
2087 jermar 1371
		} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1372
			/*
2087 jermar 1373
			 * The interval can be added by simply moving the base of
1374
			 * the right interval down and increasing its size
1375
			 * accordingly.
1387 jermar 1376
			 */
1403 jermar 1377
			node->value[0] += count;
1387 jermar 1378
			node->key[0] = page;
1379
			return 1;
1380
		} else {
1381
			/*
1382
			 * The interval is between both neighbouring intervals,
1383
			 * but cannot be merged with any of them.
1384
			 */
2087 jermar 1385
			btree_insert(&a->used_space, page, (void *) count,
1386
			    leaf);
1387 jermar 1387
			return 1;
1388
		}
1389
	} else if (page >= leaf->key[leaf->keys - 1]) {
1780 jermar 1390
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 jermar 1391
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1392
 
1393
		/*
2087 jermar 1394
		 * Investigate the border case in which the right neighbour
1395
		 * does not exist but the interval fits from the right.
1387 jermar 1396
		 */
1397
 
2087 jermar 1398
		if (overlaps(page, count * PAGE_SIZE, left_pg,
1399
		    left_cnt * PAGE_SIZE)) {
1403 jermar 1400
			/* The interval intersects with the left interval. */
1387 jermar 1401
			return 0;
2087 jermar 1402
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
1403
			/*
1404
			 * The interval can be added by growing the left
1405
			 * interval.
1406
			 */
1403 jermar 1407
			leaf->value[leaf->keys - 1] += count;
1387 jermar 1408
			return 1;
1409
		} else {
1410
			/*
1411
			 * The interval doesn't adjoin with the left interval.
1412
			 * It must be added individually.
1413
			 */
2087 jermar 1414
			btree_insert(&a->used_space, page, (void *) count,
1415
			    leaf);
1387 jermar 1416
			return 1;
1417
		}
1418
	}
1419
 
1420
	/*
2087 jermar 1421
	 * Note that if the algorithm made it thus far, the interval can fit
1422
	 * only between two other intervals of the leaf. The two border cases
1423
	 * were already resolved.
1387 jermar 1424
	 */
1425
	for (i = 1; i < leaf->keys; i++) {
1426
		if (page < leaf->key[i]) {
2087 jermar 1427
			uintptr_t left_pg = leaf->key[i - 1];
1428
			uintptr_t right_pg = leaf->key[i];
1429
			count_t left_cnt = (count_t) leaf->value[i - 1];
1430
			count_t right_cnt = (count_t) leaf->value[i];
1387 jermar 1431
 
1432
			/*
1433
			 * The interval fits between left_pg and right_pg.
1434
			 */
1435
 
2087 jermar 1436
			if (overlaps(page, count * PAGE_SIZE, left_pg,
1437
			    left_cnt * PAGE_SIZE)) {
1438
				/*
1439
				 * The interval intersects with the left
1440
				 * interval.
1441
				 */
1387 jermar 1442
				return 0;
2087 jermar 1443
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
1444
			    right_cnt * PAGE_SIZE)) {
1445
				/*
1446
				 * The interval intersects with the right
1447
				 * interval.
1448
				 */
1387 jermar 1449
				return 0;			
2087 jermar 1450
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
1451
			    (page + count * PAGE_SIZE == right_pg)) {
1452
				/*
1453
				 * The interval can be added by merging the two
1454
				 * already present intervals.
1455
				 */
1403 jermar 1456
				leaf->value[i - 1] += count + right_cnt;
1387 jermar 1457
				btree_remove(&a->used_space, right_pg, leaf);
1458
				return 1; 
2087 jermar 1459
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
1460
				/*
1461
				 * The interval can be added by simply growing
1462
				 * the left interval.
1463
				 */
1403 jermar 1464
				leaf->value[i - 1] += count;
1387 jermar 1465
				return 1;
2087 jermar 1466
			} else if (page + count * PAGE_SIZE == right_pg) {
1387 jermar 1467
				/*
2087 jermar 1468
				 * The interval can be added by simply moving the
1469
				 * base of the right interval down and
1470
				 * increasing its size accordingly.
1387 jermar 1471
			 	 */
1403 jermar 1472
				leaf->value[i] += count;
1387 jermar 1473
				leaf->key[i] = page;
1474
				return 1;
1475
			} else {
1476
				/*
2087 jermar 1477
				 * The interval is between both neighbouring
1478
				 * intervals, but cannot be merged with any of
1479
				 * them.
1387 jermar 1480
				 */
2087 jermar 1481
				btree_insert(&a->used_space, page,
1482
				    (void *) count, leaf);
1387 jermar 1483
				return 1;
1484
			}
1485
		}
1486
	}
1487
 
2087 jermar 1488
	panic("Inconsistency detected while adding %d pages of used space at "
1489
	    "%p.\n", count, page);
1387 jermar 1490
}
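
/*
 * Worked example (informative): if used_space already records an interval
 * of 2 pages starting at P and another of 3 pages starting at
 * P + 4 * PAGE_SIZE, then used_space_insert(a, P + 2 * PAGE_SIZE, 2)
 * merges all three into a single 7-page interval starting at P.
 */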
1491
 
1492
/** Mark portion of address space area as unused.
1493
 *
1494
 * The address space area must be already locked.
1495
 *
1496
 * @param a Address space area.
1497
 * @param page First page to be marked.
1498
 * @param count Number of pages to be marked.
1499
 *
1500
 * @return 0 on failure and 1 on success.
1501
 */
1780 jermar 1502
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
1387 jermar 1503
{
1504
	btree_node_t *leaf, *node;
1505
	count_t pages;
1506
	int i;
1507
 
1508
	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
1509
	ASSERT(count);
1510
 
1511
	pages = (count_t) btree_search(&a->used_space, page, &leaf);
1512
	if (pages) {
1513
		/*
1514
		 * We are lucky, page is the beginning of some interval.
1515
		 */
1516
		if (count > pages) {
1517
			return 0;
1518
		} else if (count == pages) {
1519
			btree_remove(&a->used_space, page, leaf);
1403 jermar 1520
			return 1;
1387 jermar 1521
		} else {
1522
			/*
1523
			 * Find the respective interval.
1524
			 * Decrease its size and relocate its start address.
1525
			 */
1526
			for (i = 0; i < leaf->keys; i++) {
1527
				if (leaf->key[i] == page) {
2087 jermar 1528
					leaf->key[i] += count * PAGE_SIZE;
1403 jermar 1529
					leaf->value[i] -= count;
1387 jermar 1530
					return 1;
1531
				}
1532
			}
1533
			goto error;
1534
		}
1535
	}
1536
 
1537
	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
1538
	if (node && page < leaf->key[0]) {
1780 jermar 1539
		uintptr_t left_pg = node->key[node->keys - 1];
1387 jermar 1540
		count_t left_cnt = (count_t) node->value[node->keys - 1];
1541
 
2087 jermar 1542
		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1543
		    count * PAGE_SIZE)) {
1544
			if (page + count * PAGE_SIZE ==
1545
			    left_pg + left_cnt * PAGE_SIZE) {
1387 jermar 1546
				/*
2087 jermar 1547
				 * The interval is contained in the rightmost
1548
				 * interval of the left neighbour and can be
1549
				 * removed by updating the size of the bigger
1550
				 * interval.
1387 jermar 1551
				 */
1403 jermar 1552
				node->value[node->keys - 1] -= count;
1387 jermar 1553
				return 1;
2087 jermar 1554
			} else if (page + count * PAGE_SIZE <
1555
			    left_pg + left_cnt*PAGE_SIZE) {
1403 jermar 1556
				count_t new_cnt;
1387 jermar 1557
 
1558
				/*
2087 jermar 1559
				 * The interval is contained in the rightmost
1560
				 * interval of the left neighbour but its
1561
				 * removal requires both updating the size of
1562
				 * the original interval and also inserting a
1563
				 * new interval.
1387 jermar 1564
				 */
2087 jermar 1565
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1566
				    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
1403 jermar 1567
				node->value[node->keys - 1] -= count + new_cnt;
2087 jermar 1568
				btree_insert(&a->used_space, page +
1569
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
1387 jermar 1570
				return 1;
1571
			}
1572
		}
1573
		return 0;
1574
	} else if (page < leaf->key[0]) {
1575
		return 0;
1576
	}
1577
 
1578
	if (page > leaf->key[leaf->keys - 1]) {
1780 jermar 1579
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
1387 jermar 1580
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
1581
 
2087 jermar 1582
		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1583
		    count * PAGE_SIZE)) {
1584
			if (page + count * PAGE_SIZE == 
1585
			    left_pg + left_cnt * PAGE_SIZE) {
1387 jermar 1586
				/*
2087 jermar 1587
				 * The interval is contained in the rightmost
1588
				 * interval of the leaf and can be removed by
1589
				 * updating the size of the bigger interval.
1387 jermar 1590
				 */
1403 jermar 1591
				leaf->value[leaf->keys - 1] -= count;
1387 jermar 1592
				return 1;
2087 jermar 1593
			} else if (page + count * PAGE_SIZE < left_pg +
1594
			    left_cnt * PAGE_SIZE) {
1403 jermar 1595
				count_t new_cnt;
1387 jermar 1596
 
1597
				/*
2087 jermar 1598
				 * The interval is contained in the rightmost
1599
				 * interval of the leaf but its removal
1600
				 * requires both updating the size of the
1601
				 * original interval and also inserting a new
1602
				 * interval.
1387 jermar 1603
				 */
2087 jermar 1604
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
1605
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
1403 jermar 1606
				leaf->value[leaf->keys - 1] -= count + new_cnt;
2087 jermar 1607
				btree_insert(&a->used_space, page +
1608
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
1387 jermar 1609
				return 1;
1610
			}
1611
		}
1612
		return 0;
1613
	}	
1614
 
1615
	/*
1616
	 * The border cases have been already resolved.
1617
	 * Now the interval can be only between intervals of the leaf. 
1618
	 */
1619
	for (i = 1; i < leaf->keys - 1; i++) {
1620
		if (page < leaf->key[i]) {
1780 jermar 1621
			uintptr_t left_pg = leaf->key[i - 1];
1387 jermar 1622
			count_t left_cnt = (count_t) leaf->value[i - 1];
1623
 
1624
			/*
2087 jermar 1625
			 * Now the interval is between intervals corresponding
1626
			 * to (i - 1) and i.
1387 jermar 1627
			 */
2087 jermar 1628
			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
1629
			    count * PAGE_SIZE)) {
1630
				if (page + count * PAGE_SIZE ==
1631
				    left_pg + left_cnt*PAGE_SIZE) {
1387 jermar 1632
					/*
2087 jermar 1633
					 * The interval is contained in the
1634
					 * interval (i - 1) of the leaf and can
1635
					 * be removed by updating the size of
1636
					 * the bigger interval.
1387 jermar 1637
					 */
1403 jermar 1638
					leaf->value[i - 1] -= count;
1387 jermar 1639
					return 1;
2087 jermar 1640
				} else if (page + count * PAGE_SIZE <
1641
				    left_pg + left_cnt * PAGE_SIZE) {
1403 jermar 1642
					count_t new_cnt;
1387 jermar 1643
 
1644
					/*
2087 jermar 1645
					 * The interval is contained in the
1646
					 * interval (i - 1) of the leaf but its
1647
					 * removal requires both updating the
1648
					 * size of the original interval and
1387 jermar 1649
					 * also inserting a new interval.
1650
					 */
2087 jermar 1651
					new_cnt = ((left_pg +
1652
					    left_cnt * PAGE_SIZE) -
1653
					    (page + count * PAGE_SIZE)) >>
1654
					    PAGE_WIDTH;
1403 jermar 1655
					leaf->value[i - 1] -= count + new_cnt;
2087 jermar 1656
					btree_insert(&a->used_space, page +
1657
					    count * PAGE_SIZE, (void *) new_cnt,
1658
					    leaf);
1387 jermar 1659
					return 1;
1660
				}
1661
			}
1662
			return 0;
1663
		}
1664
	}
1665
 
1666
error:
2087 jermar 1667
	panic("Inconsistency detected while removing %d pages of used space "
1668
	    "from %p.\n", count, page);
1387 jermar 1669
}
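
/*
 * Worked example (informative): given a recorded interval of 5 pages
 * starting at P, used_space_remove(a, P + PAGE_SIZE, 2) shrinks the
 * original record to 1 page at P and inserts a new 2-page record at
 * P + 3 * PAGE_SIZE.
 */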
1670
 
1409 jermar 1671
/** Remove reference to address space area share info.
1672
 *
1673
 * If the reference count drops to 0, the sh_info is deallocated.
1674
 *
1675
 * @param sh_info Pointer to address space area share info.
1676
 */
1677
void sh_info_remove_reference(share_info_t *sh_info)
1678
{
1679
	bool dealloc = false;
1680
 
1681
	mutex_lock(&sh_info->lock);
1682
	ASSERT(sh_info->refcount);
1683
	if (--sh_info->refcount == 0) {
1684
		dealloc = true;
1495 jermar 1685
		link_t *cur;
1409 jermar 1686
 
1687
		/*
1688
		 * Now walk carefully the pagemap B+tree and free/remove
1689
		 * reference from all frames found there.
1690
		 */
2087 jermar 1691
		for (cur = sh_info->pagemap.leaf_head.next;
1692
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
1409 jermar 1693
			btree_node_t *node;
1495 jermar 1694
			int i;
1409 jermar 1695
 
1495 jermar 1696
			node = list_get_instance(cur, btree_node_t, leaf_link);
1697
			for (i = 0; i < node->keys; i++) 
1780 jermar 1698
				frame_free((uintptr_t) node->value[i]);
1409 jermar 1699
		}
1700
 
1701
	}
1702
	mutex_unlock(&sh_info->lock);
1703
 
1704
	if (dealloc) {
1705
		btree_destroy(&sh_info->pagemap);
1706
		free(sh_info);
1707
	}
1708
}
1709
 
1235 jermar 1710
/*
1711
 * Address space related syscalls.
1712
 */
1713
 
1714
/** Wrapper for as_area_create(). */
1780 jermar 1715
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
1235 jermar 1716
{
2087 jermar 1717
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
1718
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
1780 jermar 1719
		return (unative_t) address;
1235 jermar 1720
	else
1780 jermar 1721
		return (unative_t) -1;
1235 jermar 1722
}
1723
 
1793 jermar 1724
/** Wrapper for as_area_resize(). */
1780 jermar 1725
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
1235 jermar 1726
{
1780 jermar 1727
	return (unative_t) as_area_resize(AS, address, size, 0);
1235 jermar 1728
}
1729
 
1793 jermar 1730
/** Wrapper for as_area_destroy(). */
1780 jermar 1731
unative_t sys_as_area_destroy(uintptr_t address)
1306 jermar 1732
{
1780 jermar 1733
	return (unative_t) as_area_destroy(AS, address);
1306 jermar 1734
}
1702 cejka 1735
 
1914 jermar 1736
/** Print out information about address space.
1737
 *
1738
 * @param as Address space.
1739
 */
1740
void as_print(as_t *as)
1741
{
1742
	ipl_t ipl;
1743
 
1744
	ipl = interrupts_disable();
1745
	mutex_lock(&as->lock);
1746
 
1747
	/* print out info about address space areas */
1748
	link_t *cur;
2087 jermar 1749
	for (cur = as->as_area_btree.leaf_head.next;
1750
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
1751
		btree_node_t *node;
1914 jermar 1752
 
2087 jermar 1753
		node = list_get_instance(cur, btree_node_t, leaf_link);
1754
 
1914 jermar 1755
		int i;
1756
		for (i = 0; i < node->keys; i++) {
1915 jermar 1757
			as_area_t *area = node->value[i];
1914 jermar 1758
 
1759
			mutex_lock(&area->lock);
1760
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
2087 jermar 1761
			    area, area->base, area->pages, area->base,
1762
			    area->base + area->pages*PAGE_SIZE);
1914 jermar 1763
			mutex_unlock(&area->lock);
1764
		}
1765
	}
1766
 
1767
	mutex_unlock(&as->lock);
1768
	interrupts_restore(ipl);
1769
}
1770
 
1757 jermar 1771
/** @}
1702 cejka 1772
 */