/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/**
 * Each architecture decides what functions will be used to carry out
 * address space operations such as creating or locking page tables.
 */
as_operations_t *as_operations = NULL;

/**
 * Slab for as_t objects.
 */
static slab_cache_t *as_slab;

/**
 * This lock protects the inactive_as_with_asid_head list. It must be acquired
 * before the as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

static int as_constructor(void *obj, int flags)
{
	as_t *as = (as_t *) obj;
	int rc;

	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);

	rc = as_constructor_arch(as, flags);

	return rc;
}

static int as_destructor(void *obj)
{
	as_t *as = (as_t *) obj;

	return as_destructor_arch(as);
}

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();

	as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
	    as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is
 *     created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) slab_alloc(as_slab, 0);
	(void) as_create_arch(as, 0);

	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
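
/*
 * Example (an illustrative sketch, not a call site from this file): a fresh
 * userspace address space is obtained by passing zero flags, in which case
 * the ASID starts out invalid and is assigned lazily by as_switch():
 *
 *	as_t *as = as_create(0);
 *	if (!as)
 *		panic("can't create address space\n");
 */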
 
/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is
 * zero), the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this address space,
	 * it is safe not to lock its mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next,
		    btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
	page_table_destroy(as->page_table);

	interrupts_restore(ipl);

	slab_free(as_slab, as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *
as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
    mem_backend_t *backend, mem_backend_data_t *backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->as = as;
	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data)
		a->backend_data = *backend_data;
	else
		memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
		    0);

	btree_create(&a->used_space);

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
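
/*
 * Example (an illustrative sketch; the base address 0x10000000 is a
 * hypothetical page-aligned value): creating an anonymous, cacheable,
 * read-write area in the current address space, much like
 * sys_as_area_create() does at the end of this file:
 *
 *	as_area_t *a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE |
 *	    AS_AREA_CACHEABLE, 4 * PAGE_SIZE, 0x10000000, AS_AREA_ATTR_NONE,
 *	    &anon_backend, NULL);
 *	if (!a)
 *		return ENOMEM;
 */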
 
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be
 *     page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		uintptr_t start_free = area->base + pages * PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +
		    pages * PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node =
			    list_get_instance(area->used_space.leaf_head.prev,
			    btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				uintptr_t b = node->key[node->keys - 1];
				count_t c =
				    (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c * PAGE_SIZE, area->base,
				    pages * PAGE_SIZE)) {

					if (b + c * PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits
						 * completely in the resized
						 * address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding
					 * to b and c overlaps with the resized
					 * address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free,
					    c - i))
						panic("Could not remove used "
						    "space.\n");
				} else {
					/*
					 * The interval of used space can be
					 * completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used "
						    "space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b +
					    i * PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) &&
					    PTE_PRESENT(pte));
					if (area->backend &&
					    area->backend->frame_free) {
						area->backend->frame_free(area,
						    b + i * PAGE_SIZE,
						    PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b +
					    i * PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
		    area->pages - pages);
		tlb_shootdown_finalize();

		/*
		 * Invalidate software translation caches (e.g. TSB on sparc64).
		 */
		as_invalidate_translation_cache(as, area->base +
		    pages * PAGE_SIZE, area->pages - pages);
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
		    area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
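
/*
 * Example (an illustrative sketch continuing the hypothetical area above):
 * growing the four-page anonymous area at 0x10000000 to eight pages;
 * shrinking works the same way with a smaller size:
 *
 *	int rc = as_area_resize(AS, 0x10000000, 8 * PAGE_SIZE, 0);
 *	if (rc != 0)
 *		return rc;
 *
 * On failure, rc is ENOENT, ENOTSUP, EPERM or EADDRNOTAVAIL, as described
 * above.
 */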
 
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, uintptr_t address)
{
	as_area_t *area;
	uintptr_t base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next;
	    cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			uintptr_t b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j * PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) &&
				    PTE_PRESENT(pte));
				if (area->backend &&
				    area->backend->frame_free) {
					area->backend->frame_free(area, b +
					    j * PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j * PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */
	tlb_invalidate_pages(as->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	/*
	 * Invalidate potential software translation caches (e.g. TSB on
	 * sparc64).
	 */
	as_invalidate_translation_cache(as, area->base, area->pages);

	btree_destroy(&area->used_space);

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or if there is no
 * such address space area, EPERM if there was a problem in accepting the area
 * or ENOMEM if there was a problem in allocating destination address space
 * area. ENOTSUP is returned if the address space area backend does not support
 * sharing or if the kernel detects an attempt to create an illegal address
 * alias.
 */
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size ||
	    (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

#ifdef CONFIG_VIRT_IDX_DCACHE
	if (!(dst_flags_mask & AS_AREA_EXEC)) {
		if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
			/*
			 * Refuse to create an illegal address alias.
			 */
			mutex_unlock(&src_area->lock);
			mutex_unlock(&src_as->lock);
			interrupts_restore(ipl);
			return ENOTSUP;
		}
	}
#endif /* CONFIG_VIRT_IDX_DCACHE */

	/*
	 * Now we are committed to sharing the area.
	 * First, prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
	    AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_as->lock);
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);
	mutex_unlock(&dst_as->lock);

	interrupts_restore(ipl);

	return 0;
}
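
/*
 * Example (an illustrative sketch; src_as and dst_as stand for two already
 * existing address spaces and 0x10000000 for the hypothetical area created
 * above): sharing the four-page area read-only into dst_as at the same base:
 *
 *	int rc = as_area_share(src_as, 0x10000000, 4 * PAGE_SIZE,
 *	    dst_as, 0x10000000, AS_AREA_READ | AS_AREA_CACHEABLE);
 *
 * Note that acc_size must match the source area size exactly and that
 * dst_flags_mask must be a subset of the source area flags, otherwise EPERM
 * is returned.
 */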
 
/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *	   fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
			    (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
			    (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate,
		    (uintptr_t) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->cpu_refcount);
		if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link,
			    &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);

		/*
		 * Perform architecture-specific tasks when the address space
		 * is being removed from the CPU.
		 */
		as_deinstall_arch(old);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID) {
			list_remove(&new->inactive_as_with_asid_link);
		} else {
			/*
			 * Defer call to asid_get() until new->lock is released.
			 */
			needs_asid = true;
		}
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}
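
/*
 * Worked example: for an area created with AS_AREA_READ | AS_AREA_WRITE |
 * AS_AREA_CACHEABLE, the conversion above yields PAGE_USER | PAGE_PRESENT |
 * PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE, i.e. the page flags that
 * as_area_get_flags() below produces for page_mapping_insert().
 */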
 
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture-specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on
 *     failure.
 */
as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
	if (lnode) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
    as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
	if (node) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check if it doesn't conflict with kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
		    KERNEL_ADDRESS_SPACE_START,
		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(uintptr_t base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = node->key[node->keys - 1];
		uintptr_t right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		uintptr_t right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does
		 * not exist but the interval fits from the left.
		 */

		if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the
			 * right interval down and increasing its size
			 * accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		uintptr_t right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
		count_t right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count * PAGE_SIZE, right_pg,
		    right_cnt * PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
		    (page + count * PAGE_SIZE == right_pg)) {
			/*
			 * The interval can be added by merging the two already
			 * present intervals.
			 */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt * PAGE_SIZE) {
			/*
			 * The interval can be added by simply growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count * PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of
			 * the right interval down and increasing its size
			 * accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour
		 * does not exist but the interval fits from the right.
		 */

		if (overlaps(page, count * PAGE_SIZE, left_pg,
		    left_cnt * PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt * PAGE_SIZE == page) {
			/*
			 * The interval can be added by growing the left
			 * interval.
			 */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count,
			    leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit
	 * only between two other intervals of the leaf. The two border cases
	 * were already resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			uintptr_t right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1];
			count_t right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count * PAGE_SIZE, left_pg,
			    left_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the left
				 * interval.
				 */
				return 0;
			} else if (overlaps(page, count * PAGE_SIZE, right_pg,
			    right_cnt * PAGE_SIZE)) {
				/*
				 * The interval intersects with the right
				 * interval.
				 */
				return 0;
			} else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
			    (page + count * PAGE_SIZE == right_pg)) {
				/*
				 * The interval can be added by merging the two
				 * already present intervals.
				 */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval can be added by simply growing
				 * the left interval.
				 */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count * PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving
				 * base of the right interval down and
				 * increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring
				 * intervals, but cannot be merged with any of
				 * them.
				 */
				btree_insert(&a->used_space, page,
				    (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at "
	    "%p.\n", count, page);
}
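
/*
 * Worked example (assuming 4 KiB pages): if the used_space B+tree already
 * records an interval starting at 0x40000 with a count of 2 pages (covering
 * 0x40000 - 0x41fff), then used_space_insert(a, 0x42000, 3) simply grows that
 * interval to 5 pages, while used_space_insert(a, 0x41000, 1) returns 0
 * because the new interval intersects the existing one.
 */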
 
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, uintptr_t page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count * PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		uintptr_t left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour and can be
				 * removed by updating the size of the bigger
				 * interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE <
			    left_pg + left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the left neighbour but its
				 * removal requires both updating the size of
				 * the original interval and also inserting a
				 * new interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		uintptr_t left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
		    count * PAGE_SIZE)) {
			if (page + count * PAGE_SIZE ==
			    left_pg + left_cnt * PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf and can be removed by
				 * updating the size of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count * PAGE_SIZE < left_pg +
			    left_cnt * PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost
				 * interval of the leaf but its removal
				 * requires both updating the size of the
				 * original interval and also inserting a new
				 * interval.
				 */
				new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
				    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page +
				    count * PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			uintptr_t left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding
			 * to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
			    count * PAGE_SIZE)) {
				if (page + count * PAGE_SIZE ==
				    left_pg + left_cnt * PAGE_SIZE) {
					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf and can
					 * be removed by updating the size of
					 * the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count * PAGE_SIZE <
				    left_pg + left_cnt * PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the
					 * interval (i - 1) of the leaf but its
					 * removal requires both updating the
					 * size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg +
					    left_cnt * PAGE_SIZE) -
					    (page + count * PAGE_SIZE)) >>
					    PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page +
					    count * PAGE_SIZE, (void *) new_cnt,
					    leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space "
	    "from %p.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		dealloc = true;
		link_t *cur;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next;
		    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free((uintptr_t) node->value[i]);
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
	    AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (unative_t) address;
	else
		return (unative_t) -1;
}

/** Wrapper for as_area_resize(). */
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
{
	return (unative_t) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
unative_t sys_as_area_destroy(uintptr_t address)
{
	return (unative_t) as_area_destroy(AS, address);
}

/** Print out information about address space.
 *
 * @param as Address space.
 */
void as_print(as_t *as)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/* Print out info about address space areas. */
	link_t *cur;
	for (cur = as->as_area_btree.leaf_head.next;
	    cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;

		node = list_get_instance(cur, btree_node_t, leaf_link);

		int i;
		for (i = 0; i < node->keys; i++) {
			as_area_t *area = node->value[i];

			mutex_lock(&area->lock);
			printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
			    area, area->base, area->pages, area->base,
			    area->base + area->pages * PAGE_SIZE);
			mutex_unlock(&area->lock);
		}
	}

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
}

/** @}
 */