/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
/**
 * @file	as.c
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 */
 
#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>
 
as_operations_t *as_operations = NULL;

/** This lock protects the inactive_as_with_asid_head list. It must be acquired before any as_t mutex. */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);
 
/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}
 
/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 *
 * @return Pointer to the newly created address space.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
 
/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
 * the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	link_t *cur;

	ASSERT(as->refcount == 0);

	/*
	 * Since there are no references to this address space,
	 * it is safe not to lock its mutex.
	 */

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (!as->cpu_refcount)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 */
	for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++)
			as_area_destroy(as, node->key[i]);
	}

	btree_destroy(&as->as_area_btree);
	page_table_destroy(as->page_table);

	interrupts_restore(ipl);

	free(as);
}
 
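/*
 * Usage sketch: a typical address space lifecycle as seen by a task
 * creation path. Illustrative only; the actual call sites and refcount
 * handling live in the task code, not in this file:
 *
 *	as_t *as = as_create(0);	// userspace AS; ASID assigned lazily
 *	...				// attach to a task, create areas, run
 *	if (--as->refcount == 0)	// the last referencing task is gone
 *		as_destroy(as);		// frees areas, page table and ASID
 */
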
/** Create address space area with common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to custom backend data.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
	       mem_backend_t *backend, mem_backend_data_t *backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->as = as;
	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data)
		a->backend_data = *backend_data;
	else
		memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);

	btree_create(&a->used_space);

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
 
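/*
 * Usage sketch: creating a three-page anonymous, cacheable, read/write
 * area in the current address space. Illustrative only; 'base' stands
 * for any page-aligned userspace address and anon_backend is the same
 * backend used by sys_as_area_create() below:
 *
 *	as_area_t *a = as_area_create(AS,
 *		AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *		3 * PAGE_SIZE, base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *	// NULL on unaligned base, zero size, write+execute flags or conflict
 */
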
/** Find address space area and resize it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		__address start_free = area->base + pages*PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				__address b = node->key[node->keys - 1];
				count_t c = (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

					if (b + c*PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits completely
						 * in the resized address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding to b and c
					 * overlaps with the resized address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free, c - i))
						panic("Could not remove used space.\n");
				} else {
					/*
					 * The interval of used space can be completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b + i*PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
					if (area->backend && area->backend->frame_free) {
						area->backend->frame_free(area,
							b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b + i*PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
 
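/*
 * Usage sketch: shrinking an existing area to a single page. Illustrative
 * only; note that 'size' is measured from 'address', so passing the area
 * base as 'address' sets the size of the whole area:
 *
 *	int rc = as_area_resize(AS, area_base, 1 * PAGE_SIZE, 0);
 *	// 0 on success; ENOENT, ENOTSUP, EPERM or EADDRNOTAVAIL otherwise
 */
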
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
	as_area_t *area;
	__address base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			__address b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j*PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
				if (area->backend && area->backend->frame_free) {
					area->backend->frame_free(area,
						b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j*PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */
	tlb_invalidate_pages(AS->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	btree_destroy(&area->used_space);

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 * Note that this must operate on 'as', not on the global AS,
	 * since 'as' need not be the current address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}
 
/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or
 *	   if there is no such address space area,
 *	   EPERM if there was a problem in accepting the area or
 *	   ENOMEM if there was a problem in allocating destination
 *	   address space area. ENOTSUP is returned if an attempt
 *	   to share a non-anonymous address space area is detected.
 */
int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
		  as_t *dst_as, __address dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	/*
	 * Now we are committed to sharing the area.
	 * First prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
				  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);

	interrupts_restore(ipl);

	return 0;
}
 
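/*
 * Usage sketch: sharing an area read-only into the current address space,
 * roughly what an IPC memory-sharing path would do. Illustrative only;
 * src_task, src_base, dst_base and acc_size are placeholders, and
 * acc_size must match the source area size exactly:
 *
 *	int rc = as_area_share(src_task->as, src_base, acc_size,
 *		AS, dst_base, AS_AREA_READ);
 *	// 0 on success; ENOENT, ENOTSUP, EPERM or ENOMEM otherwise
 */
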
/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}
 
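/*
 * Example: for a locked area created with AS_AREA_READ only,
 * as_area_check_access(area, PF_ACCESS_WRITE) returns false while
 * as_area_check_access(area, PF_ACCESS_READ) returns true.
 */
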
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *	   fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
				(access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
				(access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
 
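/*
 * Usage sketch: how an architecture-specific low-level fault handler is
 * expected to call in. Illustrative only; 'va' stands for the faulting
 * virtual address taken from the trap state:
 *
 *	if (as_page_fault(ALIGN_DOWN(va, PAGE_SIZE), PF_ACCESS_READ, istate)
 *	    == AS_PF_FAULT)
 *		panic("page fault at %P\n", va);
 *	// AS_PF_OK: the backend installed a mapping; AS_PF_DEFER: the fault
 *	// came from copy_from_uspace()/copy_to_uspace() and was redirected
 *	// to their failover addresses
 */
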
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->cpu_refcount);
		if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
 
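/*
 * Usage sketch: the scheduler is the intended caller, switching address
 * spaces when the thread being scheduled belongs to a different task.
 * Illustrative only; 'old_as' and 'new_as' are placeholders:
 *
 *	if (old_as != new_as)
 *		as_switch(old_as, new_as);	// must not sleep
 */
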
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}
 
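/*
 * Example: aflags == (AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE)
 * yields (PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE).
 */
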
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}
 
/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}
 
/** Destroy page table.
 *
 * Destroy page table in architecture-specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
}
 
/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can instead be locked prior
 * to this call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}
 
/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}
 
/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}
 
/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}
 
/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		__address right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does not
		 * exist but the interval fits from the left.
		 */

		if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the right interval down
			 * and increasing its size accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour does not
		 * exist but the interval fits from the right.
		 */

		if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt*PAGE_SIZE == page) {
			/* The interval can be added by growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit only
	 * between two other intervals of the leaf. The two border cases were already
	 * resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
				/* The interval intersects with the left interval. */
				return 0;
			} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
				/* The interval intersects with the right interval. */
				return 0;
			} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
				/* The interval can be added by merging the two already present intervals. */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt*PAGE_SIZE) {
				/* The interval can be added by simply growing the left interval. */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count*PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving base of the right
				 * interval down and increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring intervals,
				 * but cannot be merged with any of them.
				 */
				btree_insert(&a->used_space, page, (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
}
 
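/*
 * Worked example (assuming 4 KiB pages): with used intervals
 * [0x1000, 2 pages] and [0x4000, 1 page] recorded in one leaf,
 * used_space_insert(a, 0x3000, 1) takes the merge branch above:
 * the left interval grows by count + right_cnt and the right interval
 * is removed, leaving a single interval [0x1000, 4 pages].
 */
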
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		__address left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
}
 
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		link_t *cur;

		dealloc = true;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free(ADDR2PFN((__address) node->value[i]));
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}
 
/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}