/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @defgroup mm Memory management
 * @ingroup kernel
 * @{
 * @}
 */

/** @addtogroup genericmm generic
 * @ingroup mm
 * @{
 */

/**
 * @file
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

as_operations_t *as_operations = NULL;

/**
 * This lock protects the inactive_as_with_asid_head list.
 * It must be acquired before the as_t mutex.
 */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}

/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
 * the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this address space,
	 * it is safe not to lock its mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
	page_table_destroy(as->page_table);

	interrupts_restore(ipl);

	free(as);
}
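
/*
 * Example: minimal lifecycle of an address space, shown as an
 * illustrative sketch only (not compiled). The flag value 0 is an
 * assumption for illustration; real callers are in the task management
 * code, which maintains as->refcount on behalf of the address space.
 */
#if 0
static void as_lifecycle_example(void)
{
	as_t *as;

	as = as_create(0);		/* ordinary (non-kernel) address space */
	/* ... attach the address space to a task, use it ... */
	ASSERT(as->refcount == 0);	/* all tasks must have detached by now */
	as_destroy(as);			/* reclaims areas, ASID and page tables */
}
#endif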

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
	       mem_backend_t *backend, mem_backend_data_t *backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->as = as;
	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data)
		a->backend_data = *backend_data;
	else
		memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);

	btree_create(&a->used_space);

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
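
/*
 * Example: creating an anonymous, cacheable, read/write area, shown as
 * an illustrative sketch only (not compiled). The base address and size
 * are made-up values; the call mirrors what sys_as_area_create() below
 * does on behalf of userspace.
 */
#if 0
static as_area_t *as_area_create_example(as_t *as)
{
	/* base must be page-aligned and must not conflict with existing areas */
	return as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
		16 * PAGE_SIZE, 0x10000000, AS_AREA_ATTR_NONE, &anon_backend, NULL);
}
#endif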

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		__address start_free = area->base + pages*PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				__address b = node->key[node->keys - 1];
				count_t c = (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

					if (b + c*PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits completely
						 * in the resized address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding to b and c
					 * overlaps with the resized address space area.
					 */

					cond = false;	/* we are almost done */
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free, c - i))
						panic("Could not remove used space.\n");
				} else {
					/*
					 * The interval of used space can be completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b + i*PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
					if (area->backend && area->backend->frame_free) {
						area->backend->frame_free(area,
							b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b + i*PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
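
/*
 * Example: shrinking an area to half its size, shown as an illustrative
 * sketch only (not compiled). The base address is a made-up value; note
 * that the new size is measured from the passed address, which here is
 * the base of the area.
 */
#if 0
static int as_area_resize_example(as_t *as)
{
	/* assume an area of 16 pages was created at 0x10000000 */
	return as_area_resize(as, 0x10000000, 8 * PAGE_SIZE, 0);
}
#endif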

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
	as_area_t *area;
	__address base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			__address b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j*PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
				if (area->backend && area->backend->frame_free) {
					area->backend->frame_free(area,
						b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j*PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */
	tlb_invalidate_pages(AS->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	btree_destroy(&area->used_space);

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success, ENOENT if there is no such task or
 *	   no such address space area, EPERM if there was a problem
 *	   in accepting the area, ENOMEM if there was a problem in
 *	   allocating the destination address space area, or ENOTSUP
 *	   if the area's backend does not support sharing.
 */
int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
		  as_t *dst_as, __address dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	/*
	 * Now we are committed to sharing the area.
	 * First prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
				  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);

	interrupts_restore(ipl);

	return 0;
}
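
/*
 * Example: sharing a read/write area from one address space into another
 * in read-only mode, shown as an illustrative sketch only (not compiled).
 * The addresses and size are made-up values; acc_size must equal the
 * exact size of the source area.
 */
#if 0
static int as_area_share_example(as_t *src_as, as_t *dst_as)
{
	/* assume src_as has a 4-page area at 0x10000000 with AS_AREA_READ | AS_AREA_WRITE */
	return as_area_share(src_as, 0x10000000, 4 * PAGE_SIZE,
		dst_as, 0x20000000, AS_AREA_READ);
}
#endif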

/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *	   fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
				(access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
				(access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
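
/*
 * Example: skeleton of a memory backend as this file uses one, shown as
 * an illustrative sketch only (not compiled). The exact layout of
 * mem_backend_t lives in mm/as.h; the field names and signatures below
 * are inferred from the calls made above (page_fault, frame_free and
 * share) and anything beyond that is an assumption.
 */
#if 0
static int example_page_fault(as_area_t *area, __address addr, pf_access_t access)
{
	/*
	 * A real backend would allocate a frame here, call
	 * page_mapping_insert() and used_space_insert(), and only then
	 * report success.
	 */
	return AS_PF_OK;
}

static void example_frame_free(as_area_t *area, __address page, __address frame)
{
	/* release the frame backing 'page' */
}

static void example_share(as_area_t *area)
{
	/* record already resolved pages in area->sh_info->pagemap */
}

mem_backend_t example_backend = {
	.page_fault = example_page_fault,
	.frame_free = example_frame_free,
	.share = example_share
};
#endif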

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->cpu_refcount);
		if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}
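
/*
 * For instance, a cacheable read/write area translates as follows
 * (illustrative only):
 *
 *   area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE)
 *	== PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE
 */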

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}
977 jermar 938
 
939
/** Find address space area and lock it.
940
 *
941
 * The address space must be locked and interrupts must be disabled.
942
 *
943
 * @param as Address space.
944
 * @param va Virtual address.
945
 *
946
 * @return Locked address space area containing va on success or NULL on failure.
947
 */
948
as_area_t *find_area_and_lock(as_t *as, __address va)
949
{
950
	as_area_t *a;
1147 jermar 951
	btree_node_t *leaf, *lnode;
952
	int i;
977 jermar 953
 
1147 jermar 954
	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
955
	if (a) {
956
		/* va is the base address of an address space area */
1380 jermar 957
		mutex_lock(&a->lock);
1147 jermar 958
		return a;
959
	}
960
 
961
	/*
1150 jermar 962
	 * Search the leaf node and the righmost record of its left neighbour
1147 jermar 963
	 * to find out whether this is a miss or va belongs to an address
964
	 * space area found there.
965
	 */
966
 
967
	/* First, search the leaf node itself. */
968
	for (i = 0; i < leaf->keys; i++) {
969
		a = (as_area_t *) leaf->value[i];
1380 jermar 970
		mutex_lock(&a->lock);
1147 jermar 971
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
972
			return a;
973
		}
1380 jermar 974
		mutex_unlock(&a->lock);
1147 jermar 975
	}
977 jermar 976
 
1147 jermar 977
	/*
1150 jermar 978
	 * Second, locate the left neighbour and test its last record.
1148 jermar 979
	 * Because of its position in the B+tree, it must have base < va.
1147 jermar 980
	 */
1150 jermar 981
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1147 jermar 982
		a = (as_area_t *) lnode->value[lnode->keys - 1];
1380 jermar 983
		mutex_lock(&a->lock);
1147 jermar 984
		if (va < a->base + a->pages * PAGE_SIZE) {
1048 jermar 985
			return a;
1147 jermar 986
		}
1380 jermar 987
		mutex_unlock(&a->lock);
977 jermar 988
	}
989
 
990
	return NULL;
991
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}
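
/*
 * Example (illustrative only): an area request at address 0x0 of any
 * size is rejected because overlaps(0x0, size, NULL, PAGE_SIZE) reports
 * a conflict with the NULL page, which keeps dereferences of NULL
 * pointers faulting.
 */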

/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base of the right
			 * interval down and increasing its size accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		__address right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does not
		 * exist but the interval fits from the left.
		 */

		if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the right interval down
			 * and increasing its size accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base of the right
			 * interval down and increasing its size accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour does not
		 * exist but the interval fits from the right.
		 */

		if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt*PAGE_SIZE == page) {
			/* The interval can be added by growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit only
	 * between two other intervals of the leaf. The two border cases were already
	 * resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
				/* The interval intersects with the left interval. */
				return 0;
			} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
				/* The interval intersects with the right interval. */
				return 0;
			} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
				/* The interval can be added by merging the two already present intervals. */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt*PAGE_SIZE) {
				/* The interval can be added by simply growing the left interval. */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count*PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving the base of the right
				 * interval down and increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring intervals,
				 * but cannot be merged with any of them.
				 */
				btree_insert(&a->used_space, page, (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
}
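
/*
 * For instance (illustrative only, assuming 4 KiB pages), if the
 * used_space tree records the intervals [0x10000, 2 pages] and
 * [0x13000, 1 page], then inserting [0x12000, 1 page] bridges the gap:
 * the left interval grows to cover all four pages and the right
 * interval is removed from the tree.
 */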

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		__address left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		link_t *cur;

		dealloc = true;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free(ADDR2PFN((__address) node->value[i]));
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}

/** @}
 */