/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * Functionality provided by this file allows one to
 * create an address space and create, resize and share
 * address space areas.
 *
 * @see page.c
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

as_operations_t *as_operations = NULL;

/** This lock protects the inactive_as_with_asid_head list. It must be acquired before the as_t mutex. */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->cpu_refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}

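/*
 * Usage sketch (illustrative, not taken from the original sources): a
 * regular userspace address space is created as as_create(0), which
 * leaves its ASID unassigned (ASID_INVALID) until the address space is
 * first installed by as_switch().
 */
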
/** Destroy address space.
 *
 * When there are no tasks referencing this address space (i.e. its refcount is zero),
 * the address space can be destroyed.
 */
void as_destroy(as_t *as)
{
	ipl_t ipl;
	bool cond;

	ASSERT(as->refcount == 0);

	/*
	 * Since there is no reference to this address space,
	 * it is safe not to lock its mutex.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);
	if (as->asid != ASID_INVALID && as != AS_KERNEL) {
		if (as != AS && as->cpu_refcount == 0)
			list_remove(&as->inactive_as_with_asid_link);
		asid_put(as->asid);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);

	/*
	 * Destroy address space areas of the address space.
	 * The B+tree must be walked carefully because it is
	 * also being destroyed.
	 */
	for (cond = true; cond; ) {
		btree_node_t *node;

		ASSERT(!list_empty(&as->as_area_btree.leaf_head));
		node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);

		if ((cond = node->keys)) {
			as_area_destroy(as, node->key[0]);
		}
	}

	btree_destroy(&as->as_area_btree);
	page_table_destroy(as->page_table);

	interrupts_restore(ipl);

	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
	       mem_backend_t *backend, mem_backend_data_t *backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->as = as;
	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data)
		a->backend_data = *backend_data;
	else
		memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);

	btree_create(&a->used_space);

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}

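/*
 * Usage sketch: an anonymous, cacheable user area can be set up the same
 * way sys_as_area_create() does it at the end of this file, e.g.:
 *
 *	as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
 *		size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *
 * (the concrete flag combination here is illustrative only).
 */
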
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->backend == &phys_backend) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		__address start_free = area->base + pages*PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Start TLB shootdown sequence.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				__address b = node->key[node->keys - 1];
				count_t c = (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

					if (b + c*PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits completely
						 * in the resized address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding to b and c
					 * overlaps with the resized address space area.
					 */

					cond = false;	/* we are almost done */
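					/*
					 * i is the number of pages of this interval
					 * that lie below start_free; those pages stay
					 * mapped and only the remaining c - i pages
					 * are unmapped below.
					 */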
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free, c - i))
						panic("Could not remove used space.\n");
				} else {
					/*
					 * The interval of used space can be completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b + i*PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
					if (area->backend && area->backend->frame_free) {
						area->backend->frame_free(area,
							b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b + i*PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}

		/*
		 * Finish TLB shootdown sequence.
		 */
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
	as_area_t *area;
	__address base;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;

	/*
	 * Start TLB shootdown sequence.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);

	/*
	 * Visit only the pages mapped by used_space B+tree.
	 */
	for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			__address b = node->key[i];
			count_t j;
			pte_t *pte;

			for (j = 0; j < (count_t) node->value[i]; j++) {
				page_table_lock(as, false);
				pte = page_mapping_find(as, b + j*PAGE_SIZE);
				ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
				if (area->backend && area->backend->frame_free) {
					area->backend->frame_free(area,
						b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
				}
				page_mapping_remove(as, b + j*PAGE_SIZE);
				page_table_unlock(as, false);
			}
		}
	}

	/*
	 * Finish TLB shootdown sequence.
	 */
	tlb_invalidate_pages(AS->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	btree_destroy(&area->used_space);

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&AS->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&AS->lock);
	interrupts_restore(ipl);
	return 0;
}

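/*
 * Note: both as_area_destroy() and the shrinking path of as_area_resize()
 * follow the same TLB shootdown pattern: tlb_shootdown_start(), unmap the
 * pages, tlb_invalidate_pages(), tlb_shootdown_finalize(), so that other
 * processors drop stale TLB entries before the frames can be reused.
 */
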
/** Share address space area with another or the same address space.
 *
 * Address space area mapping is shared with a new address space area.
 * If the source address space area has not been shared so far,
 * a new sh_info is created. The new address space area simply gets the
 * sh_info of the source area. The process of duplicating the
 * mapping is done through the backend share function.
 *
 * @param src_as Pointer to source address space.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
 * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
 *
 * @return Zero on success or ENOENT if there is no such task or
 *	   if there is no such address space area,
 *	   EPERM if there was a problem in accepting the area or
 *	   ENOMEM if there was a problem in allocating destination
 *	   address space area. ENOTSUP is returned if an attempt
 *	   to share a non-anonymous address space area is detected.
 */
int as_area_share(as_t *src_as, __address src_base, size_t acc_size,
		  as_t *dst_as, __address dst_base, int dst_flags_mask)
{
	ipl_t ipl;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;
	share_info_t *sh_info;
	mem_backend_t *src_backend;
	mem_backend_data_t src_backend_data;

	ipl = interrupts_disable();
	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (!src_area->backend || !src_area->backend->share) {
		/*
		 * There is no backend or the backend does not
		 * know how to share the area.
		 */
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	src_backend = src_area->backend;
	src_backend_data = src_area->backend_data;

	/* Share the cacheable flag from the original mapping */
	if (src_flags & AS_AREA_CACHEABLE)
		dst_flags_mask |= AS_AREA_CACHEABLE;

	if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
		mutex_unlock(&src_area->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	/*
	 * Now we are committed to sharing the area.
	 * First prepare the area for sharing.
	 * Then it will be safe to unlock it.
	 */
	sh_info = src_area->sh_info;
	if (!sh_info) {
		sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
		mutex_initialize(&sh_info->lock);
		sh_info->refcount = 2;
		btree_create(&sh_info->pagemap);
		src_area->sh_info = sh_info;
	} else {
		mutex_lock(&sh_info->lock);
		sh_info->refcount++;
		mutex_unlock(&sh_info->lock);
	}

	src_area->backend->share(src_area);

	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 * The flags of the source area are masked against dst_flags_mask
	 * to support sharing in less privileged mode.
	 */
	dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
				  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		sh_info_remove_reference(sh_info);

		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute and set the sh_info.
	 */
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	dst_area->sh_info = sh_info;
	mutex_unlock(&dst_area->lock);

	interrupts_restore(ipl);

	return 0;
}

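/*
 * Note on the sh_info lifecycle: a freshly created sh_info starts with
 * refcount == 2 because both the source area and the about-to-be-created
 * destination area will point to it; each as_area_destroy() later drops
 * one reference via sh_info_remove_reference().
 */
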
/** Check access mode for address space area.
 *
 * The address space area must be locked prior to this call.
 *
 * @param area Address space area.
 * @param access Access mode.
 *
 * @return False if access violates area's permissions, true otherwise.
 */
bool as_area_check_access(as_area_t *area, pf_access_t access)
{
	int flagmap[] = {
		[PF_ACCESS_READ] = AS_AREA_READ,
		[PF_ACCESS_WRITE] = AS_AREA_WRITE,
		[PF_ACCESS_EXEC] = AS_AREA_EXEC
	};

	if (!(area->flags & flagmap[access]))
		return false;

	return true;
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param access Access mode that caused the fault (i.e. read/write/exec).
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *	   fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, pf_access_t access, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
				(access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
				(access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
				page_table_unlock(AS, false);
				mutex_unlock(&area->lock);
				mutex_unlock(&AS->lock);
				return AS_PF_OK;
			}
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}

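/*
 * The page_fault label above implements the copy_from_uspace() and
 * copy_to_uspace() failover: instead of reporting a fatal fault, the
 * interrupted state is redirected to the respective failover address so
 * that the interrupted copy routine can return an error gracefully.
 */
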
/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&inactive_as_with_asid_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->cpu_refcount);
		if ((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&inactive_as_with_asid_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}

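/*
 * The deferred asid_get() above matters because ASID allocation may need
 * to steal an ASID from an inactive address space, which involves taking
 * that address space's mutex; doing so while still holding new->lock
 * could violate the lock ordering documented at
 * inactive_as_with_asid_lock.
 */
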
/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (aflags & AS_AREA_CACHEABLE)
		flags |= PAGE_CACHEABLE;

	return flags;
}

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Destroy page table.
 *
 * Destroy page table in architecture specific way.
 *
 * @param page_table Physical address of PTL0.
 */
void page_table_destroy(pte_t *page_table)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_destroy);

	as_operations->page_table_destroy(page_table);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check if it doesn't conflict with kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

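/*
 * All conflict and merge checks in this file are built on the overlaps()
 * macro from <macros.h>, which tests whether two intervals given by their
 * start address and size intersect.
 */
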
/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

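/*
 * The used_space B+tree of an address space area maps the first page of
 * each used interval (the key) to the number of contiguous pages in that
 * interval (the value). The two functions below keep neighbouring
 * intervals merged whenever possible.
 */
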
/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	if (!leaf->keys) {
		btree_insert(&a->used_space, page, (void *) count, leaf);
		return 1;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		__address right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does not
		 * exist but the interval fits from the left.
		 */

		if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the right interval down
			 * and increasing its size accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving base of the right
			 * interval down and increasing its size accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour does not
		 * exist but the interval fits from the right.
		 */

		if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt*PAGE_SIZE == page) {
			/* The interval can be added by growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval doesn't adjoin with the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit only
	 * between two other intervals of the leaf. The two border cases were already
	 * resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
				/* The interval intersects with the left interval. */
				return 0;
			} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
				/* The interval intersects with the right interval. */
				return 0;
			} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
				/* The interval can be added by merging the two already present intervals. */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt*PAGE_SIZE) {
				/* The interval can be added by simply growing the left interval. */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count*PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving base of the right
				 * interval down and increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring intervals,
				 * but cannot be merged with any of them.
				 */
				btree_insert(&a->used_space, page, (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
}

/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		__address left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys - 1; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
}

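/*
 * Worked example for the splitting cases above: removing count == 2 pages
 * starting at page 3 from an interval that begins at page 1 and spans 6
 * pages (pages 1-6) gives new_cnt == 2; the original entry shrinks by
 * count + new_cnt to 2 pages (pages 1-2) and a new 2-page entry is
 * inserted at page 5 (pages 5-6).
 */
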
/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		link_t *cur;

		dealloc = true;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
			btree_node_t *node;
			int i;

			node = list_get_instance(cur, btree_node_t, leaf_link);
			for (i = 0; i < node->keys; i++)
				frame_free(ADDR2PFN((__address) node->value[i]));
		}
	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}

/** @}
 */