/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	as.c
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * The functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 *
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <align.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

/** This structure contains information associated with the shared address space area. */
struct share_info {
	mutex_t lock;		/**< This lock must be acquired only when the as_area lock is held. */
	count_t refcount;	/**< This structure can be deallocated if refcount drops to 0. */
	btree_t pagemap;	/**< B+tree containing complete map of anonymous pages of the shared area. */
};

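/*
 * Address space areas that share their memory point to a common
 * share_info structure through their sh_info field. References are
 * dropped with sh_info_remove_reference(), which also deallocates the
 * structure and its pagemap once the last reference is gone.
 */
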
as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before as_t mutex. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
static void sh_info_remove_reference(share_info_t *sh_info);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 * @param backend Address space area backend. NULL if no backend is used.
 * @param backend_data NULL or a pointer to an array holding two void *.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
	       mem_backend_t *backend, void **backend_data)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;
	a->sh_info = NULL;
	a->backend = backend;
	if (backend_data) {
		a->backend_data[0] = backend_data[0];
		a->backend_data[1] = backend_data[1];
	}
	btree_create(&a->used_space);

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}

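/*
 * Illustration only (hypothetical values): a one-page, zero-filled,
 * read/write anonymous area could be created as
 *
 *	as_area_t *a;
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, PAGE_SIZE,
 *		base, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 *
 * where 'base' is a page-aligned address that does not conflict with
 * any existing area; otherwise NULL is returned.
 */
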
/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->flags & AS_AREA_DEVICE) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}
	if (area->sh_info) {
		/*
		 * Remapping of shared address space areas
		 * is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		bool cond;
		__address start_free = area->base + pages*PAGE_SIZE;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */

		/*
		 * Remove frames belonging to used space starting from
		 * the highest addresses downwards until an overlap with
		 * the resized address space area is found. Note that this
		 * is also the right way to remove part of the used_space
		 * B+tree leaf list.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				__address b = node->key[node->keys - 1];
				count_t c = (count_t) node->value[node->keys - 1];
				int i = 0;

				if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {

					if (b + c*PAGE_SIZE <= start_free) {
						/*
						 * The whole interval fits completely
						 * in the resized address space area.
						 */
						break;
					}

					/*
					 * Part of the interval corresponding to b and c
					 * overlaps with the resized address space area.
					 */

					cond = false;	/* we are almost done */
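					/*
					 * i counts the pages at the start of this interval
					 * that still fit in the resized area; only the tail
					 * starting at start_free is unmapped and freed below.
					 */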
					i = (start_free - b) >> PAGE_WIDTH;
					if (!used_space_remove(area, start_free, c - i))
						panic("Could not remove used space.\n");
				} else {
					/*
					 * The interval of used space can be completely removed.
					 */
					if (!used_space_remove(area, b, c))
						panic("Could not remove used space.\n");
				}

				for (; i < c; i++) {
					pte_t *pte;

					page_table_lock(as, false);
					pte = page_mapping_find(as, b + i*PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
					if (area->backend && area->backend->backend_frame_free) {
						area->backend->backend_frame_free(area,
							b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b + i*PAGE_SIZE);
					page_table_unlock(as, false);
				}
			}
		}
		/*
		 * Invalidate TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}

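/*
 * Note that as_area_resize() computes the new size from the area base,
 * i.e. as (address - area->base) + size bytes. For example (illustrative),
 * calling as_area_resize(AS, a->base + PAGE_SIZE, PAGE_SIZE, 0) on an
 * area 'a' makes it exactly two pages long.
 */
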
/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
	as_area_t *area;
	__address base;
	ipl_t ipl;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;
	if (!(area->flags & AS_AREA_DEVICE)) {
		bool cond;

		/*
		 * Releasing physical memory.
		 * Areas mapping memory-mapped devices are treated differently than
		 * areas backing frame_alloc()'ed memory.
		 */

		/*
		 * Visit only the pages mapped by the used_space B+tree.
		 * Note that we must be very careful when walking the tree
		 * leaf list and removing used space as the leaf list changes
		 * unpredictably after each remove. The solution is to actually
		 * not walk the tree at all, but to remove items from the head
		 * of the leaf list as long as there are keys left.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&area->used_space.leaf_head));
			node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				__address b = node->key[0];
				count_t i;
				pte_t *pte;

				for (i = 0; i < (count_t) node->value[0]; i++) {
					page_table_lock(as, false);
					pte = page_mapping_find(as, b + i*PAGE_SIZE);
					ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
					if (area->backend && area->backend->backend_frame_free) {
						area->backend->backend_frame_free(area,
							b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
					}
					page_mapping_remove(as, b + i*PAGE_SIZE);
					page_table_unlock(as, false);
				}
				if (!used_space_remove(area, b, i))
					panic("Could not remove used space.\n");
			}
		}
	}
	btree_destroy(&area->used_space);

	/*
	 * Invalidate TLBs.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base, area->pages);
	tlb_invalidate_pages(AS->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	area->attributes |= AS_AREA_ATTR_PARTIAL;

	if (area->sh_info)
		sh_info_remove_reference(area->sh_info);

	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Steal address space area from another task.
 *
 * The address space area is stolen from another task.
 * Moreover, any existing mapping
 * is copied as well, thus providing a mechanism
 * for sharing groups of pages. The source address
 * space area and any associated mapping are preserved.
 *
 * @param src_task Pointer to the source task.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_base Target base address.
 *
 * @return Zero on success or ENOENT if there is no such task or
 *	   if there is no such address space area,
 *	   EPERM if there was a problem in accepting the area or
 *	   ENOMEM if there was a problem in allocating destination
 *	   address space area.
 */
int as_area_steal(task_t *src_task, __address src_base, size_t acc_size,
		  __address dst_base)
{
	ipl_t ipl;
	count_t i;
	as_t *src_as;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;

	ipl = interrupts_disable();
	spinlock_lock(&src_task->lock);
	src_as = src_task->as;

	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		spinlock_unlock(&src_task->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	if (src_size != acc_size) {
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return EPERM;
	}
	/*
	 * Create copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set which prevents race condition with
	 * preliminary as_page_fault() calls.
	 */
	dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return ENOMEM;
	}

	spinlock_unlock(&src_task->lock);

	/*
	 * Avoid deadlock by first locking the address space with lower address.
	 */
	if (AS < src_as) {
		mutex_lock(&AS->lock);
		mutex_lock(&src_as->lock);
	} else {
		mutex_lock(&src_as->lock);
		mutex_lock(&AS->lock);
	}

	for (i = 0; i < SIZE2FRAMES(src_size); i++) {
		pte_t *pte;
		__address frame;

		page_table_lock(src_as, false);
		pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			frame = PTE_GET_FRAME(pte);
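			/*
			 * The frame will be referenced by both the source and
			 * the destination mapping, so add a reference to keep
			 * it allocated for as long as either mapping exists.
			 * Device memory is not reference counted.
			 */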
			if (!(src_flags & AS_AREA_DEVICE))
				frame_reference_add(ADDR2PFN(frame));
			page_table_unlock(src_as, false);
		} else {
			page_table_unlock(src_as, false);
			continue;
		}

		page_table_lock(AS, false);
		page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
		page_table_unlock(AS, false);
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute.
	 */
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	mutex_unlock(&dst_area->lock);

	mutex_unlock(&AS->lock);
	mutex_unlock(&src_as->lock);
	interrupts_restore(ipl);

	return 0;
}

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("Page not part of any as_area.\n");
	}

	ASSERT(!area->backend);

	page_mapping_insert(as, page, frame, as_area_get_flags(area));
	if (!used_space_insert(area, page, 1))
		panic("Could not insert used space.\n");

	mutex_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It decides
 * whether the page fault can be resolved by any backend
 * and if so, it invokes the backend to resolve the page
 * fault.
 *
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
 *	   fault was caused by copy_to_uspace() or copy_from_uspace().
 */
int as_page_fault(__address page, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid possible race by returning error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (!area->backend || !area->backend->backend_page_fault) {
		/*
		 * The address space area is not backed by any backend
		 * or the backend cannot handle page faults.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not been already inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			mutex_unlock(&area->lock);
			mutex_unlock(&AS->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * Resort to the backend page fault handler.
	 */
	if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
		page_table_unlock(AS, false);
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	page_table_unlock(AS, false);
	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
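	/*
	 * The fault could not be serviced. If it occurred during
	 * copy_from_uspace() or copy_to_uspace(), redirect the interrupted
	 * context to the respective failover address and report AS_PF_DEFER
	 * rather than AS_PF_FAULT.
	 */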
	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * the scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (!(aflags & AS_AREA_DEVICE))
		flags |= PAGE_CACHEABLE;

	return flags;
}

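/*
 * For example, a non-device area created with AS_AREA_READ | AS_AREA_WRITE
 * translates to PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE |
 * PAGE_CACHEABLE.
 */
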
/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int as_area_get_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/** Return size of the address space area with given base. */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

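/*
 * Worked example for used_space_insert() below (illustrative, assuming
 * 4 KiB pages): if used_space already tracks the intervals
 * [0x1000, 2 pages] and [0x4000, 1 page], then inserting page 0x3000
 * with count 1 adjoins both neighbours; the left interval grows and the
 * two intervals merge into [0x1000, 4 pages], removing key 0x4000 from
 * the tree.
 */
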
/** Mark portion of address space area as used.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_insert(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We hit the beginning of some used space.
		 */
		return 0;
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
		count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the rightmost interval of
		 * the left neighbour and the first interval of the leaf.
		 */

		if (page >= right_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			node->value[node->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, leaf);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			node->value[node->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base of the right
			 * interval down and increasing its size accordingly.
			 */
			leaf->value[0] += count;
			leaf->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page < leaf->key[0]) {
		__address right_pg = leaf->key[0];
		count_t right_cnt = (count_t) leaf->value[0];

		/*
		 * Investigate the border case in which the left neighbour does not
		 * exist but the interval fits from the left.
		 */

		if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by moving the base of the right interval down
			 * and increasing its size accordingly.
			 */
			leaf->key[0] = page;
			leaf->value[0] += count;
			return 1;
		} else {
			/*
			 * The interval does not adjoin the right interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
	if (node) {
		__address left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];

		/*
		 * Examine the possibility that the interval fits
		 * somewhere between the leftmost interval of
		 * the right neighbour and the last interval of the leaf.
		 */

		if (page < left_pg) {
			/* Do nothing. */
		} else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
			/* The interval intersects with the right interval. */
			return 0;
		} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
			/* The interval can be added by merging the two already present intervals. */
			leaf->value[leaf->keys - 1] += count + right_cnt;
			btree_remove(&a->used_space, right_pg, node);
			return 1;
		} else if (page == left_pg + left_cnt*PAGE_SIZE) {
			/* The interval can be added by simply growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else if (page + count*PAGE_SIZE == right_pg) {
			/*
			 * The interval can be added by simply moving the base of the right
			 * interval down and increasing its size accordingly.
			 */
			node->value[0] += count;
			node->key[0] = page;
			return 1;
		} else {
			/*
			 * The interval is between both neighbouring intervals,
			 * but cannot be merged with any of them.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	} else if (page >= leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		/*
		 * Investigate the border case in which the right neighbour does not
		 * exist but the interval fits from the right.
		 */

		if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
			/* The interval intersects with the left interval. */
			return 0;
		} else if (left_pg + left_cnt*PAGE_SIZE == page) {
			/* The interval can be added by growing the left interval. */
			leaf->value[leaf->keys - 1] += count;
			return 1;
		} else {
			/*
			 * The interval does not adjoin the left interval.
			 * It must be added individually.
			 */
			btree_insert(&a->used_space, page, (void *) count, leaf);
			return 1;
		}
	}

	/*
	 * Note that if the algorithm made it thus far, the interval can fit only
	 * between two other intervals of the leaf. The two border cases were already
	 * resolved.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
			count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];

			/*
			 * The interval fits between left_pg and right_pg.
			 */

			if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
				/* The interval intersects with the left interval. */
				return 0;
			} else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
				/* The interval intersects with the right interval. */
				return 0;
			} else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
				/* The interval can be added by merging the two already present intervals. */
				leaf->value[i - 1] += count + right_cnt;
				btree_remove(&a->used_space, right_pg, leaf);
				return 1;
			} else if (page == left_pg + left_cnt*PAGE_SIZE) {
				/* The interval can be added by simply growing the left interval. */
				leaf->value[i - 1] += count;
				return 1;
			} else if (page + count*PAGE_SIZE == right_pg) {
				/*
				 * The interval can be added by simply moving the base of the right
				 * interval down and increasing its size accordingly.
				 */
				leaf->value[i] += count;
				leaf->key[i] = page;
				return 1;
			} else {
				/*
				 * The interval is between both neighbouring intervals,
				 * but cannot be merged with any of them.
				 */
				btree_insert(&a->used_space, page, (void *) count, leaf);
				return 1;
			}
		}
	}

	panic("Inconsistency detected while adding %d pages of used space at %P.\n", count, page);
}

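/*
 * Worked example for used_space_remove() below (illustrative, assuming
 * 4 KiB pages): if used_space tracks the single interval [0x1000, 4 pages],
 * then used_space_remove(a, 0x2000, 1) punches a hole in the middle,
 * shrinking the interval to [0x1000, 1 page] and inserting a new interval
 * [0x3000, 2 pages].
 */
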
/** Mark portion of address space area as unused.
 *
 * The address space area must be already locked.
 *
 * @param a Address space area.
 * @param page First page to be marked.
 * @param count Number of pages to be marked.
 *
 * @return 0 on failure and 1 on success.
 */
int used_space_remove(as_area_t *a, __address page, count_t count)
{
	btree_node_t *leaf, *node;
	count_t pages;
	int i;

	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
	ASSERT(count);

	pages = (count_t) btree_search(&a->used_space, page, &leaf);
	if (pages) {
		/*
		 * We are lucky, page is the beginning of some interval.
		 */
		if (count > pages) {
			return 0;
		} else if (count == pages) {
			btree_remove(&a->used_space, page, leaf);
			return 1;
		} else {
			/*
			 * Find the respective interval.
			 * Decrease its size and relocate its start address.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == page) {
					leaf->key[i] += count*PAGE_SIZE;
					leaf->value[i] -= count;
					return 1;
				}
			}
			goto error;
		}
	}

	node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
	if (node && page < leaf->key[0]) {
		__address left_pg = node->key[node->keys - 1];
		count_t left_cnt = (count_t) node->value[node->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour and can be removed by
				 * updating the size of the bigger interval.
				 */
				node->value[node->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the left neighbour but its removal requires
				 * both updating the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				node->value[node->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	} else if (page < leaf->key[0]) {
		return 0;
	}

	if (page > leaf->key[leaf->keys - 1]) {
		__address left_pg = leaf->key[leaf->keys - 1];
		count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

		if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
			if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf and can be removed by updating the size
				 * of the bigger interval.
				 */
				leaf->value[leaf->keys - 1] -= count;
				return 1;
			} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
				count_t new_cnt;

				/*
				 * The interval is contained in the rightmost interval
				 * of the leaf but its removal requires both updating
				 * the size of the original interval and
				 * also inserting a new interval.
				 */
				new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
				leaf->value[leaf->keys - 1] -= count + new_cnt;
				btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
				return 1;
			}
		}
		return 0;
	}

	/*
	 * The border cases have been already resolved.
	 * Now the interval can be only between intervals of the leaf.
	 */
	for (i = 1; i < leaf->keys; i++) {
		if (page < leaf->key[i]) {
			__address left_pg = leaf->key[i - 1];
			count_t left_cnt = (count_t) leaf->value[i - 1];

			/*
			 * Now the interval is between intervals corresponding to (i - 1) and i.
			 */
			if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
				if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf and can be removed by updating the size
					 * of the bigger interval.
					 */
					leaf->value[i - 1] -= count;
					return 1;
				} else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
					count_t new_cnt;

					/*
					 * The interval is contained in the interval (i - 1)
					 * of the leaf but its removal requires both updating
					 * the size of the original interval and
					 * also inserting a new interval.
					 */
					new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
					leaf->value[i - 1] -= count + new_cnt;
					btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
					return 1;
				}
			}
			return 0;
		}
	}

error:
	panic("Inconsistency detected while removing %d pages of used space from %P.\n", count, page);
}

/** Remove reference to address space area share info.
 *
 * If the reference count drops to 0, the sh_info is deallocated.
 *
 * @param sh_info Pointer to address space area share info.
 */
void sh_info_remove_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	mutex_lock(&sh_info->lock);
	ASSERT(sh_info->refcount);
	if (--sh_info->refcount == 0) {
		bool cond;

		dealloc = true;

		/*
		 * Now walk carefully the pagemap B+tree and free/remove
		 * reference from all frames found there.
		 */
		for (cond = true; cond;) {
			btree_node_t *node;

			ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
			node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
			if ((cond = (bool) node->keys)) {
				frame_free(ADDR2PFN((__address) node->value[0]));
				btree_remove(&sh_info->pagemap, node->key[0], node);
			}
		}

	}
	mutex_unlock(&sh_info->lock);

	if (dealloc) {
		btree_destroy(&sh_info->pagemap);
		free(sh_info);
	}
}

static int anon_page_fault(as_area_t *area, __address addr);
static void anon_frame_free(as_area_t *area, __address page, __address frame);

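/*
 * A memory backend provides two hooks: backend_page_fault() services a
 * page fault on a page belonging to the area, and backend_frame_free()
 * releases a frame when the area is shrunk or destroyed. The anonymous
 * backend below implements both for zero-filled, possibly shared memory.
 */
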
/*
 * Anonymous memory backend.
 */
mem_backend_t anon_backend = {
	.backend_page_fault = anon_page_fault,
	.backend_frame_free = anon_frame_free
};

/** Service a page fault in the anonymous memory address space area.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Pointer to the address space area.
 * @param addr Faulting virtual address.
 *
 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
 */
int anon_page_fault(as_area_t *area, __address addr)
{
	__address frame;

	if (area->sh_info) {
		btree_node_t *leaf;

		/*
		 * The area is shared, chances are that the mapping can be found
		 * in the pagemap of the address space area share info structure.
		 * In the case that the pagemap does not contain the respective
		 * mapping, a new frame is allocated and the mapping is created.
		 */
		mutex_lock(&area->sh_info->lock);
		frame = (__address) btree_search(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), &leaf);
		if (!frame) {
			bool allocate = true;
			int i;

			/*
			 * Zero can be returned as a valid frame address.
			 * Just a small workaround.
			 */
			for (i = 0; i < leaf->keys; i++) {
				if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
					allocate = false;
					break;
				}
			}
			if (allocate) {
				frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
				memsetb(PA2KA(frame), FRAME_SIZE, 0);

				/*
				 * Insert the address of the newly allocated frame into the pagemap.
				 */
				btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), (void *) frame, leaf);
			}
		}
		mutex_unlock(&area->sh_info->lock);
	} else {
		/*
		 * In general, there can be several reasons that
		 * can have caused this fault.
		 *
		 * - non-existent mapping: the area is an anonymous
		 *   area (e.g. heap or stack) and so far has not been
		 *   allocated a frame for the faulting page
		 *
		 * - non-present mapping: another possibility,
		 *   currently not implemented, would be frame
		 *   reuse; when this becomes a possibility,
		 *   do not forget to distinguish between
		 *   the different causes
		 */
		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
		memsetb(PA2KA(frame), FRAME_SIZE, 0);
	}

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
		panic("Could not insert used space.\n");

	return AS_PF_OK;
}

/** Free a frame that is backed by the anonymous memory backend.
 *
 * The address space area and page tables must be already locked.
 *
 * @param area Ignored.
 * @param page Ignored.
 * @param frame Frame to be released.
 */
void anon_frame_free(as_area_t *area, __address page, __address frame)
{
	frame_free(ADDR2PFN(frame));
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}