/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	as.c
 * @brief	Address space related functions.
 *
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 *
 * The functionality provided by this file allows one to
 * create address spaces and to create, resize and share
 * address space areas.
 *
 * @see page.c
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>
#include <syscall/copy.h>
#include <arch/interrupt.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. Must be acquired before any as_t mutex. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	mutex_initialize(&as->lock);
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
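
/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * might obtain a fresh user address space. The helper name
 * example_create_user_as() is hypothetical.
 */
static as_t *example_create_user_as(void)
{
	/*
	 * Passing 0 (i.e. no FLAG_AS_KERNEL) requests an ordinary user
	 * address space. It starts with no areas and asid == ASID_INVALID;
	 * an ASID is assigned lazily in as_switch().
	 */
	return as_create(0);
}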

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area memory.
 * @param size Size of area.
 * @param base Base address of area.
 * @param attrs Attributes of the area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	mutex_initialize(&a->lock);

	a->flags = flags;
	a->attributes = attrs;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
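
/*
 * Illustrative usage sketch, not part of the original file: creating an
 * anonymous read/write area in the current address space. The base address
 * and size are arbitrary example values.
 */
static as_area_t *example_create_rw_area(void)
{
	/*
	 * The base must be page-aligned and the size non-zero; requesting
	 * AS_AREA_EXEC together with AS_AREA_WRITE would be refused.
	 */
	return as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE,
		16 * PAGE_SIZE, 0x10000000, AS_AREA_ATTR_NONE);
}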

/** Find address space area and change it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return Zero on success or a value from @ref errno.h otherwise.
 */
int as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	if (area->flags & AS_AREA_DEVICE) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOTSUP;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLB's.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			mutex_unlock(&area->lock);
			mutex_unlock(&as->lock);
			interrupts_restore(ipl);
			return EADDRNOTAVAIL;
		}
	}

	area->pages = pages;

	mutex_unlock(&area->lock);
	mutex_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
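
/*
 * Illustrative usage sketch, not part of the original file: growing an
 * existing area by one page. Because 'size' is measured from 'address',
 * passing the area base together with the desired total size yields the
 * new size of the whole area.
 */
static int example_grow_area(__address base, size_t old_size)
{
	/*
	 * Returns zero on success, or e.g. EADDRNOTAVAIL if the grown
	 * area would conflict with a neighbouring area.
	 */
	return as_area_resize(AS, base, old_size + PAGE_SIZE, 0);
}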

/** Destroy address space area.
 *
 * @param as Address space.
 * @param address Address within the area to be deleted.
 *
 * @return Zero on success or a value from @ref errno.h on failure.
 */
int as_area_destroy(as_t *as, __address address)
{
	as_area_t *area;
	__address base;
	ipl_t ipl;
	int i;

	ipl = interrupts_disable();
	mutex_lock(&as->lock);

	area = find_area_and_lock(as, address);
	if (!area) {
		mutex_unlock(&as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	base = area->base;
	for (i = 0; i < area->pages; i++) {
		pte_t *pte;

		/*
		 * Releasing physical memory.
		 * Areas mapping memory-mapped devices are treated differently than
		 * areas backing frame_alloc()'ed memory: device frames are not
		 * returned to the frame allocator.
		 */
		page_table_lock(as, false);
		pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			page_mapping_remove(as, area->base + i*PAGE_SIZE);
			if (!(area->flags & AS_AREA_DEVICE)) {
				__address frame;

				frame = PTE_GET_FRAME(pte);
				frame_free(ADDR2PFN(frame));
			}
			page_table_unlock(as, false);
		} else {
			page_table_unlock(as, false);
		}
	}
	/*
	 * Invalidate TLB's.
	 */
	tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);
	tlb_invalidate_pages(as->asid, area->base, area->pages);
	tlb_shootdown_finalize();

	area->attributes |= AS_AREA_ATTR_PARTIAL;
	mutex_unlock(&area->lock);

	/*
	 * Remove the empty area from the address space.
	 */
	btree_remove(&as->as_area_btree, base, NULL);

	free(area);

	mutex_unlock(&as->lock);
	interrupts_restore(ipl);
	return 0;
}

/** Steal address space area from another task.
 *
 * The address space area is stolen from another task.
 * Moreover, any existing mapping is copied as well,
 * thus providing a mechanism for sharing a group of
 * pages. The source address space area and any
 * associated mapping are preserved.
 *
 * @param src_task Pointer to the source task.
 * @param src_base Base address of the source address space area.
 * @param acc_size Expected size of the source area.
 * @param dst_base Target base address.
 *
 * @return Zero on success, ENOENT if there is no such task or
 *	   no such address space area,
 *	   EPERM if there was a problem in accepting the area or
 *	   ENOMEM if there was a problem in allocating the destination
 *	   address space area.
 */
int as_area_steal(task_t *src_task, __address src_base, size_t acc_size,
		  __address dst_base)
{
	ipl_t ipl;
	count_t i;
	as_t *src_as;
	int src_flags;
	size_t src_size;
	as_area_t *src_area, *dst_area;

	ipl = interrupts_disable();
	spinlock_lock(&src_task->lock);
	src_as = src_task->as;

	mutex_lock(&src_as->lock);
	src_area = find_area_and_lock(src_as, src_base);
	if (!src_area) {
		/*
		 * Could not find the source address space area.
		 */
		spinlock_unlock(&src_task->lock);
		mutex_unlock(&src_as->lock);
		interrupts_restore(ipl);
		return ENOENT;
	}
	src_size = src_area->pages * PAGE_SIZE;
	src_flags = src_area->flags;
	mutex_unlock(&src_area->lock);
	mutex_unlock(&src_as->lock);

	if (src_size != acc_size) {
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return EPERM;
	}
	/*
	 * Create a copy of the source address space area.
	 * The destination area is created with AS_AREA_ATTR_PARTIAL
	 * attribute set, which prevents a race condition with
	 * preliminary as_page_fault() calls.
	 */
	dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
	if (!dst_area) {
		/*
		 * Destination address space area could not be created.
		 */
		spinlock_unlock(&src_task->lock);
		interrupts_restore(ipl);
		return ENOMEM;
	}

	spinlock_unlock(&src_task->lock);

	/*
	 * Avoid deadlock by first locking the address space with the lower address.
	 */
	if (AS < src_as) {
		mutex_lock(&AS->lock);
		mutex_lock(&src_as->lock);
	} else {
		mutex_lock(&src_as->lock);
		mutex_lock(&AS->lock);
	}

	for (i = 0; i < SIZE2FRAMES(src_size); i++) {
		pte_t *pte;
		__address frame;

		page_table_lock(src_as, false);
		pte = page_mapping_find(src_as, src_base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			frame = PTE_GET_FRAME(pte);
			if (!(src_flags & AS_AREA_DEVICE))
				frame_reference_add(ADDR2PFN(frame));
			page_table_unlock(src_as, false);
		} else {
			page_table_unlock(src_as, false);
			continue;
		}

		page_table_lock(AS, false);
		page_mapping_insert(AS, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(src_flags));
		page_table_unlock(AS, false);
	}

	/*
	 * Now the destination address space area has been
	 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
	 * attribute.
	 */
	mutex_lock(&dst_area->lock);
	dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
	mutex_unlock(&dst_area->lock);

	mutex_unlock(&AS->lock);
	mutex_unlock(&src_as->lock);
	interrupts_restore(ipl);

	return 0;
}
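
/*
 * Illustrative sketch, not part of the original file: the deadlock-avoidance
 * discipline used by as_area_steal() above, factored into a hypothetical
 * helper. Whenever two address spaces must be held at once, the one at the
 * lower kernel address is locked first, so that all concurrent stealers
 * agree on a single locking order. Assumes as1 != as2.
 */
static void example_lock_two_address_spaces(as_t *as1, as_t *as2)
{
	if (as1 < as2) {
		mutex_lock(&as1->lock);
		mutex_lock(&as2->lock);
	} else {
		mutex_lock(&as2->lock);
		mutex_lock(&as1->lock);
	}
}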

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to the attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	mutex_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}
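
/*
 * Illustrative usage sketch, not part of the original file: pre-mapping a
 * freshly allocated frame into an area instead of waiting for as_page_fault()
 * to allocate a zeroed frame lazily. The helper name is hypothetical.
 */
static void example_premap_page(__address page)
{
	__address frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));

	/* The caller would fill in the frame contents here. */
	as_set_mapping(AS, page, frame);
}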

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 * @param istate Pointer to interrupted state.
 *
 * @return AS_PF_FAULT (0) on page fault, AS_PF_OK (1) on success, or
 *	   AS_PF_DEFER (2) if the fault was caused by copy_to_uspace()
 *	   or copy_from_uspace().
 */
int as_page_fault(__address page, istate_t *istate)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	if (!THREAD)
		return AS_PF_FAULT;

	ASSERT(AS);

	mutex_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	if (area->attributes & AS_AREA_ATTR_PARTIAL) {
		/*
		 * The address space area is not fully initialized.
		 * Avoid a possible race by returning an error.
		 */
		mutex_unlock(&area->lock);
		mutex_unlock(&AS->lock);
		goto page_fault;
	}

	ASSERT(!(area->flags & AS_AREA_DEVICE));

	page_table_lock(AS, false);

	/*
	 * To avoid a race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not already been inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			mutex_unlock(&area->lock);
			mutex_unlock(&AS->lock);
			return AS_PF_OK;
		}
	}

	/*
	 * In general, there can be several reasons that
	 * can have caused this fault.
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	mutex_unlock(&area->lock);
	mutex_unlock(&AS->lock);
	return AS_PF_OK;

page_fault:
	if (!THREAD)
		return AS_PF_FAULT;

	if (THREAD->in_copy_from_uspace) {
		THREAD->in_copy_from_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_from_uspace_failover_address);
	} else if (THREAD->in_copy_to_uspace) {
		THREAD->in_copy_to_uspace = false;
		istate_set_retaddr(istate, (__address) &memcpy_to_uspace_failover_address);
	} else {
		return AS_PF_FAULT;
	}

	return AS_PF_DEFER;
}
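
/*
 * Illustrative sketch, not part of the original file: how an architecture's
 * low-level fault handler might dispatch to as_page_fault(). The function
 * name and the badvaddr argument are hypothetical; a real handler reads the
 * faulting address from an architecture-specific register.
 */
static void example_arch_page_fault(__address badvaddr, istate_t *istate)
{
	/*
	 * as_page_fault() expects the faulting page; badvaddr is assumed
	 * to be rounded down to a page boundary by the caller.
	 */
	if (as_page_fault(badvaddr, istate) == AS_PF_FAULT) {
		/* Neither lazily serviceable nor a failed uspace copy. */
		panic("unhandled page fault\n");
	}
}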

/** Switch address spaces.
 *
 * Note that this function cannot sleep as it is essentially a part of
 * scheduling. Sleeping here would lead to deadlock on wakeup.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		mutex_lock_active(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		mutex_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	mutex_lock_active(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	mutex_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of the new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		mutex_lock_active(&new->lock);
		new->asid = asid;
		mutex_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
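
/*
 * Illustrative sketch, not part of the original file: how a scheduler might
 * use as_switch() when switching between threads of different tasks. The
 * helper and its arguments are hypothetical.
 */
static void example_switch_address_space(thread_t *old_thread, thread_t *new_thread)
{
	as_t *as1 = old_thread ? old_thread->task->as : NULL;
	as_t *as2 = new_thread->task->as;

	/*
	 * as_switch() tolerates a NULL old address space and must not
	 * sleep; it updates reference counts, manages ASIDs and finally
	 * installs as2 as the current address space.
	 */
	if (as1 != as2)
		as_switch(as1, as2);
}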

/** Convert address space area flags to page flags.
 *
 * @param aflags Flags of some address space area.
 *
 * @return Flags to be passed to page_mapping_insert().
 */
int area_flags_to_page_flags(int aflags)
{
	int flags;

	flags = PAGE_USER | PAGE_PRESENT;

	if (aflags & AS_AREA_READ)
		flags |= PAGE_READ;

	if (aflags & AS_AREA_WRITE)
		flags |= PAGE_WRITE;

	if (aflags & AS_AREA_EXEC)
		flags |= PAGE_EXEC;

	if (!(aflags & AS_AREA_DEVICE))
		flags |= PAGE_CACHEABLE;

	return flags;
}
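
/*
 * Worked example, not part of the original file: for an ordinary anonymous
 * read/write area, area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE)
 * evaluates to PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE |
 * PAGE_CACHEABLE, because the AS_AREA_DEVICE bit is clear.
 */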

/** Compute flags for the virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on the architecture, create either an address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for the kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. The address space can be locked prior to this
 * call, in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}

/** Find address space area and lock it.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Virtual address.
 *
 * @return Locked address space area containing va on success or NULL on failure.
 */
as_area_t *find_area_and_lock(as_t *as, __address va)
{
	as_area_t *a;
	btree_node_t *leaf, *lnode;
	int i;

	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
	if (a) {
		/* va is the base address of an address space area */
		mutex_lock(&a->lock);
		return a;
	}

	/*
	 * Search the leaf node and the rightmost record of its left neighbour
	 * to find out whether this is a miss or va belongs to an address
	 * space area found there.
	 */

	/* First, search the leaf node itself. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];
		mutex_lock(&a->lock);
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * Second, locate the left neighbour and test its last record.
	 * Because of its position in the B+tree, it must have base < va.
	 */
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) lnode->value[lnode->keys - 1];
		mutex_lock(&a->lock);
		if (va < a->base + a->pages * PAGE_SIZE) {
			return a;
		}
		mutex_unlock(&a->lock);
	}

	return NULL;
}

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		mutex_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			mutex_unlock(&a->lock);
			return false;
		}
		mutex_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space either.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}
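
/*
 * Illustrative sketch, not part of the original file: the interval test that
 * overlaps() from <macros.h> is assumed to perform. Two half-open intervals
 * [s1, s1 + sz1) and [s2, s2 + sz2) intersect iff each starts before the
 * other one ends.
 */
static bool example_overlaps(__address s1, size_t sz1, __address s2, size_t sz2)
{
	return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}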

/** Return the size of the address space area with the given base. */
size_t as_get_size(__address base)
{
	ipl_t ipl;
	as_area_t *src_area;
	size_t size;

	ipl = interrupts_disable();
	src_area = find_area_and_lock(AS, base);
	if (src_area) {
		size = src_area->pages * PAGE_SIZE;
		mutex_unlock(&src_area->lock);
	} else {
		size = 0;
	}
	interrupts_restore(ipl);
	return size;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return (__native) as_area_resize(AS, address, size, 0);
}

/** Wrapper for as_area_destroy(). */
__native sys_as_area_destroy(__address address)
{
	return (__native) as_area_destroy(AS, address);
}