/*
 * Copyright (C) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains address space manipulation functions.
 * Roughly speaking, this is a higher-level client of the
 * Virtual Address Translation (VAT) subsystem.
 */

#include <mm/as.h>
#include <arch/mm/as.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/tlb.h>
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <proc/task.h>
#include <arch/asm.h>
#include <panic.h>
#include <debug.h>
#include <print.h>
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <errno.h>
#include <config.h>
#include <arch/types.h>
#include <typedefs.h>

as_operations_t *as_operations = NULL;

/** Address space lock. It protects inactive_as_with_asid_head. */
SPINLOCK_INITIALIZE(as_lock);

/**
 * This list contains address spaces that are not active on any
 * processor and that have a valid ASID.
 */
LIST_INITIALIZE(inactive_as_with_asid_head);

/** Kernel address space. */
as_t *AS_KERNEL = NULL;

static int area_flags_to_page_flags(int aflags);
static int get_area_flags(as_area_t *a);
static as_area_t *find_area_and_lock(as_t *as, __address va);
static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);

/** Initialize address space subsystem. */
void as_init(void)
{
	as_arch_init();
	AS_KERNEL = as_create(FLAG_AS_KERNEL);
	if (!AS_KERNEL)
		panic("can't create kernel address space\n");
}

/** Create address space.
 *
 * @param flags Flags that influence the way in which the address space is created.
 */
as_t *as_create(int flags)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t), 0);
	link_initialize(&as->inactive_as_with_asid_link);
	spinlock_initialize(&as->lock, "as_lock");
	btree_create(&as->as_area_btree);

	if (flags & FLAG_AS_KERNEL)
		as->asid = ASID_KERNEL;
	else
		as->asid = ASID_INVALID;

	as->refcount = 0;
	as->page_table = page_table_create(flags);

	return as;
}
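
/*
 * Illustrative sketch only (not part of the original code): a caller
 * creating an ordinary user address space would do something like
 *
 *	as_t *as = as_create(0);
 *
 * Such an address space starts out with asid == ASID_INVALID; a real
 * ASID is assigned lazily by as_switch() the first time the address
 * space becomes active.
 */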

/** Free address space. */
void as_free(as_t *as)
{
	ASSERT(as->refcount == 0);

	/* TODO: free as_areas and other resources held by as */
	/* TODO: free page table */
	free(as);
}

/** Create address space area of common attributes.
 *
 * The created address space area is added to the target address space.
 *
 * @param as Target address space.
 * @param flags Flags of the area.
 * @param size Size of area.
 * @param base Base address of area.
 *
 * @return Address space area on success or NULL on failure.
 */
as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base)
{
	ipl_t ipl;
	as_area_t *a;

	if (base % PAGE_SIZE)
		return NULL;

	if (!size)
		return NULL;

	/* Writeable executable areas are not supported. */
	if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE))
		return NULL;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	if (!check_area_conflicts(as, base, size, NULL)) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return NULL;
	}

	a = (as_area_t *) malloc(sizeof(as_area_t), 0);

	spinlock_initialize(&a->lock, "as_area_lock");

	a->flags = flags;
	a->pages = SIZE2FRAMES(size);
	a->base = base;

	btree_insert(&as->as_area_btree, base, (void *) a, NULL);

	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return a;
}
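
/*
 * Illustrative sketch only: creating a hypothetical three-page
 * read/write area could look like
 *
 *	as_area_t *a;
 *	a = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE,
 *		3 * PAGE_SIZE, USTACK_ADDRESS);
 *
 * USTACK_ADDRESS stands for any page-aligned user-space address and is
 * made up for this example. Note that no frames are allocated here;
 * backing frames are supplied lazily by as_page_fault().
 */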

/** Find address space area and resize it.
 *
 * @param as Address space.
 * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
 * @param size New size of the virtual memory block starting at address.
 * @param flags Flags influencing the remap operation. Currently unused.
 *
 * @return address on success, (__address) -1 otherwise.
 */
__address as_area_resize(as_t *as, __address address, size_t size, int flags)
{
	as_area_t *area = NULL;
	ipl_t ipl;
	size_t pages;

	ipl = interrupts_disable();
	spinlock_lock(&as->lock);

	/*
	 * Locate the area.
	 */
	area = find_area_and_lock(as, address);
	if (!area) {
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (area->flags & AS_AREA_DEVICE) {
		/*
		 * Remapping of address space areas associated
		 * with memory mapped devices is not supported.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	pages = SIZE2FRAMES((address - area->base) + size);
	if (!pages) {
		/*
		 * Zero size address space areas are not allowed.
		 */
		spinlock_unlock(&area->lock);
		spinlock_unlock(&as->lock);
		interrupts_restore(ipl);
		return (__address) -1;
	}

	if (pages < area->pages) {
		int i;

		/*
		 * Shrinking the area.
		 * No need to check for overlaps.
		 */
		for (i = pages; i < area->pages; i++) {
			pte_t *pte;

			/*
			 * Releasing physical memory.
			 * This depends on the fact that the memory was allocated using frame_alloc().
			 */
			page_table_lock(as, false);
			pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
			if (pte && PTE_VALID(pte)) {
				__address frame;

				ASSERT(PTE_PRESENT(pte));
				frame = PTE_GET_FRAME(pte);
				page_mapping_remove(as, area->base + i*PAGE_SIZE);
				page_table_unlock(as, false);

				frame_free(ADDR2PFN(frame));
			} else {
				page_table_unlock(as, false);
			}
		}
		/*
		 * Invalidate TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
		tlb_shootdown_finalize();
	} else {
		/*
		 * Growing the area.
		 * Check for overlaps with other address space areas.
		 */
		if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
			spinlock_unlock(&area->lock);
			spinlock_unlock(&as->lock);
			interrupts_restore(ipl);
			return (__address) -1;
		}
	}

	area->pages = pages;

	spinlock_unlock(&area->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return address;
}
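
/*
 * Worked example of the size computation above, assuming 4 KiB pages:
 * for an area with base 0x10000 and area->pages == 4, the call
 *
 *	as_area_resize(as, 0x10000, 2 * PAGE_SIZE, 0);
 *
 * gives pages = SIZE2FRAMES(0x10000 - 0x10000 + 0x2000) = 2. Since
 * 2 < 4, the last two pages are unmapped, their frames freed and the
 * corresponding TLB entries shot down.
 */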

/** Send address space area to another task.
 *
 * The address space area is sent to the specified task.
 * If the destination task is willing to accept the
 * area, a new area is created according to the
 * source area. Moreover, any existing mapping
 * is copied as well, thus providing a mechanism
 * for sharing groups of pages. The source address
 * space area and any associated mapping is preserved.
 *
 * @param id Task ID of the accepting task.
 * @param base Base address of the source address space area.
 * @param size Size of the source address space area.
 * @param flags Flags of the source address space area.
 *
 * @return 0 on success, ENOENT if there is no such task or
 *	   no such address space area,
 *	   EPERM if there was a problem in accepting the area or
 *	   ENOMEM if there was a problem in allocating the destination
 *	   address space area.
 */
int as_area_send(task_id_t id, __address base, size_t size, int flags)
{
	ipl_t ipl;
	task_t *t;
	count_t i;
	as_t *as;
	__address dst_base;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	t = task_find_by_id(id);
	if (!t) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	spinlock_lock(&t->lock);
	spinlock_unlock(&tasks_lock);

	as = t->as;
	dst_base = (__address) t->accept_arg.base;

	if (as == AS) {
		/*
		 * The two tasks share the entire address space.
		 * Return error since there is no point in continuing.
		 */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != size) ||
	    (t->accept_arg.flags != flags)) {
		/*
		 * Discrepancy in either task ID, size or flags.
		 */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		return EPERM;
	}

	/*
	 * Create a copy of the address space area.
	 */
	if (!as_area_create(as, flags, size, dst_base)) {
		/*
		 * Destination address space area could not be created.
		 */
		spinlock_unlock(&t->lock);
		interrupts_restore(ipl);
		return ENOMEM;
	}

	/*
	 * NOTE: we have just introduced a race condition.
	 * The destination task can attempt to access the newly
	 * created area before its mapping is copied from
	 * the source address space area. As a result, frames
	 * can get lost.
	 *
	 * Currently, this race is not solved, but one of the
	 * possible solutions would be to sleep in as_page_fault()
	 * when this situation is detected.
	 */

	memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
	spinlock_unlock(&t->lock);

	/*
	 * Avoid deadlock by first locking the address space with the lower address.
	 */
	if (as < AS) {
		spinlock_lock(&as->lock);
		spinlock_lock(&AS->lock);
	} else {
		spinlock_lock(&AS->lock);
		spinlock_lock(&as->lock);
	}

	for (i = 0; i < SIZE2FRAMES(size); i++) {
		pte_t *pte;
		__address frame;

		page_table_lock(AS, false);
		pte = page_mapping_find(AS, base + i*PAGE_SIZE);
		if (pte && PTE_VALID(pte)) {
			ASSERT(PTE_PRESENT(pte));
			frame = PTE_GET_FRAME(pte);
			if (!(flags & AS_AREA_DEVICE))
				frame_reference_add(ADDR2PFN(frame));
			page_table_unlock(AS, false);
		} else {
			page_table_unlock(AS, false);
			continue;
		}

		page_table_lock(as, false);
		page_mapping_insert(as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(flags));
		page_table_unlock(as, false);
	}

	spinlock_unlock(&AS->lock);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	return 0;
}
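
/*
 * The address-ordered locking above is the standard way of taking two
 * locks of the same kind without risking deadlock. As a general sketch
 * (lock_two() is hypothetical and does not exist in the kernel):
 *
 *	void lock_two(spinlock_t *l1, spinlock_t *l2)
 *	{
 *		if (l1 < l2) {
 *			spinlock_lock(l1);
 *			spinlock_lock(l2);
 *		} else {
 *			spinlock_lock(l2);
 *			spinlock_lock(l1);
 *		}
 *	}
 *
 * The case l1 == l2 is excluded here by the earlier as == AS check.
 */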

/** Initialize mapping for one page of address space.
 *
 * This function maps 'page' to 'frame' according
 * to attributes of the address space area to
 * which 'page' belongs.
 *
 * @param as Target address space.
 * @param page Virtual page within the area.
 * @param frame Physical frame to which page will be mapped.
 */
void as_set_mapping(as_t *as, __address page, __address frame)
{
	as_area_t *area;
	ipl_t ipl;

	ipl = interrupts_disable();
	page_table_lock(as, true);

	area = find_area_and_lock(as, page);
	if (!area) {
		panic("page not part of any as_area\n");
	}

	page_mapping_insert(as, page, frame, get_area_flags(area));

	spinlock_unlock(&area->lock);
	page_table_unlock(as, true);
	interrupts_restore(ipl);
}

/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 if the fault could not be serviced and should be escalated
 *	   to the low-level handler, 1 on success.
 */
int as_page_fault(__address page)
{
	pte_t *pte;
	as_area_t *area;
	__address frame;

	ASSERT(AS);

	spinlock_lock(&AS->lock);
	area = find_area_and_lock(AS, page);
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

	ASSERT(!(area->flags & AS_AREA_DEVICE));

	page_table_lock(AS, false);

	/*
	 * To avoid race condition between two page faults
	 * on the same address, we need to make sure
	 * the mapping has not already been inserted.
	 */
	if ((pte = page_mapping_find(AS, page))) {
		if (PTE_PRESENT(pte)) {
			page_table_unlock(AS, false);
			spinlock_unlock(&area->lock);
			spinlock_unlock(&AS->lock);
			return 1;
		}
	}

	/*
	 * In general, there can be several reasons that
	 * can have caused this fault.
	 *
	 * - non-existent mapping: the area is a scratch
	 *   area (e.g. stack) and so far has not been
	 *   allocated a frame for the faulting page
	 *
	 * - non-present mapping: another possibility,
	 *   currently not implemented, would be frame
	 *   reuse; when this becomes a possibility,
	 *   do not forget to distinguish between
	 *   the different causes
	 */
	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
	memsetb(PA2KA(frame), FRAME_SIZE, 0);

	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(AS, page, frame, get_area_flags(area));
	page_table_unlock(AS, false);

	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	return 1;
}
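
/*
 * Sketch of the expected calling convention (the real callers are the
 * architecture-specific low-level fault handlers, which live outside
 * this file):
 *
 *	if (!as_page_fault(page))
 *		panic("unhandled page fault\n");
 *
 * That is, 0 means the fault is genuine and must be escalated, while 1
 * means a mapping was installed and the faulting instruction can simply
 * be restarted.
 */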

/** Switch address spaces.
 *
 * @param old Old address space or NULL.
 * @param new New address space.
 */
void as_switch(as_t *old, as_t *new)
{
	ipl_t ipl;
	bool needs_asid = false;

	ipl = interrupts_disable();
	spinlock_lock(&as_lock);

	/*
	 * First, take care of the old address space.
	 */
	if (old) {
		spinlock_lock(&old->lock);
		ASSERT(old->refcount);
		if ((--old->refcount == 0) && (old != AS_KERNEL)) {
			/*
			 * The old address space is no longer active on
			 * any processor. It can be appended to the
			 * list of inactive address spaces with assigned
			 * ASID.
			 */
			ASSERT(old->asid != ASID_INVALID);
			list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
		}
		spinlock_unlock(&old->lock);
	}

	/*
	 * Second, prepare the new address space.
	 */
	spinlock_lock(&new->lock);
	if ((new->refcount++ == 0) && (new != AS_KERNEL)) {
		if (new->asid != ASID_INVALID)
			list_remove(&new->inactive_as_with_asid_link);
		else
			needs_asid = true;	/* defer call to asid_get() until new->lock is released */
	}
	SET_PTL0_ADDRESS(new->page_table);
	spinlock_unlock(&new->lock);

	if (needs_asid) {
		/*
		 * Allocation of new ASID was deferred
		 * until now in order to avoid deadlock.
		 */
		asid_t asid;

		asid = asid_get();
		spinlock_lock(&new->lock);
		new->asid = asid;
		spinlock_unlock(&new->lock);
	}
	spinlock_unlock(&as_lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. write ASID to hardware register etc.)
	 */
	as_install_arch(new);

	AS = new;
}
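
/*
 * Illustrative sketch only: the expected caller is the scheduler, which
 * is outside this file. A context switch between tasks would hand over
 * address spaces roughly as
 *
 *	as_switch(old_task ? old_task->as : NULL, new_task->as);
 *
 * where old_task and new_task are hypothetical names for the outgoing
 * and incoming tasks.
 */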
754 jermar 588
 
1235 jermar 589
/** Convert address space area flags to page flags.
754 jermar 590
 *
1235 jermar 591
 * @param aflags Flags of some address space area.
754 jermar 592
 *
1235 jermar 593
 * @return Flags to be passed to page_mapping_insert().
754 jermar 594
 */
1235 jermar 595
int area_flags_to_page_flags(int aflags)
754 jermar 596
{
597
	int flags;
598
 
1178 jermar 599
	flags = PAGE_USER | PAGE_PRESENT;
754 jermar 600
 
1235 jermar 601
	if (aflags & AS_AREA_READ)
1026 jermar 602
		flags |= PAGE_READ;
603
 
1235 jermar 604
	if (aflags & AS_AREA_WRITE)
1026 jermar 605
		flags |= PAGE_WRITE;
606
 
1235 jermar 607
	if (aflags & AS_AREA_EXEC)
1026 jermar 608
		flags |= PAGE_EXEC;
609
 
1235 jermar 610
	if (!(aflags & AS_AREA_DEVICE))
1178 jermar 611
		flags |= PAGE_CACHEABLE;
612
 
754 jermar 613
	return flags;
614
}
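
/*
 * Worked example: for an ordinary anonymous read/write area,
 *
 *	area_flags_to_page_flags(AS_AREA_READ | AS_AREA_WRITE)
 *
 * evaluates to PAGE_USER | PAGE_PRESENT | PAGE_READ | PAGE_WRITE |
 * PAGE_CACHEABLE; the cacheable attribute is added because the area is
 * not a device area.
 */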

/** Compute flags for virtual address translation subsystem.
 *
 * The address space area must be locked.
 * Interrupts must be disabled.
 *
 * @param a Address space area.
 *
 * @return Flags to be used in page_mapping_insert().
 */
int get_area_flags(as_area_t *a)
{
	return area_flags_to_page_flags(a->flags);
}

/** Create page table.
 *
 * Depending on architecture, create either address space
 * private or global page table.
 *
 * @param flags Flags saying whether the page table is for kernel address space.
 *
 * @return First entry of the page table.
 */
pte_t *page_table_create(int flags)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_create);

	return as_operations->page_table_create(flags);
}

/** Lock page table.
 *
 * This function should be called before any page_mapping_insert(),
 * page_mapping_remove() and page_mapping_find().
 *
 * Locking order is such that address space areas must be locked
 * prior to this call. Address space can be locked prior to this
 * call in which case the lock argument is false.
 *
 * @param as Address space.
 * @param lock If false, do not attempt to lock as->lock.
 */
void page_table_lock(as_t *as, bool lock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_lock);

	as_operations->page_table_lock(as, lock);
}

/** Unlock page table.
 *
 * @param as Address space.
 * @param unlock If false, do not attempt to unlock as->lock.
 */
void page_table_unlock(as_t *as, bool unlock)
{
	ASSERT(as_operations);
	ASSERT(as_operations->page_table_unlock);

	as_operations->page_table_unlock(as, unlock);
}
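
/*
 * Canonical usage pattern of the two functions above, as used e.g. in
 * as_area_resize():
 *
 *	page_table_lock(as, false);
 *	pte = page_mapping_find(as, va);
 *	if (pte && PTE_VALID(pte)) {
 *		...
 *	}
 *	page_table_unlock(as, false);
 *
 * The second argument is false there because as->lock is already held
 * by the caller, in line with the locking order described above.
 */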
679
 
977 jermar 680
 
681
/** Find address space area and lock it.
682
 *
683
 * The address space must be locked and interrupts must be disabled.
684
 *
685
 * @param as Address space.
686
 * @param va Virtual address.
687
 *
688
 * @return Locked address space area containing va on success or NULL on failure.
689
 */
690
as_area_t *find_area_and_lock(as_t *as, __address va)
691
{
692
	as_area_t *a;
1147 jermar 693
	btree_node_t *leaf, *lnode;
694
	int i;
977 jermar 695
 
1147 jermar 696
	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
697
	if (a) {
698
		/* va is the base address of an address space area */
977 jermar 699
		spinlock_lock(&a->lock);
1147 jermar 700
		return a;
701
	}
702
 
703
	/*
1150 jermar 704
	 * Search the leaf node and the righmost record of its left neighbour
1147 jermar 705
	 * to find out whether this is a miss or va belongs to an address
706
	 * space area found there.
707
	 */
708
 
709
	/* First, search the leaf node itself. */
710
	for (i = 0; i < leaf->keys; i++) {
711
		a = (as_area_t *) leaf->value[i];
712
		spinlock_lock(&a->lock);
713
		if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) {
714
			return a;
715
		}
716
		spinlock_unlock(&a->lock);
717
	}
977 jermar 718
 
1147 jermar 719
	/*
1150 jermar 720
	 * Second, locate the left neighbour and test its last record.
1148 jermar 721
	 * Because of its position in the B+tree, it must have base < va.
1147 jermar 722
	 */
1150 jermar 723
	if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
1147 jermar 724
		a = (as_area_t *) lnode->value[lnode->keys - 1];
725
		spinlock_lock(&a->lock);
726
		if (va < a->base + a->pages * PAGE_SIZE) {
1048 jermar 727
			return a;
1147 jermar 728
		}
977 jermar 729
		spinlock_unlock(&a->lock);
730
	}
731
 
732
	return NULL;
733
}
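
/*
 * Example of why the left neighbour must be consulted (addresses are
 * made up for illustration, assuming 4 KiB pages): let the leaf hold
 * the keys 0x20000 and 0x30000 and let va = 0x1f000. The in-leaf scan
 * finds no area with base <= va, yet va may still lie inside the area
 * stored as the last record of the left neighbour, e.g. one with
 * base 0x1c000 and 4 pages; hence the extra check above.
 */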

/** Check area conflicts with other areas.
 *
 * The address space must be locked and interrupts must be disabled.
 *
 * @param as Address space.
 * @param va Starting virtual address of the area being tested.
 * @param size Size of the area being tested.
 * @param avoid_area Do not touch this area.
 *
 * @return True if there is no conflict, false otherwise.
 */
bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area)
{
	as_area_t *a;
	btree_node_t *leaf, *node;
	int i;

	/*
	 * We don't want any area to have conflicts with the NULL page.
	 */
	if (overlaps(va, size, NULL, PAGE_SIZE))
		return false;

	/*
	 * The leaf node is found in O(log n), where n is proportional to
	 * the number of address space areas belonging to as.
	 * The check for conflicts is then attempted on the rightmost
	 * record in the left neighbour, the leftmost record in the right
	 * neighbour and all records in the leaf node itself.
	 */

	if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) {
		if (a != avoid_area)
			return false;
	}

	/* First, check the two border cases. */
	if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[node->keys - 1];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}
	if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
		a = (as_area_t *) node->value[0];
		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/* Second, check the leaf node. */
	for (i = 0; i < leaf->keys; i++) {
		a = (as_area_t *) leaf->value[i];

		if (a == avoid_area)
			continue;

		spinlock_lock(&a->lock);
		if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) {
			spinlock_unlock(&a->lock);
			return false;
		}
		spinlock_unlock(&a->lock);
	}

	/*
	 * So far, the area does not conflict with other areas.
	 * Check that it does not conflict with the kernel address space.
	 */
	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
		return !overlaps(va, size,
			KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
	}

	return true;
}

/*
 * Address space related syscalls.
 */

/** Wrapper for as_area_create(). */
__native sys_as_area_create(__address address, size_t size, int flags)
{
	if (as_area_create(AS, flags, size, address))
		return (__native) address;
	else
		return (__native) -1;
}

/** Wrapper for as_area_resize(). */
__native sys_as_area_resize(__address address, size_t size, int flags)
{
	return as_area_resize(AS, address, size, 0);
}

/** Prepare task for accepting address space area from another task.
 *
 * @param uspace_accept_arg Accept structure passed from userspace.
 *
 * @return EPERM if the passed size is zero or if the task ID encapsulated
 *	   in @uspace_accept_arg references TASK itself. Otherwise zero is
 *	   returned.
 */
__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
{
	as_area_acptsnd_arg_t arg;

	copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));

	if (!arg.size)
		return (__native) EPERM;

	if (arg.task_id == TASK->taskid) {
		/*
		 * Accepting from itself is not allowed.
		 */
		return (__native) EPERM;
	}

	memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));

	return 0;
}

/** Wrapper for as_area_send(). */
__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
{
	as_area_acptsnd_arg_t arg;

	copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));

	if (!arg.size)
		return (__native) EPERM;

	if (arg.task_id == TASK->taskid) {
		/*
		 * Sending to itself is not allowed.
		 */
		return (__native) EPERM;
	}

	return (__native) as_area_send(arg.task_id, (__address) arg.base, arg.size, arg.flags);
}
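
/*
 * Illustrative ordering of the accept/send handshake implemented by the
 * two syscalls above (the user-side calling convention is not shown and
 * the field values are only sketched):
 *
 *	1. The receiver fills in arg with task_id = <sender's task ID>,
 *	   base = <destination base in its own address space>, plus the
 *	   agreed size and flags, and invokes sys_as_area_accept(&arg).
 *	2. The sender fills in arg with task_id = <receiver's task ID>,
 *	   base = <source area base>, and the same size and flags, and
 *	   invokes sys_as_area_send(&arg).
 *
 * as_area_send() then cross-checks the sender's parameters against the
 * receiver's recorded accept_arg and, on a match, creates the
 * destination area and copies the mapping.
 */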