Subversion Repositories HelenOS

--- Rev 1787
+++ Rev 1851
@@ -41,10 +41,11 @@
 #include <arch/trap/regwin.h>
 #include <arch/trap/interrupt.h>
 #include <arch/trap/exception.h>
 #include <arch/trap/mmu.h>
 #include <arch/stack.h>
+#include <arch/regdef.h>
 
 #define TABLE_SIZE	TRAP_TABLE_SIZE
 #define ENTRY_SIZE	TRAP_TABLE_ENTRY_SIZE
 
 /*
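The only change in this hunk is the new include of <arch/regdef.h>; it supplies the PSTATE_AG_BIT, PSTATE_IG_BIT and PSTATE_MG_BIT masks used by the revised handler further down. As orientation only, the definitions presumably follow the UltraSPARC PSTATE layout (AG in bit 0, MG in bit 10, IG in bit 11); the exact values below are an assumption, not taken from this diff:

	/* Hypothetical sketch of the relevant <arch/regdef.h> definitions (values assumed). */
	#define PSTATE_AG_BIT	(1 << 0)	/* alternate globals selected */
	#define PSTATE_MG_BIT	(1 << 10)	/* MMU globals selected */
	#define PSTATE_IG_BIT	(1 << 11)	/* interrupt globals selected */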
@@ -273,18 +274,28 @@
 .global fill_0_normal_high
 fill_0_normal_high:
 	FILL_NORMAL_HANDLER
 
 
-/* Preemptible trap handler.
+/* Preemptible trap handler for TL=1.
  *
- * This trap handler makes arrangements to
- * make calling scheduler() possible.
+ * This trap handler makes arrangements to make calling of scheduler() from
+ * within a trap context possible. It is guaranteed to function only when traps
+ * are not nested (i.e. for TL=1).
  *
+ * Every trap handler on TL=1 that makes a call to the scheduler needs to
+ * be based on this function. The reason behind it is that the nested
+ * trap levels and the automatic saving of the interrupted context by hardware
+ * does not work well together with scheduling (i.e. a thread cannot be rescheduled
+ * with TL>0). Therefore it is necessary to eliminate the effect of trap levels
+ * by software and save the necessary state on the kernel stack.
+ *
+ * Note that for traps with TL>1, more state needs to be saved. This function
+ * is therefore not going to work when TL>1.
+ *
- * The caller is responsible for doing save
- * and allocating PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE
- * bytes on stack.
+ * The caller is responsible for doing SAVE and allocating
+ * PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes on the stack.
  *
  * Input registers:
  * 	%l0	 	Address of function to call.
  * Output registers:
  *	 %l1 - %l7 	Copy of %g1 - %g7
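The comment block above spells out the calling convention: the caller performs the SAVE, reserves PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes, and passes the address of the higher-level function in %l0. A minimal sketch of a TL=1 top-level handler written against that convention might look as follows; the label preemptible_handler and the function my_exc_handler are assumed names, not taken from this diff:

	! Illustrative sketch only -- not part of either revision.
	.global my_trap_entry
my_trap_entry:
	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp	! reserve the expected frame
	set my_exc_handler, %l0		! %l0 = higher-level function to call
	ba preemptible_handler		! hand over to the preemptible handler
	nop				! branch delay slot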
@@ -297,10 +308,15 @@
 	rdpr %tstate, %g1
 	rdpr %tpc, %g2
 	rdpr %tnpc, %g3
 	rdpr %pstate, %g4
 
+	/*
+	 * The following memory accesses will not fault
+	 * because special provisions are made to have
+	 * the kernel stack of THREAD locked in DTLB.
+	 */
 	stx %g1, [%fp + STACK_BIAS + SAVED_TSTATE]
 	stx %g2, [%fp + STACK_BIAS + SAVED_TPC]
 	stx %g3, [%fp + STACK_BIAS + SAVED_TNPC]
 	stx %g4, [%fp + STACK_BIAS + SAVED_PSTATE]
 	
@@ -311,32 +327,33 @@
 	
 	/*
 	 * Alter PSTATE.
 	 * - switch to normal globals.
 	 */
-	and %g4, ~1, %g4		! mask alternate globals
+	and %g4, ~(PSTATE_AG_BIT|PSTATE_IG_BIT|PSTATE_MG_BIT), %g4
 	wrpr %g4, 0, %pstate
 	 
 	/*
 	 * Save the normal globals.
 	 */
 	SAVE_GLOBALS
 	
 	/*
 	 * Call the higher-level handler.
 	 */
+	mov %fp, %o1				! calculate istate address
 	call %l0
-	nop
+	add %o1, STACK_BIAS + SAVED_PSTATE, %o1	! calculate istate address
 	
 	/*
-	 * Restore the normal global register set.
+	 * Restore	 the normal global register set.
 	 */
 	RESTORE_GLOBALS
 	
 	/*
 	 * Restore PSTATE from saved copy.
-	 * Alternate globals become active.
+	 * Alternate/Interrupt/MM globals become active.
 	 */
 	ldx [%fp + STACK_BIAS + SAVED_PSTATE], %l4
 	wrpr %l4, 0, %pstate
 	
 	/*
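The istate-address computation added around the CALL above relies on the SPARC delay slot: the instruction placed immediately after CALL executes before the first instruction of the callee, so it can still finish building an outgoing argument. A commented sketch of the same idiom, with a hypothetical callee label in place of the register-indirect call:

	! Illustrative sketch only.
	mov %fp, %o1				! start from the frame pointer
	call some_handler			! control transfers after the delay slot
	add %o1, STACK_BIAS + SAVED_PSTATE, %o1	! delay slot: %o1 now holds the
						! istate address passed to the handler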
@@ -355,14 +372,14 @@
 	 * Do restore to match the save instruction from the top-level handler.
 	 */
 	restore
 
 	/*
-	 * On execution of retry instruction, CWP will be restored from TSTATE register.
-	 * However, because of scheduling, it is possible that CWP in saved TSTATE
-	 * is different from current CWP. The following chunk of code fixes CWP
-	 * in the saved copy of TSTATE.
+	 * On execution of the RETRY instruction, CWP will be restored from the TSTATE
+	 * register. However, because of scheduling, it is possible that CWP in the saved
+	 * TSTATE is different from the current CWP. The following chunk of code fixes
+	 * CWP in the saved copy of TSTATE.
 	 */
 	rdpr %cwp, %g4		! read current CWP
 	and %g1, ~0x1f, %g1	! clear CWP field in saved TSTATE
 	or %g1, %g4, %g1	! write current CWP to TSTATE
 	
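The hunk is cut off right after the CWP fix-up. For the corrected value to matter, the patched TSTATE presumably has to be written back before the handler returns with RETRY, roughly along these lines (a sketch based on the comment above, not on lines shown in this diff):

	! Illustrative sketch only.
	wrpr %g1, 0, %tstate		! write the fixed TSTATE back
	retry				! return from the trap; PC/nPC and CWP are
					! restored from TPC/TNPC and TSTATE.CWP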