#
# Copyright (C) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/arch.h>
#include <arch/stack.h>
#include <arch/regdef.h>
#include <arch/mm/mmu.h>

.text

/* Declare that the application-reserved globals %g2 and %g3 are used as scratch registers. */
.register %g2, #scratch
.register %g3, #scratch

/*
 * This is the assembly language version of our _memcpy() generated by gcc.
 */
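/*
 * For orientation, a rough C sketch of the copy strategy implemented below.
 * This is an illustrative reconstruction, not the original _memcpy() source;
 * the name memcpy_sketch and the simplified void signature are assumptions
 * made only for this example.
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static void memcpy_sketch(void *dst, const void *src, size_t cnt)
 *	{
 *		size_t i;
 *
 *		if ((((uintptr_t) src | (uintptr_t) dst) & 7) == 0) {
 *			// Both operands are 8-byte aligned:
 *			// copy doublewords first, then the remaining tail bytes.
 *			for (i = 0; i < cnt / 8; i++)
 *				((uint64_t *) dst)[i] = ((const uint64_t *) src)[i];
 *			for (i = cnt & ~(size_t) 7; i < cnt; i++)
 *				((uint8_t *) dst)[i] = ((const uint8_t *) src)[i];
 *		} else {
 *			// Misaligned: plain byte-by-byte copy.
 *			for (i = 0; i < cnt; i++)
 *				((uint8_t *) dst)[i] = ((const uint8_t *) src)[i];
 *		}
 *	}
 */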
.global memcpy
memcpy:
	/*
	 * %o0 = dst, %o1 = src, %o2 = cnt
	 */
	add %o1, 7, %g1
	and %g1, -8, %g1		! %g1 = src rounded up to 8 bytes
	cmp %o1, %g1
	be,pn %xcc, 3f			! src is 8-byte aligned, go check dst
	add %o0, 7, %g1			! delay slot: %g1 = dst + 7
	mov 0, %g3
0:
	brz,pn %o2, 2f			! nothing to copy
	mov 0, %g2
1:
	/* byte-by-byte copy loop (misaligned case) */
	ldub [%g3 + %o1], %g1
	add %g2, 1, %g2
	cmp %o2, %g2
	stb %g1, [%g3 + %o0]
	bne,pt %xcc, 1b
	mov %g2, %g3
2:
	jmp %o7 + 8			! exit point
	mov %o1, %o0
3:
	and %g1, -8, %g1		! %g1 = dst rounded up to 8 bytes
	cmp %o0, %g1
	bne,pt %xcc, 0b			! dst is not 8-byte aligned, copy bytes
	mov 0, %g3
	srlx %o2, 3, %g4		! %g4 = number of whole doublewords
	brz,pn %g4, 5f
	mov 0, %g5
4:
	/* doubleword copy loop (both operands 8-byte aligned) */
	sllx %g3, 3, %g2
	add %g5, 1, %g3
	ldx [%o1 + %g2], %g1
	mov %g3, %g5
	cmp %g4, %g3
	bne,pt %xcc, 4b
	stx %g1, [%o0 + %g2]
5:
	and %o2, 7, %o2			! %o2 = number of tail bytes
	brz,pn %o2, 2b
	sllx %g4, 3, %g1		! delay slot: %g1 = bytes already copied
	mov 0, %g2
	add %g1, %o0, %o0
	add %g1, %o1, %g4
	mov 0, %g3
6:
	/* copy the remaining tail bytes */
	ldub [%g2 + %g4], %g1
	stb %g1, [%g2 + %o0]
	add %g3, 1, %g2
	cmp %o2, %g2
	bne,pt %xcc, 6b
	mov %g2, %g3

	jmp %o7 + 8			! exit point
	mov %o1, %o0

/*
 * Almost the same as memcpy() except the loads are from userspace.
 */
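/*
 * The loads (lduba, ldxa) below are tagged with the ASI_AIUS address space
 * identifier ("as if user, secondary"), so the CPU performs them with
 * userspace privileges through the secondary context. A faulting load can
 * thus be diverted by the trap handler to memcpy_from_uspace_failover_address
 * (defined near the end of this file), which makes the routine return 0.
 */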
.global memcpy_from_uspace
memcpy_from_uspace:
	add %o1, 7, %g1
	and %g1, -8, %g1
	cmp %o1, %g1
	be,pn %xcc, 3f
	add %o0, 7, %g1
	mov 0, %g3
0:
	brz,pn %o2, 2f
	mov 0, %g2
1:
	lduba [%g3 + %o1] ASI_AIUS, %g1
	add %g2, 1, %g2
	cmp %o2, %g2
	stb %g1, [%g3 + %o0]
	bne,pt %xcc, 1b
	mov %g2, %g3
2:
	jmp %o7 + 8			! exit point
	mov %o1, %o0
3:
	and %g1, -8, %g1
	cmp %o0, %g1
	bne,pt %xcc, 0b
	mov 0, %g3
	srlx %o2, 3, %g4
	brz,pn %g4, 5f
	mov 0, %g5
4:
	sllx %g3, 3, %g2
	add %g5, 1, %g3
	ldxa [%o1 + %g2] ASI_AIUS, %g1
	mov %g3, %g5
	cmp %g4, %g3
	bne,pt %xcc, 4b
	stx %g1, [%o0 + %g2]
5:
	and %o2, 7, %o2
	brz,pn %o2, 2b
	sllx %g4, 3, %g1
	mov 0, %g2
	add %g1, %o0, %o0
	add %g1, %o1, %g4
	mov 0, %g3
6:
	lduba [%g2 + %g4] ASI_AIUS, %g1
	stb %g1, [%g2 + %o0]
	add %g3, 1, %g2
	cmp %o2, %g2
	bne,pt %xcc, 6b
	mov %g2, %g3

	jmp %o7 + 8			! exit point
	mov %o1, %o0

/*
 * Almost the same as memcpy() except the stores are to userspace.
 */
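/*
 * Here the stores (stba, stxa) use ASI_AIUS while the loads are ordinary
 * kernel accesses; a faulting store can be diverted to
 * memcpy_to_uspace_failover_address, which makes the routine return 0.
 */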
.global memcpy_to_uspace
memcpy_to_uspace:
	add %o1, 7, %g1
	and %g1, -8, %g1
	cmp %o1, %g1
	be,pn %xcc, 3f
	add %o0, 7, %g1
	mov 0, %g3
0:
	brz,pn %o2, 2f
	mov 0, %g2
1:
	ldub [%g3 + %o1], %g1
	add %g2, 1, %g2
	cmp %o2, %g2
	stba %g1, [%g3 + %o0] ASI_AIUS
	bne,pt %xcc, 1b
	mov %g2, %g3
2:
	jmp %o7 + 8			! exit point
	mov %o1, %o0
3:
	and %g1, -8, %g1
	cmp %o0, %g1
	bne,pt %xcc, 0b
	mov 0, %g3
	srlx %o2, 3, %g4
	brz,pn %g4, 5f
	mov 0, %g5
4:
	sllx %g3, 3, %g2
	add %g5, 1, %g3
	ldx [%o1 + %g2], %g1
	mov %g3, %g5
	cmp %g4, %g3
	bne,pt %xcc, 4b
	stxa %g1, [%o0 + %g2] ASI_AIUS
5:
	and %o2, 7, %o2
	brz,pn %o2, 2b
	sllx %g4, 3, %g1
	mov 0, %g2
	add %g1, %o0, %o0
	add %g1, %o1, %g4
	mov 0, %g3
6:
	ldub [%g2 + %g4], %g1
	stba %g1, [%g2 + %o0] ASI_AIUS
	add %g3, 1, %g2
	cmp %o2, %g2
	bne,pt %xcc, 6b
	mov %g2, %g3

	jmp %o7 + 8			! exit point
	mov %o1, %o0

.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
	jmp %o7 + 8			! exit point
	mov %g0, %o0			! return 0 on failure
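
/*
 * For illustration, how a C caller might consume this convention. The
 * prototype, return type, and the copy_in helper in this sketch are
 * assumptions made for the example, not declarations provided by this file:
 *
 *	#include <stdbool.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	extern uintptr_t memcpy_from_uspace(void *dst, const void *uspace_src,
 *	    size_t size);
 *
 *	// A zero return value means the userspace access faulted and execution
 *	// resumed at memcpy_from_uspace_failover_address.
 *	static bool copy_in(void *kbuf, const void *uptr, size_t size)
 *	{
 *		return memcpy_from_uspace(kbuf, uptr, size) != 0;
 *	}
 */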

.global memsetb
memsetb:
	b _memsetb			! tail-branch: _memsetb() returns straight to our caller
	nop
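
/*
 * Both macros below briefly switch to the alternate global register set
 * selected by \bit (PSTATE_AG_BIT for the alternate globals, PSTATE_IG_BIT
 * for the interrupt globals) so that a value can be moved between %i0 and
 * the global register \reg of that set. Note that wrpr XORs its operands:
 * the first wrpr toggles the selected bit on, while the second one writes
 * the saved value back and thus restores the original %pstate.
 */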
.macro WRITE_ALTERNATE_REGISTER reg, bit
	save %sp, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
	rdpr %pstate, %l0
	wrpr %l0, \bit, %pstate
	mov %i0, \reg
	wrpr %l0, 0, %pstate
	ret
	restore
.endm

.macro READ_ALTERNATE_REGISTER reg, bit
	save %sp, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
	rdpr %pstate, %l0
	wrpr %l0, \bit, %pstate
	mov \reg, %i0
	wrpr %l0, 0, %pstate
	ret
	restore
.endm

.global write_to_ag_g6
write_to_ag_g6:
	WRITE_ALTERNATE_REGISTER %g6, PSTATE_AG_BIT

.global write_to_ag_g7
write_to_ag_g7:
	WRITE_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT

.global write_to_ig_g6
write_to_ig_g6:
	WRITE_ALTERNATE_REGISTER %g6, PSTATE_IG_BIT

.global read_from_ag_g7
read_from_ag_g7:
	READ_ALTERNATE_REGISTER %g7, PSTATE_AG_BIT

/** Switch to userspace.
 *
 * %o0	Userspace entry address.
 * %o1	Userspace stack pointer address.
 * %o2	Userspace address of uarg structure.
 */
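/*
 * The switch is performed by faking a return from a trap: the code below
 * raises the trap level to 1, sets up %tstate (current register window,
 * interrupts enabled after the transition) and %tnpc (the userspace entry
 * point), copies the secondary context into the primary context register and
 * finally executes the done instruction, which drops back to TL = 0 and
 * transfers control to userspace.
 */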
.global switch_to_userspace
switch_to_userspace:
	save %o1, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
	flushw
	wrpr %g0, 0, %cleanwin		! avoid information leak

	mov %i3, %o0			! uarg

	clr %i2
	clr %i3
	clr %i4
	clr %i5
	clr %i6

	wrpr %g0, 1, %tl		! enforce mapping via nucleus

	rdpr %cwp, %g1
	wrpr %g1, TSTATE_IE_BIT, %tstate
	wrpr %i0, 0, %tnpc

	/*
	 * Set the primary context according to the secondary context.
	 * The secondary context has already been installed by
	 * higher-level functions.
	 */
	wr %g0, ASI_DMMU, %asi
	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
	flush %i7

	/*
	 * Spills and fills will be handled by the userspace handlers.
	 */
	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate

	done				! jump to userspace