#
# Copyright (c) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

.text

.global context_save_arch
.global context_restore_arch

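/*
 * context_save_arch() takes a single argument (in0): a pointer to the
 * memory area in which the context is to be saved.  It stores the
 * preserved registers there and returns 1 in r8, so the caller can tell
 * a direct return apart from a later return through
 * context_restore_arch(), which yields 0.
 */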
context_save_arch:
	alloc loc0 = ar.pfs, 1, 8, 0, 0
	mov loc1 = ar.unat ;;
	/* loc2 */
	mov loc3 = ar.rsc

	.auto

	/*
	 * Flush dirty registers to backing store.
	 * After this ar.bsp and ar.bspstore are equal.
	 */
	flushrs
	mov loc4 = ar.bsp

	/*
	 * Put the RSE into enforced lazy mode
	 * so that ar.rnat can be read.
	 */
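	/*
	 * Bits 1:0 of ar.rsc select the RSE mode; the ~3 mask below
	 * clears them, which selects mode 0, enforced lazy.
	 */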
	and loc5 = ~3, loc3
	mov ar.rsc = loc5
	mov loc5 = ar.rnat

	.explicit

	mov loc6 = ar.lc

	/*
	 * Save application registers
	 */
	st8 [in0] = loc0, 8 ;;		/* save ar.pfs */
	st8 [in0] = loc1, 8 ;;		/* save ar.unat (caller) */
	mov loc2 = in0 ;;
	add in0 = 8, in0 ;;		/* skip ar.unat (callee) */
	st8 [in0] = loc3, 8 ;;		/* save ar.rsc */
	st8 [in0] = loc4, 8 ;;		/* save ar.bsp */
	st8 [in0] = loc5, 8 ;;		/* save ar.rnat */
	st8 [in0] = loc6, 8 ;;		/* save ar.lc */

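	/*
	 * st8.spill stores a general register and deposits its NaT bit
	 * into ar.unat, so the NaT state is preserved along with the
	 * spilled values.
	 */
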
	/*
	 * Save general registers including NaT bits
	 */
	st8.spill [in0] = r1, 8 ;;
	st8.spill [in0] = r4, 8 ;;
	st8.spill [in0] = r5, 8 ;;
	st8.spill [in0] = r6, 8 ;;
	st8.spill [in0] = r7, 8 ;;
	st8.spill [in0] = r12, 8 ;;	/* save sp */
	st8.spill [in0] = r13, 8 ;;

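	/*
	 * Only now does ar.unat hold the NaT bits of the registers
	 * spilled above; store it into the callee ar.unat slot whose
	 * address was remembered in loc2.
	 */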
	mov loc3 = ar.unat ;;
	st8 [loc2] = loc3		/* save ar.unat (callee) */

	/*
	 * Save branch registers
	 */
	mov loc2 = b0 ;;
	st8 [in0] = loc2, 8		/* save pc */
	mov loc3 = b1 ;;
	st8 [in0] = loc3, 8
	mov loc4 = b2 ;;
	st8 [in0] = loc4, 8
	mov loc5 = b3 ;;
	st8 [in0] = loc5, 8
	mov loc6 = b4 ;;
	st8 [in0] = loc6, 8
	mov loc7 = b5 ;;
	st8 [in0] = loc7, 8

	/*
	 * Save predicate registers
	 */
	mov loc2 = pr ;;
	st8 [in0] = loc2, 16 ;;		/* the FP registers below must be spilled to a 16-byte aligned address */

	/*
	 * Save floating-point registers.
	 */
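	/*
	 * Per the ia64 software conventions, only f2-f5 and f16-f31 are
	 * preserved across calls; the other FP registers are scratch and
	 * need not be part of the saved context.
	 */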
	stf.spill [in0] = f2, 16 ;;
	stf.spill [in0] = f3, 16 ;;
	stf.spill [in0] = f4, 16 ;;
	stf.spill [in0] = f5, 16 ;;

	stf.spill [in0] = f16, 16 ;;
	stf.spill [in0] = f17, 16 ;;
	stf.spill [in0] = f18, 16 ;;
	stf.spill [in0] = f19, 16 ;;
	stf.spill [in0] = f20, 16 ;;
	stf.spill [in0] = f21, 16 ;;
	stf.spill [in0] = f22, 16 ;;
	stf.spill [in0] = f23, 16 ;;
	stf.spill [in0] = f24, 16 ;;
	stf.spill [in0] = f25, 16 ;;
	stf.spill [in0] = f26, 16 ;;
	stf.spill [in0] = f27, 16 ;;
	stf.spill [in0] = f28, 16 ;;
	stf.spill [in0] = f29, 16 ;;
	stf.spill [in0] = f30, 16 ;;
	stf.spill [in0] = f31, 16 ;;

	mov ar.unat = loc1

	add r8 = r0, r0, 1		/* context_save returns 1 */
	br.ret.sptk.many b0

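/*
 * context_restore_arch() takes a single argument (in0): a pointer to the
 * save area previously filled in by context_save_arch().  It reloads the
 * saved state, returns 0 in r8 and branches to the saved b0, so control
 * resumes after the original context_save_arch() call with a return
 * value of 0.
 */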
context_restore_arch:
	alloc loc0 = ar.pfs, 1, 9, 0, 0 ;;

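	/*
	 * The loads below walk the save area in the same order in which
	 * context_save_arch() stored it.
	 */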
	ld8 loc0 = [in0], 8 ;;		/* load ar.pfs */
	ld8 loc1 = [in0], 8 ;;		/* load ar.unat (caller) */
	ld8 loc2 = [in0], 8 ;;		/* load ar.unat (callee) */
	ld8 loc3 = [in0], 8 ;;		/* load ar.rsc */
	ld8 loc4 = [in0], 8 ;;		/* load ar.bsp */
	ld8 loc5 = [in0], 8 ;;		/* load ar.rnat */
	ld8 loc6 = [in0], 8 ;;		/* load ar.lc */

	.auto

	/*
	 * Invalidate the ALAT
	 */
	invala
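	/*
	 * The ALAT tracks outstanding advanced loads; its entries belong
	 * to the previously running context and must not survive the
	 * switch.
	 */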

	/*
	 * Put the RSE into enforced lazy mode
	 * so that ar.bspstore and ar.rnat can be written.
	 */
	movl loc8 = ~3
	and loc8 = loc3, loc8
	mov ar.rsc = loc8

	/*
	 * Flush dirty registers to backing store.
	 * We do this because we want the following move
	 * to ar.bspstore to assign the same value to ar.bsp.
	 */
	flushrs

	/*
	 * Restore application registers
	 */
	mov ar.bspstore = loc4		/* rse.bspload = ar.bsp = ar.bspstore = loc4 */
	mov ar.rnat = loc5
	mov ar.pfs = loc0
	mov ar.rsc = loc3

	.explicit

	mov ar.unat = loc2 ;;
	mov ar.lc = loc6

	/*
	 * Restore general registers including NaT bits
	 */
	ld8.fill r1 = [in0], 8 ;;
	ld8.fill r4 = [in0], 8 ;;
	ld8.fill r5 = [in0], 8 ;;
	ld8.fill r6 = [in0], 8 ;;
	ld8.fill r7 = [in0], 8 ;;
	ld8.fill r12 = [in0], 8 ;;	/* restore sp */
	ld8.fill r13 = [in0], 8 ;;

	/*
	 * Restore branch registers
	 */
	ld8 loc2 = [in0], 8 ;;		/* restore pc */
	mov b0 = loc2
	ld8 loc3 = [in0], 8 ;;
	mov b1 = loc3
	ld8 loc4 = [in0], 8 ;;
	mov b2 = loc4
	ld8 loc5 = [in0], 8 ;;
	mov b3 = loc5
	ld8 loc6 = [in0], 8 ;;
	mov b4 = loc6
	ld8 loc7 = [in0], 8 ;;
	mov b5 = loc7

	/*
	 * Restore predicate registers
	 */
	ld8 loc2 = [in0], 16 ;;
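	/* the ~0 mask selects all predicate registers for the write */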
	mov pr = loc2, ~0

	/*
	 * Restore floating-point registers.
	 */
	ldf.fill f2 = [in0], 16 ;;
	ldf.fill f3 = [in0], 16 ;;
	ldf.fill f4 = [in0], 16 ;;
	ldf.fill f5 = [in0], 16 ;;

	ldf.fill f16 = [in0], 16 ;;
	ldf.fill f17 = [in0], 16 ;;
	ldf.fill f18 = [in0], 16 ;;
	ldf.fill f19 = [in0], 16 ;;
	ldf.fill f20 = [in0], 16 ;;
	ldf.fill f21 = [in0], 16 ;;
	ldf.fill f22 = [in0], 16 ;;
	ldf.fill f23 = [in0], 16 ;;
	ldf.fill f24 = [in0], 16 ;;
	ldf.fill f25 = [in0], 16 ;;
	ldf.fill f26 = [in0], 16 ;;
	ldf.fill f27 = [in0], 16 ;;
	ldf.fill f28 = [in0], 16 ;;
	ldf.fill f29 = [in0], 16 ;;
	ldf.fill f30 = [in0], 16 ;;
	ldf.fill f31 = [in0], 16 ;;

	mov ar.unat = loc1

	mov r8 = r0			/* context_restore returns 0 */
	br.ret.sptk.many b0