#
# Copyright (c) 2005 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

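/*
 * context_save_arch() stores the registers that the IA-64 software
 * conventions require to be preserved across calls into the structure
 * pointed to by in0 and returns 1 in r8, the return value register.
 * context_restore_arch() reloads that structure and branches to the
 * saved b0, so control re-emerges from the original context_save_arch()
 * call, this time with 0 in r8 (a setjmp()/longjmp()-like convention).
 */
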
.text

.global context_save_arch
.global context_restore_arch

context_save_arch:
	alloc loc0 = ar.pfs, 1, 8, 0, 0
	mov loc1 = ar.unat ;;
	/* loc2 */
	mov loc3 = ar.rsc

	.auto

	/*
	 * Flush dirty registers to backing store.
	 * After this ar.bsp and ar.bspstore are equal.
	 */
	flushrs
	mov loc4 = ar.bsp

	/*
	 * Put RSE to enforced lazy mode.
	 * So that ar.rnat can be read.
	 */
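	/*
	 * Bits 1:0 of ar.rsc form the RSE mode field; the ~3 mask
	 * below clears them, selecting enforced lazy mode.
	 */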
	and loc5 = ~3, loc3
	mov ar.rsc = loc5
	mov loc5 = ar.rnat

	.explicit

	mov loc6 = ar.lc

	/*
	 * Save application registers
	 */
	st8 [in0] = loc0, 8 ;;		/* save ar.pfs */
	st8 [in0] = loc1, 8 ;;		/* save ar.unat (caller) */
	mov loc2 = in0 ;;
	add in0 = 8, in0 ;;		/* skip ar.unat (callee) */
	st8 [in0] = loc3, 8 ;;		/* save ar.rsc */
	st8 [in0] = loc4, 8 ;;		/* save ar.bsp */
	st8 [in0] = loc5, 8 ;;		/* save ar.rnat */
	st8 [in0] = loc6, 8 ;;		/* save ar.lc */

	/*
	 * Save general registers including NaT bits
	 */
	st8.spill [in0] = r1, 8 ;;
	st8.spill [in0] = r4, 8 ;;
	st8.spill [in0] = r5, 8 ;;
	st8.spill [in0] = r6, 8 ;;
	st8.spill [in0] = r7, 8 ;;
	st8.spill [in0] = r12, 8 ;;	/* save sp */
	st8.spill [in0] = r13, 8 ;;

	mov loc3 = ar.unat ;;
	st8 [loc2] = loc3		/* save ar.unat (callee) */

	/*
	 * Save branch registers
	 */
	mov loc2 = b0 ;;
	st8 [in0] = loc2, 8		/* save pc */
	mov loc3 = b1 ;;
	st8 [in0] = loc3, 8
	mov loc4 = b2 ;;
	st8 [in0] = loc4, 8
	mov loc5 = b3 ;;
	st8 [in0] = loc5, 8
	mov loc6 = b4 ;;
	st8 [in0] = loc6, 8
	mov loc7 = b5 ;;
	st8 [in0] = loc7, 8

	/*
	 * Save predicate registers
	 */
	mov loc2 = pr ;;
	st8 [in0] = loc2, 16 ;;		/* Next fpu registers should be spilled to 16B aligned address */

	/*
	 * Save floating-point registers.
	 */
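	/*
	 * Each stf.spill stores the full 82-bit register in a 16-byte
	 * memory image, hence the 16-byte stride and alignment.
	 */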
	stf.spill [in0] = f2, 16 ;;
	stf.spill [in0] = f3, 16 ;;
	stf.spill [in0] = f4, 16 ;;
	stf.spill [in0] = f5, 16 ;;

	stf.spill [in0] = f16, 16 ;;
	stf.spill [in0] = f17, 16 ;;
	stf.spill [in0] = f18, 16 ;;
	stf.spill [in0] = f19, 16 ;;
	stf.spill [in0] = f20, 16 ;;
	stf.spill [in0] = f21, 16 ;;
	stf.spill [in0] = f22, 16 ;;
	stf.spill [in0] = f23, 16 ;;
	stf.spill [in0] = f24, 16 ;;
	stf.spill [in0] = f25, 16 ;;
	stf.spill [in0] = f26, 16 ;;
	stf.spill [in0] = f27, 16 ;;
	stf.spill [in0] = f28, 16 ;;
	stf.spill [in0] = f29, 16 ;;
	stf.spill [in0] = f30, 16 ;;
	stf.spill [in0] = f31, 16 ;;

	mov ar.unat = loc1

	add r8 = r0, r0, 1		/* context_save returns 1 */
	br.ret.sptk.many b0

context_restore_arch:
	alloc loc0 = ar.pfs, 1, 9, 0, 0 ;;

	ld8 loc0 = [in0], 8 ;;		/* load ar.pfs */
	ld8 loc1 = [in0], 8 ;;		/* load ar.unat (caller) */
	ld8 loc2 = [in0], 8 ;;		/* load ar.unat (callee) */
	ld8 loc3 = [in0], 8 ;;		/* load ar.rsc */
	ld8 loc4 = [in0], 8 ;;		/* load ar.bsp */
	ld8 loc5 = [in0], 8 ;;		/* load ar.rnat */
	ld8 loc6 = [in0], 8 ;;		/* load ar.lc */

	.auto

	/*
	 * Invalidate the ALAT
	 */
	invala

	/*
	 * Put RSE to enforced lazy mode.
	 * So that ar.bspstore and ar.rnat can be written.
	 */
	movl loc8 = ~3
	and loc8 = loc3, loc8
	mov ar.rsc = loc8

	/*
	 * Flush dirty registers to backing store.
	 * We do this because we want the following move
	 * to ar.bspstore to assign the same value to ar.bsp.
	 */
	flushrs

	/*
	 * Restore application registers
	 */
	mov ar.bspstore = loc4		/* rse.bspload = ar.bsp = ar.bspstore = loc4 */
	mov ar.rnat = loc5
	mov ar.pfs = loc0
	mov ar.rsc = loc3

	.explicit

	mov ar.unat = loc2 ;;
	mov ar.lc = loc6

	/*
	 * Restore general registers including NaT bits
	 */
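	/* NaT bits come from the callee copy of ar.unat loaded above */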
	ld8.fill r1 = [in0], 8 ;;
	ld8.fill r4 = [in0], 8 ;;
	ld8.fill r5 = [in0], 8 ;;
	ld8.fill r6 = [in0], 8 ;;
	ld8.fill r7 = [in0], 8 ;;
	ld8.fill r12 = [in0], 8 ;;	/* restore sp */
	ld8.fill r13 = [in0], 8 ;;

	/*
	 * Restore branch registers
	 */
	ld8 loc2 = [in0], 8 ;;		/* restore pc */
	mov b0 = loc2
	ld8 loc3 = [in0], 8 ;;
	mov b1 = loc3
	ld8 loc4 = [in0], 8 ;;
	mov b2 = loc4
	ld8 loc5 = [in0], 8 ;;
	mov b3 = loc5
	ld8 loc6 = [in0], 8 ;;
	mov b4 = loc6
	ld8 loc7 = [in0], 8 ;;
	mov b5 = loc7

	/*
	 * Restore predicate registers
	 */
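	/* the all-ones mask selects every writable predicate register */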
	ld8 loc2 = [in0], 16 ;;
	mov pr = loc2, ~0

	/*
	 * Restore floating-point registers.
	 */
	ldf.fill f2 = [in0], 16 ;;
	ldf.fill f3 = [in0], 16 ;;
	ldf.fill f4 = [in0], 16 ;;
	ldf.fill f5 = [in0], 16 ;;

	ldf.fill f16 = [in0], 16 ;;
	ldf.fill f17 = [in0], 16 ;;
	ldf.fill f18 = [in0], 16 ;;
	ldf.fill f19 = [in0], 16 ;;
	ldf.fill f20 = [in0], 16 ;;
	ldf.fill f21 = [in0], 16 ;;
	ldf.fill f22 = [in0], 16 ;;
	ldf.fill f23 = [in0], 16 ;;
	ldf.fill f24 = [in0], 16 ;;
	ldf.fill f25 = [in0], 16 ;;
	ldf.fill f26 = [in0], 16 ;;
	ldf.fill f27 = [in0], 16 ;;
	ldf.fill f28 = [in0], 16 ;;
	ldf.fill f29 = [in0], 16 ;;
	ldf.fill f30 = [in0], 16 ;;
	ldf.fill f31 = [in0], 16 ;;

	mov ar.unat = loc1

	mov r8 = r0			/* context_restore returns 0 */
	br.ret.sptk.many b0