Rev 1031 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1031 | Rev 1657 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2005 Josef Cejka |
2 | * Copyright (C) 2005 Josef Cejka |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
- | 29 | /** @addtogroup softfloat |
|
- | 30 | * @{ |
|
- | 31 | */ |
|
- | 32 | /** @file |
|
- | 33 | */ |
|
- | 34 | ||
29 | #include<sftypes.h> |
35 | #include<sftypes.h> |
30 | #include<mul.h> |
36 | #include<mul.h> |
31 | #include<comparison.h> |
37 | #include<comparison.h> |
32 | #include<common.h> |
38 | #include<common.h> |
33 | 39 | ||
34 | /** Multiply two 32 bit float numbers |
40 | /** Multiply two 32 bit float numbers |
35 | * |
41 | * |
36 | */ |
42 | */ |
37 | float32 mulFloat32(float32 a, float32 b) |
43 | float32 mulFloat32(float32 a, float32 b) |
38 | { |
44 | { |
39 | float32 result; |
45 | float32 result; |
40 | uint64_t frac1, frac2; |
46 | uint64_t frac1, frac2; |
41 | int32_t exp; |
47 | int32_t exp; |
42 | 48 | ||
43 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
49 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
44 | 50 | ||
45 | if (isFloat32NaN(a) || isFloat32NaN(b) ) { |
51 | if (isFloat32NaN(a) || isFloat32NaN(b) ) { |
46 | /* TODO: fix SigNaNs */ |
52 | /* TODO: fix SigNaNs */ |
47 | if (isFloat32SigNaN(a)) { |
53 | if (isFloat32SigNaN(a)) { |
48 | result.parts.fraction = a.parts.fraction; |
54 | result.parts.fraction = a.parts.fraction; |
49 | result.parts.exp = a.parts.exp; |
55 | result.parts.exp = a.parts.exp; |
50 | return result; |
56 | return result; |
51 | }; |
57 | }; |
52 | if (isFloat32SigNaN(b)) { /* TODO: fix SigNaN */ |
58 | if (isFloat32SigNaN(b)) { /* TODO: fix SigNaN */ |
53 | result.parts.fraction = b.parts.fraction; |
59 | result.parts.fraction = b.parts.fraction; |
54 | result.parts.exp = b.parts.exp; |
60 | result.parts.exp = b.parts.exp; |
55 | return result; |
61 | return result; |
56 | }; |
62 | }; |
57 | /* set NaN as result */ |
63 | /* set NaN as result */ |
58 | result.binary = FLOAT32_NAN; |
64 | result.binary = FLOAT32_NAN; |
59 | return result; |
65 | return result; |
60 | }; |
66 | }; |
61 | 67 | ||
62 | if (isFloat32Infinity(a)) { |
68 | if (isFloat32Infinity(a)) { |
63 | if (isFloat32Zero(b)) { |
69 | if (isFloat32Zero(b)) { |
64 | /* FIXME: zero * infinity */ |
70 | /* FIXME: zero * infinity */ |
65 | result.binary = FLOAT32_NAN; |
71 | result.binary = FLOAT32_NAN; |
66 | return result; |
72 | return result; |
67 | } |
73 | } |
68 | result.parts.fraction = a.parts.fraction; |
74 | result.parts.fraction = a.parts.fraction; |
69 | result.parts.exp = a.parts.exp; |
75 | result.parts.exp = a.parts.exp; |
70 | return result; |
76 | return result; |
71 | } |
77 | } |
72 | 78 | ||
73 | if (isFloat32Infinity(b)) { |
79 | if (isFloat32Infinity(b)) { |
74 | if (isFloat32Zero(a)) { |
80 | if (isFloat32Zero(a)) { |
75 | /* FIXME: zero * infinity */ |
81 | /* FIXME: zero * infinity */ |
76 | result.binary = FLOAT32_NAN; |
82 | result.binary = FLOAT32_NAN; |
77 | return result; |
83 | return result; |
78 | } |
84 | } |
79 | result.parts.fraction = b.parts.fraction; |
85 | result.parts.fraction = b.parts.fraction; |
80 | result.parts.exp = b.parts.exp; |
86 | result.parts.exp = b.parts.exp; |
81 | return result; |
87 | return result; |
82 | } |
88 | } |
83 | 89 | ||
84 | /* exp is signed so we can easy detect underflow */ |
90 | /* exp is signed so we can easy detect underflow */ |
85 | exp = a.parts.exp + b.parts.exp; |
91 | exp = a.parts.exp + b.parts.exp; |
86 | exp -= FLOAT32_BIAS; |
92 | exp -= FLOAT32_BIAS; |
87 | 93 | ||
88 | if (exp >= FLOAT32_MAX_EXPONENT) { |
94 | if (exp >= FLOAT32_MAX_EXPONENT) { |
89 | /* FIXME: overflow */ |
95 | /* FIXME: overflow */ |
90 | /* set infinity as result */ |
96 | /* set infinity as result */ |
91 | result.binary = FLOAT32_INF; |
97 | result.binary = FLOAT32_INF; |
92 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
98 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
93 | return result; |
99 | return result; |
94 | }; |
100 | }; |
95 | 101 | ||
96 | if (exp < 0) { |
102 | if (exp < 0) { |
97 | /* FIXME: underflow */ |
103 | /* FIXME: underflow */ |
98 | /* return signed zero */ |
104 | /* return signed zero */ |
99 | result.parts.fraction = 0x0; |
105 | result.parts.fraction = 0x0; |
100 | result.parts.exp = 0x0; |
106 | result.parts.exp = 0x0; |
101 | return result; |
107 | return result; |
102 | }; |
108 | }; |
103 | 109 | ||
104 | frac1 = a.parts.fraction; |
110 | frac1 = a.parts.fraction; |
105 | if (a.parts.exp > 0) { |
111 | if (a.parts.exp > 0) { |
106 | frac1 |= FLOAT32_HIDDEN_BIT_MASK; |
112 | frac1 |= FLOAT32_HIDDEN_BIT_MASK; |
107 | } else { |
113 | } else { |
108 | ++exp; |
114 | ++exp; |
109 | }; |
115 | }; |
110 | 116 | ||
111 | frac2 = b.parts.fraction; |
117 | frac2 = b.parts.fraction; |
112 | 118 | ||
113 | if (b.parts.exp > 0) { |
119 | if (b.parts.exp > 0) { |
114 | frac2 |= FLOAT32_HIDDEN_BIT_MASK; |
120 | frac2 |= FLOAT32_HIDDEN_BIT_MASK; |
115 | } else { |
121 | } else { |
116 | ++exp; |
122 | ++exp; |
117 | }; |
123 | }; |
118 | 124 | ||
119 | frac1 <<= 1; /* one bit space for rounding */ |
125 | frac1 <<= 1; /* one bit space for rounding */ |
120 | 126 | ||
121 | frac1 = frac1 * frac2; |
127 | frac1 = frac1 * frac2; |
122 | /* round and return */ |
128 | /* round and return */ |
123 | 129 | ||
124 | while ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= ( 1 << (FLOAT32_FRACTION_SIZE + 2)))) { |
130 | while ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= ( 1 << (FLOAT32_FRACTION_SIZE + 2)))) { |
125 | /* 23 bits of fraction + one more for hidden bit (all shifted 1 bit left)*/ |
131 | /* 23 bits of fraction + one more for hidden bit (all shifted 1 bit left)*/ |
126 | ++exp; |
132 | ++exp; |
127 | frac1 >>= 1; |
133 | frac1 >>= 1; |
128 | }; |
134 | }; |
129 | 135 | ||
130 | /* rounding */ |
136 | /* rounding */ |
131 | /* ++frac1; FIXME: not works - without it is ok */ |
137 | /* ++frac1; FIXME: not works - without it is ok */ |
132 | frac1 >>= 1; /* shift off rounding space */ |
138 | frac1 >>= 1; /* shift off rounding space */ |
133 | 139 | ||
134 | if ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= (1 << (FLOAT32_FRACTION_SIZE + 1)))) { |
140 | if ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= (1 << (FLOAT32_FRACTION_SIZE + 1)))) { |
135 | ++exp; |
141 | ++exp; |
136 | frac1 >>= 1; |
142 | frac1 >>= 1; |
137 | }; |
143 | }; |
138 | 144 | ||
139 | if (exp >= FLOAT32_MAX_EXPONENT ) { |
145 | if (exp >= FLOAT32_MAX_EXPONENT ) { |
140 | /* TODO: fix overflow */ |
146 | /* TODO: fix overflow */ |
141 | /* return infinity*/ |
147 | /* return infinity*/ |
142 | result.parts.exp = FLOAT32_MAX_EXPONENT; |
148 | result.parts.exp = FLOAT32_MAX_EXPONENT; |
143 | result.parts.fraction = 0x0; |
149 | result.parts.fraction = 0x0; |
144 | return result; |
150 | return result; |
145 | } |
151 | } |
146 | 152 | ||
147 | exp -= FLOAT32_FRACTION_SIZE; |
153 | exp -= FLOAT32_FRACTION_SIZE; |
148 | 154 | ||
149 | if (exp <= FLOAT32_FRACTION_SIZE) { |
155 | if (exp <= FLOAT32_FRACTION_SIZE) { |
150 | /* denormalized number */ |
156 | /* denormalized number */ |
151 | frac1 >>= 1; /* denormalize */ |
157 | frac1 >>= 1; /* denormalize */ |
152 | while ((frac1 > 0) && (exp < 0)) { |
158 | while ((frac1 > 0) && (exp < 0)) { |
153 | frac1 >>= 1; |
159 | frac1 >>= 1; |
154 | ++exp; |
160 | ++exp; |
155 | }; |
161 | }; |
156 | if (frac1 == 0) { |
162 | if (frac1 == 0) { |
157 | /* FIXME : underflow */ |
163 | /* FIXME : underflow */ |
158 | result.parts.exp = 0; |
164 | result.parts.exp = 0; |
159 | result.parts.fraction = 0; |
165 | result.parts.fraction = 0; |
160 | return result; |
166 | return result; |
161 | }; |
167 | }; |
162 | }; |
168 | }; |
163 | result.parts.exp = exp; |
169 | result.parts.exp = exp; |
164 | result.parts.fraction = frac1 & ( (1 << FLOAT32_FRACTION_SIZE) - 1); |
170 | result.parts.fraction = frac1 & ( (1 << FLOAT32_FRACTION_SIZE) - 1); |
165 | 171 | ||
166 | return result; |
172 | return result; |
167 | 173 | ||
168 | } |
174 | } |
169 | 175 | ||
170 | /** Multiply two 64 bit float numbers |
176 | /** Multiply two 64 bit float numbers |
171 | * |
177 | * |
172 | */ |
178 | */ |
173 | float64 mulFloat64(float64 a, float64 b) |
179 | float64 mulFloat64(float64 a, float64 b) |
174 | { |
180 | { |
175 | float64 result; |
181 | float64 result; |
176 | uint64_t frac1, frac2; |
182 | uint64_t frac1, frac2; |
177 | int32_t exp; |
183 | int32_t exp; |
178 | 184 | ||
179 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
185 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
180 | 186 | ||
181 | if (isFloat64NaN(a) || isFloat64NaN(b) ) { |
187 | if (isFloat64NaN(a) || isFloat64NaN(b) ) { |
182 | /* TODO: fix SigNaNs */ |
188 | /* TODO: fix SigNaNs */ |
183 | if (isFloat64SigNaN(a)) { |
189 | if (isFloat64SigNaN(a)) { |
184 | result.parts.fraction = a.parts.fraction; |
190 | result.parts.fraction = a.parts.fraction; |
185 | result.parts.exp = a.parts.exp; |
191 | result.parts.exp = a.parts.exp; |
186 | return result; |
192 | return result; |
187 | }; |
193 | }; |
188 | if (isFloat64SigNaN(b)) { /* TODO: fix SigNaN */ |
194 | if (isFloat64SigNaN(b)) { /* TODO: fix SigNaN */ |
189 | result.parts.fraction = b.parts.fraction; |
195 | result.parts.fraction = b.parts.fraction; |
190 | result.parts.exp = b.parts.exp; |
196 | result.parts.exp = b.parts.exp; |
191 | return result; |
197 | return result; |
192 | }; |
198 | }; |
193 | /* set NaN as result */ |
199 | /* set NaN as result */ |
194 | result.binary = FLOAT64_NAN; |
200 | result.binary = FLOAT64_NAN; |
195 | return result; |
201 | return result; |
196 | }; |
202 | }; |
197 | 203 | ||
198 | if (isFloat64Infinity(a)) { |
204 | if (isFloat64Infinity(a)) { |
199 | if (isFloat64Zero(b)) { |
205 | if (isFloat64Zero(b)) { |
200 | /* FIXME: zero * infinity */ |
206 | /* FIXME: zero * infinity */ |
201 | result.binary = FLOAT64_NAN; |
207 | result.binary = FLOAT64_NAN; |
202 | return result; |
208 | return result; |
203 | } |
209 | } |
204 | result.parts.fraction = a.parts.fraction; |
210 | result.parts.fraction = a.parts.fraction; |
205 | result.parts.exp = a.parts.exp; |
211 | result.parts.exp = a.parts.exp; |
206 | return result; |
212 | return result; |
207 | } |
213 | } |
208 | 214 | ||
209 | if (isFloat64Infinity(b)) { |
215 | if (isFloat64Infinity(b)) { |
210 | if (isFloat64Zero(a)) { |
216 | if (isFloat64Zero(a)) { |
211 | /* FIXME: zero * infinity */ |
217 | /* FIXME: zero * infinity */ |
212 | result.binary = FLOAT64_NAN; |
218 | result.binary = FLOAT64_NAN; |
213 | return result; |
219 | return result; |
214 | } |
220 | } |
215 | result.parts.fraction = b.parts.fraction; |
221 | result.parts.fraction = b.parts.fraction; |
216 | result.parts.exp = b.parts.exp; |
222 | result.parts.exp = b.parts.exp; |
217 | return result; |
223 | return result; |
218 | } |
224 | } |
219 | 225 | ||
220 | /* exp is signed so we can easy detect underflow */ |
226 | /* exp is signed so we can easy detect underflow */ |
221 | exp = a.parts.exp + b.parts.exp - FLOAT64_BIAS; |
227 | exp = a.parts.exp + b.parts.exp - FLOAT64_BIAS; |
222 | 228 | ||
223 | frac1 = a.parts.fraction; |
229 | frac1 = a.parts.fraction; |
224 | 230 | ||
225 | if (a.parts.exp > 0) { |
231 | if (a.parts.exp > 0) { |
226 | frac1 |= FLOAT64_HIDDEN_BIT_MASK; |
232 | frac1 |= FLOAT64_HIDDEN_BIT_MASK; |
227 | } else { |
233 | } else { |
228 | ++exp; |
234 | ++exp; |
229 | }; |
235 | }; |
230 | 236 | ||
231 | frac2 = b.parts.fraction; |
237 | frac2 = b.parts.fraction; |
232 | 238 | ||
233 | if (b.parts.exp > 0) { |
239 | if (b.parts.exp > 0) { |
234 | frac2 |= FLOAT64_HIDDEN_BIT_MASK; |
240 | frac2 |= FLOAT64_HIDDEN_BIT_MASK; |
235 | } else { |
241 | } else { |
236 | ++exp; |
242 | ++exp; |
237 | }; |
243 | }; |
238 | 244 | ||
239 | frac1 <<= (64 - FLOAT64_FRACTION_SIZE - 1); |
245 | frac1 <<= (64 - FLOAT64_FRACTION_SIZE - 1); |
240 | frac2 <<= (64 - FLOAT64_FRACTION_SIZE - 2); |
246 | frac2 <<= (64 - FLOAT64_FRACTION_SIZE - 2); |
241 | 247 | ||
242 | mul64integers(frac1, frac2, &frac1, &frac2); |
248 | mul64integers(frac1, frac2, &frac1, &frac2); |
243 | 249 | ||
244 | frac2 |= (frac1 != 0); |
250 | frac2 |= (frac1 != 0); |
245 | if (frac2 & (0x1ll << 62)) { |
251 | if (frac2 & (0x1ll << 62)) { |
246 | frac2 <<= 1; |
252 | frac2 <<= 1; |
247 | exp--; |
253 | exp--; |
248 | } |
254 | } |
249 | 255 | ||
250 | result = finishFloat64(exp, frac2, result.parts.sign); |
256 | result = finishFloat64(exp, frac2, result.parts.sign); |
251 | return result; |
257 | return result; |
252 | } |
258 | } |
253 | 259 | ||
/** Multiply two 64 bit numbers and return the 128 bit result in two parts.
 *
 * Schoolbook multiplication on 32 bit halves with explicit carry
 * propagation.
 *
 * @param a first operand
 * @param b second operand
 * @param lo receives the lower 64 bits of the product
 * @param hi receives the upper 64 bits of the product
 */
void mul64integers(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	uint64_t a_lo = a & 0xFFFFFFFF;
	uint64_t a_hi = a >> 32;
	uint64_t b_lo = b & 0xFFFFFFFF;
	uint64_t b_hi = b >> 32;

	/* four 32x32 -> 64 bit partial products */
	uint64_t low = a_lo * b_lo;
	uint64_t cross = a_hi * b_lo;
	uint64_t cross2 = a_lo * b_hi;
	uint64_t high = a_hi * b_hi;

	/* sum the two middle products; a wraparound means a carry of 2^32
	 * into the high word */
	cross += cross2;
	if (cross < cross2)
		high += (uint64_t) 1 << 32;
	high += cross >> 32;

	/* fold the low 32 bits of the middle sum into the low word,
	 * carrying into the high word on overflow */
	cross <<= 32;
	low += cross;
	if (low < cross)
		++high;

	*lo = low;
	*hi = high;
}
286 | 292 | ||
287 | 293 | ||
- | 294 | ||
- | 295 | /** @} |
|
- | 296 | */ |
|
- | 297 | ||
288 | 298 |