Rev 1657 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1657 | Rev 1740 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2005 Josef Cejka |
2 | * Copyright (C) 2005 Josef Cejka |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
29 | /** @addtogroup softfloat |
29 | /** @addtogroup softfloat |
30 | * @{ |
30 | * @{ |
31 | */ |
31 | */ |
32 | /** @file |
32 | /** @file |
33 | */ |
33 | */ |
34 | 34 | ||
35 | #include<sftypes.h> |
35 | #include<sftypes.h> |
36 | #include<mul.h> |
36 | #include<mul.h> |
37 | #include<comparison.h> |
37 | #include<comparison.h> |
38 | #include<common.h> |
38 | #include<common.h> |
39 | 39 | ||
40 | /** Multiply two 32 bit float numbers |
40 | /** Multiply two 32 bit float numbers |
41 | * |
41 | * |
42 | */ |
42 | */ |
43 | float32 mulFloat32(float32 a, float32 b) |
43 | float32 mulFloat32(float32 a, float32 b) |
44 | { |
44 | { |
45 | float32 result; |
45 | float32 result; |
46 | uint64_t frac1, frac2; |
46 | uint64_t frac1, frac2; |
47 | int32_t exp; |
47 | int32_t exp; |
48 | 48 | ||
49 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
49 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
50 | 50 | ||
51 | if (isFloat32NaN(a) || isFloat32NaN(b) ) { |
51 | if (isFloat32NaN(a) || isFloat32NaN(b) ) { |
52 | /* TODO: fix SigNaNs */ |
52 | /* TODO: fix SigNaNs */ |
53 | if (isFloat32SigNaN(a)) { |
53 | if (isFloat32SigNaN(a)) { |
54 | result.parts.fraction = a.parts.fraction; |
54 | result.parts.fraction = a.parts.fraction; |
55 | result.parts.exp = a.parts.exp; |
55 | result.parts.exp = a.parts.exp; |
56 | return result; |
56 | return result; |
57 | }; |
57 | }; |
58 | if (isFloat32SigNaN(b)) { /* TODO: fix SigNaN */ |
58 | if (isFloat32SigNaN(b)) { /* TODO: fix SigNaN */ |
59 | result.parts.fraction = b.parts.fraction; |
59 | result.parts.fraction = b.parts.fraction; |
60 | result.parts.exp = b.parts.exp; |
60 | result.parts.exp = b.parts.exp; |
61 | return result; |
61 | return result; |
62 | }; |
62 | }; |
63 | /* set NaN as result */ |
63 | /* set NaN as result */ |
64 | result.binary = FLOAT32_NAN; |
64 | result.binary = FLOAT32_NAN; |
65 | return result; |
65 | return result; |
66 | }; |
66 | }; |
67 | 67 | ||
68 | if (isFloat32Infinity(a)) { |
68 | if (isFloat32Infinity(a)) { |
69 | if (isFloat32Zero(b)) { |
69 | if (isFloat32Zero(b)) { |
70 | /* FIXME: zero * infinity */ |
70 | /* FIXME: zero * infinity */ |
71 | result.binary = FLOAT32_NAN; |
71 | result.binary = FLOAT32_NAN; |
72 | return result; |
72 | return result; |
73 | } |
73 | } |
74 | result.parts.fraction = a.parts.fraction; |
74 | result.parts.fraction = a.parts.fraction; |
75 | result.parts.exp = a.parts.exp; |
75 | result.parts.exp = a.parts.exp; |
76 | return result; |
76 | return result; |
77 | } |
77 | } |
78 | 78 | ||
79 | if (isFloat32Infinity(b)) { |
79 | if (isFloat32Infinity(b)) { |
80 | if (isFloat32Zero(a)) { |
80 | if (isFloat32Zero(a)) { |
81 | /* FIXME: zero * infinity */ |
81 | /* FIXME: zero * infinity */ |
82 | result.binary = FLOAT32_NAN; |
82 | result.binary = FLOAT32_NAN; |
83 | return result; |
83 | return result; |
84 | } |
84 | } |
85 | result.parts.fraction = b.parts.fraction; |
85 | result.parts.fraction = b.parts.fraction; |
86 | result.parts.exp = b.parts.exp; |
86 | result.parts.exp = b.parts.exp; |
87 | return result; |
87 | return result; |
88 | } |
88 | } |
89 | 89 | ||
90 | /* exp is signed so we can easy detect underflow */ |
90 | /* exp is signed so we can easy detect underflow */ |
91 | exp = a.parts.exp + b.parts.exp; |
91 | exp = a.parts.exp + b.parts.exp; |
92 | exp -= FLOAT32_BIAS; |
92 | exp -= FLOAT32_BIAS; |
93 | 93 | ||
94 | if (exp >= FLOAT32_MAX_EXPONENT) { |
94 | if (exp >= FLOAT32_MAX_EXPONENT) { |
95 | /* FIXME: overflow */ |
95 | /* FIXME: overflow */ |
96 | /* set infinity as result */ |
96 | /* set infinity as result */ |
97 | result.binary = FLOAT32_INF; |
97 | result.binary = FLOAT32_INF; |
98 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
98 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
99 | return result; |
99 | return result; |
100 | }; |
100 | }; |
101 | 101 | ||
102 | if (exp < 0) { |
102 | if (exp < 0) { |
103 | /* FIXME: underflow */ |
103 | /* FIXME: underflow */ |
104 | /* return signed zero */ |
104 | /* return signed zero */ |
105 | result.parts.fraction = 0x0; |
105 | result.parts.fraction = 0x0; |
106 | result.parts.exp = 0x0; |
106 | result.parts.exp = 0x0; |
107 | return result; |
107 | return result; |
108 | }; |
108 | }; |
109 | 109 | ||
110 | frac1 = a.parts.fraction; |
110 | frac1 = a.parts.fraction; |
111 | if (a.parts.exp > 0) { |
111 | if (a.parts.exp > 0) { |
112 | frac1 |= FLOAT32_HIDDEN_BIT_MASK; |
112 | frac1 |= FLOAT32_HIDDEN_BIT_MASK; |
113 | } else { |
113 | } else { |
114 | ++exp; |
114 | ++exp; |
115 | }; |
115 | }; |
116 | 116 | ||
117 | frac2 = b.parts.fraction; |
117 | frac2 = b.parts.fraction; |
118 | 118 | ||
119 | if (b.parts.exp > 0) { |
119 | if (b.parts.exp > 0) { |
120 | frac2 |= FLOAT32_HIDDEN_BIT_MASK; |
120 | frac2 |= FLOAT32_HIDDEN_BIT_MASK; |
121 | } else { |
121 | } else { |
122 | ++exp; |
122 | ++exp; |
123 | }; |
123 | }; |
124 | 124 | ||
125 | frac1 <<= 1; /* one bit space for rounding */ |
125 | frac1 <<= 1; /* one bit space for rounding */ |
126 | 126 | ||
127 | frac1 = frac1 * frac2; |
127 | frac1 = frac1 * frac2; |
128 | /* round and return */ |
128 | /* round and return */ |
129 | 129 | ||
130 | while ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= ( 1 << (FLOAT32_FRACTION_SIZE + 2)))) { |
130 | while ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= ( 1 << (FLOAT32_FRACTION_SIZE + 2)))) { |
131 | /* 23 bits of fraction + one more for hidden bit (all shifted 1 bit left)*/ |
131 | /* 23 bits of fraction + one more for hidden bit (all shifted 1 bit left)*/ |
132 | ++exp; |
132 | ++exp; |
133 | frac1 >>= 1; |
133 | frac1 >>= 1; |
134 | }; |
134 | }; |
135 | 135 | ||
136 | /* rounding */ |
136 | /* rounding */ |
137 | /* ++frac1; FIXME: not works - without it is ok */ |
137 | /* ++frac1; FIXME: not works - without it is ok */ |
138 | frac1 >>= 1; /* shift off rounding space */ |
138 | frac1 >>= 1; /* shift off rounding space */ |
139 | 139 | ||
140 | if ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= (1 << (FLOAT32_FRACTION_SIZE + 1)))) { |
140 | if ((exp < FLOAT32_MAX_EXPONENT) && (frac1 >= (1 << (FLOAT32_FRACTION_SIZE + 1)))) { |
141 | ++exp; |
141 | ++exp; |
142 | frac1 >>= 1; |
142 | frac1 >>= 1; |
143 | }; |
143 | }; |
144 | 144 | ||
145 | if (exp >= FLOAT32_MAX_EXPONENT ) { |
145 | if (exp >= FLOAT32_MAX_EXPONENT ) { |
146 | /* TODO: fix overflow */ |
146 | /* TODO: fix overflow */ |
147 | /* return infinity*/ |
147 | /* return infinity*/ |
148 | result.parts.exp = FLOAT32_MAX_EXPONENT; |
148 | result.parts.exp = FLOAT32_MAX_EXPONENT; |
149 | result.parts.fraction = 0x0; |
149 | result.parts.fraction = 0x0; |
150 | return result; |
150 | return result; |
151 | } |
151 | } |
152 | 152 | ||
153 | exp -= FLOAT32_FRACTION_SIZE; |
153 | exp -= FLOAT32_FRACTION_SIZE; |
154 | 154 | ||
155 | if (exp <= FLOAT32_FRACTION_SIZE) { |
155 | if (exp <= FLOAT32_FRACTION_SIZE) { |
156 | /* denormalized number */ |
156 | /* denormalized number */ |
157 | frac1 >>= 1; /* denormalize */ |
157 | frac1 >>= 1; /* denormalize */ |
158 | while ((frac1 > 0) && (exp < 0)) { |
158 | while ((frac1 > 0) && (exp < 0)) { |
159 | frac1 >>= 1; |
159 | frac1 >>= 1; |
160 | ++exp; |
160 | ++exp; |
161 | }; |
161 | }; |
162 | if (frac1 == 0) { |
162 | if (frac1 == 0) { |
163 | /* FIXME : underflow */ |
163 | /* FIXME : underflow */ |
164 | result.parts.exp = 0; |
164 | result.parts.exp = 0; |
165 | result.parts.fraction = 0; |
165 | result.parts.fraction = 0; |
166 | return result; |
166 | return result; |
167 | }; |
167 | }; |
168 | }; |
168 | }; |
169 | result.parts.exp = exp; |
169 | result.parts.exp = exp; |
170 | result.parts.fraction = frac1 & ( (1 << FLOAT32_FRACTION_SIZE) - 1); |
170 | result.parts.fraction = frac1 & ( (1 << FLOAT32_FRACTION_SIZE) - 1); |
171 | 171 | ||
172 | return result; |
172 | return result; |
173 | 173 | ||
174 | } |
174 | } |
175 | 175 | ||
176 | /** Multiply two 64 bit float numbers |
176 | /** Multiply two 64 bit float numbers |
177 | * |
177 | * |
178 | */ |
178 | */ |
179 | float64 mulFloat64(float64 a, float64 b) |
179 | float64 mulFloat64(float64 a, float64 b) |
180 | { |
180 | { |
181 | float64 result; |
181 | float64 result; |
182 | uint64_t frac1, frac2; |
182 | uint64_t frac1, frac2; |
183 | int32_t exp; |
183 | int32_t exp; |
184 | 184 | ||
185 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
185 | result.parts.sign = a.parts.sign ^ b.parts.sign; |
186 | 186 | ||
187 | if (isFloat64NaN(a) || isFloat64NaN(b) ) { |
187 | if (isFloat64NaN(a) || isFloat64NaN(b) ) { |
188 | /* TODO: fix SigNaNs */ |
188 | /* TODO: fix SigNaNs */ |
189 | if (isFloat64SigNaN(a)) { |
189 | if (isFloat64SigNaN(a)) { |
190 | result.parts.fraction = a.parts.fraction; |
190 | result.parts.fraction = a.parts.fraction; |
191 | result.parts.exp = a.parts.exp; |
191 | result.parts.exp = a.parts.exp; |
192 | return result; |
192 | return result; |
193 | }; |
193 | }; |
194 | if (isFloat64SigNaN(b)) { /* TODO: fix SigNaN */ |
194 | if (isFloat64SigNaN(b)) { /* TODO: fix SigNaN */ |
195 | result.parts.fraction = b.parts.fraction; |
195 | result.parts.fraction = b.parts.fraction; |
196 | result.parts.exp = b.parts.exp; |
196 | result.parts.exp = b.parts.exp; |
197 | return result; |
197 | return result; |
198 | }; |
198 | }; |
199 | /* set NaN as result */ |
199 | /* set NaN as result */ |
200 | result.binary = FLOAT64_NAN; |
200 | result.binary = FLOAT64_NAN; |
201 | return result; |
201 | return result; |
202 | }; |
202 | }; |
203 | 203 | ||
204 | if (isFloat64Infinity(a)) { |
204 | if (isFloat64Infinity(a)) { |
205 | if (isFloat64Zero(b)) { |
205 | if (isFloat64Zero(b)) { |
206 | /* FIXME: zero * infinity */ |
206 | /* FIXME: zero * infinity */ |
207 | result.binary = FLOAT64_NAN; |
207 | result.binary = FLOAT64_NAN; |
208 | return result; |
208 | return result; |
209 | } |
209 | } |
210 | result.parts.fraction = a.parts.fraction; |
210 | result.parts.fraction = a.parts.fraction; |
211 | result.parts.exp = a.parts.exp; |
211 | result.parts.exp = a.parts.exp; |
212 | return result; |
212 | return result; |
213 | } |
213 | } |
214 | 214 | ||
215 | if (isFloat64Infinity(b)) { |
215 | if (isFloat64Infinity(b)) { |
216 | if (isFloat64Zero(a)) { |
216 | if (isFloat64Zero(a)) { |
217 | /* FIXME: zero * infinity */ |
217 | /* FIXME: zero * infinity */ |
218 | result.binary = FLOAT64_NAN; |
218 | result.binary = FLOAT64_NAN; |
219 | return result; |
219 | return result; |
220 | } |
220 | } |
221 | result.parts.fraction = b.parts.fraction; |
221 | result.parts.fraction = b.parts.fraction; |
222 | result.parts.exp = b.parts.exp; |
222 | result.parts.exp = b.parts.exp; |
223 | return result; |
223 | return result; |
224 | } |
224 | } |
225 | 225 | ||
226 | /* exp is signed so we can easy detect underflow */ |
226 | /* exp is signed so we can easy detect underflow */ |
227 | exp = a.parts.exp + b.parts.exp - FLOAT64_BIAS; |
227 | exp = a.parts.exp + b.parts.exp - FLOAT64_BIAS; |
228 | 228 | ||
229 | frac1 = a.parts.fraction; |
229 | frac1 = a.parts.fraction; |
230 | 230 | ||
231 | if (a.parts.exp > 0) { |
231 | if (a.parts.exp > 0) { |
232 | frac1 |= FLOAT64_HIDDEN_BIT_MASK; |
232 | frac1 |= FLOAT64_HIDDEN_BIT_MASK; |
233 | } else { |
233 | } else { |
234 | ++exp; |
234 | ++exp; |
235 | }; |
235 | }; |
236 | 236 | ||
237 | frac2 = b.parts.fraction; |
237 | frac2 = b.parts.fraction; |
238 | 238 | ||
239 | if (b.parts.exp > 0) { |
239 | if (b.parts.exp > 0) { |
240 | frac2 |= FLOAT64_HIDDEN_BIT_MASK; |
240 | frac2 |= FLOAT64_HIDDEN_BIT_MASK; |
241 | } else { |
241 | } else { |
242 | ++exp; |
242 | ++exp; |
243 | }; |
243 | }; |
244 | 244 | ||
245 | frac1 <<= (64 - FLOAT64_FRACTION_SIZE - 1); |
245 | frac1 <<= (64 - FLOAT64_FRACTION_SIZE - 1); |
246 | frac2 <<= (64 - FLOAT64_FRACTION_SIZE - 2); |
246 | frac2 <<= (64 - FLOAT64_FRACTION_SIZE - 2); |
247 | 247 | ||
248 | mul64integers(frac1, frac2, &frac1, &frac2); |
248 | mul64integers(frac1, frac2, &frac1, &frac2); |
249 | 249 | ||
250 | frac2 |= (frac1 != 0); |
250 | frac2 |= (frac1 != 0); |
251 | if (frac2 & (0x1ll << 62)) { |
251 | if (frac2 & (0x1ll << 62)) { |
252 | frac2 <<= 1; |
252 | frac2 <<= 1; |
253 | exp--; |
253 | exp--; |
254 | } |
254 | } |
255 | 255 | ||
256 | result = finishFloat64(exp, frac2, result.parts.sign); |
256 | result = finishFloat64(exp, frac2, result.parts.sign); |
257 | return result; |
257 | return result; |
258 | } |
258 | } |
259 | 259 | ||
/** Multiply two 64 bit numbers and return the 128 bit result in two parts.
 *
 * @param a first operand
 * @param b second operand
 * @param lo lower part of the result
 * @param hi higher part of the result
 */
void mul64integers(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	/* split each operand into 32 bit halves */
	uint32_t a_low = (uint32_t) (a & 0xFFFFFFFF);
	uint32_t b_low = (uint32_t) (b & 0xFFFFFFFF);
	uint64_t a_high = a >> 32;
	uint64_t b_high = b >> 32;

	/* four 32x32 -> 64 bit partial products */
	uint64_t low = ((uint64_t) a_low) * b_low;
	uint64_t mid1 = a_high * b_low;
	uint64_t mid2 = a_low * b_high;
	uint64_t high = a_high * b_high;

	/* combine the two middle terms; a wrap-around means a carry
	 * of 2^64 = (1 << 32) in units of the high word */
	mid1 += mid2;
	high += (((uint64_t) (mid1 < mid2)) << 32) + (mid1 >> 32);

	/* add the low 32 bits of the middle sum into the low word,
	 * propagating any carry into the high word */
	mid1 <<= 32;
	low += mid1;
	high += (low < mid1);

	*lo = low;
	*hi = high;
}
292 | 292 | ||
293 | - | ||
294 | - | ||
295 | /** @} |
293 | /** @} |
296 | */ |
294 | */ |
297 | - | ||
298 | 295 |