Rev 1196 | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1196 | Rev 1702 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2006 Josef Cejka |
2 | * Copyright (C) 2006 Josef Cejka |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
- | 29 | /** @addtogroup genarch |
|
- | 30 | * @{ |
|
- | 31 | */ |
|
- | 32 | /** @file |
|
- | 33 | */ |
|
- | 34 | ||
29 | #include <genarch/softint/division.h> |
35 | #include <genarch/softint/division.h> |
30 | 36 | ||
31 | #define ABSVAL(x) ( (x) > 0 ? (x) : -(x)) |
37 | #define ABSVAL(x) ( (x) > 0 ? (x) : -(x)) |
32 | #define SGN(x) ( (x) >= 0 ? 1 : 0 ) |
38 | #define SGN(x) ( (x) >= 0 ? 1 : 0 ) |
33 | 39 | ||
/** Combined 32-bit unsigned division and modulo.
 *
 * Classic restoring shift-subtract division: the dividend's bits are fed
 * into the remainder one at a time from the most significant end, and the
 * divisor is subtracted whenever it fits.
 *
 * @param a         Dividend.
 * @param b         Divisor.
 * @param remainder Out-parameter receiving a % b (0 when b == 0).
 *
 * @return Quotient a / b, or 0 when b == 0.
 */
static unsigned int divandmod32(unsigned int a, unsigned int b, unsigned int *remainder)
{
    unsigned int quotient = 0;
    unsigned int bits = sizeof(unsigned int) * 8;

    *remainder = 0;

    if (b == 0) {
        /* FIXME: division by zero */
        return 0;
    }

    /* Fast path: quotient is zero, dividend is the remainder. */
    if (a < b) {
        *remainder = a;
        return 0;
    }

    while (bits-- > 0) {
        /* Pull the dividend's top bit into the running remainder. */
        *remainder = (*remainder << 1) | (a >> 31);
        quotient <<= 1;

        if (*remainder >= b) {
            *remainder -= b;
            quotient |= 1;
        }

        a <<= 1;
    }

    return quotient;
}
66 | 72 | ||
67 | 73 | ||
/** Combined 64-bit unsigned division and modulo.
 *
 * Restoring shift-subtract division, identical in structure to the 32-bit
 * variant: dividend bits enter the remainder from the top, the divisor is
 * subtracted whenever it fits.
 *
 * @param a         Dividend.
 * @param b         Divisor.
 * @param remainder Out-parameter receiving a % b (0 when b == 0).
 *
 * @return Quotient a / b, or 0 when b == 0.
 */
static unsigned long long divandmod64(unsigned long long a, unsigned long long b, unsigned long long *remainder)
{
    unsigned long long quotient = 0;
    unsigned int bits = sizeof(unsigned long long) * 8;

    *remainder = 0;

    if (b == 0) {
        /* FIXME: division by zero */
        return 0;
    }

    /* Fast path: quotient is zero, dividend is the remainder. */
    if (a < b) {
        *remainder = a;
        return 0;
    }

    while (bits-- > 0) {
        /* Pull the dividend's top bit into the running remainder. */
        *remainder = (*remainder << 1) | (a >> 63);
        quotient <<= 1;

        if (*remainder >= b) {
            *remainder -= b;
            quotient |= 1;
        }

        a <<= 1;
    }

    return quotient;
}
100 | 106 | ||
101 | /* 32bit integer division */ |
107 | /* 32bit integer division */ |
/** Signed 32-bit division (libgcc entry point).
 *
 * Divides using unsigned magnitudes, then fixes up the sign: the quotient
 * is negative iff the operands' signs differ (truncation toward zero).
 *
 * The magnitudes are computed with unsigned negation rather than ABSVAL()
 * because -INT_MIN overflows a signed int, which is undefined behavior.
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a / b (0 when b == 0, inherited from divandmod32()).
 */
int __divsi3(int a, int b)
{
    unsigned int rem;
    unsigned int ua = (a < 0) ? (0U - (unsigned int) a) : (unsigned int) a;
    unsigned int ub = (b < 0) ? (0U - (unsigned int) b) : (unsigned int) b;
    int result = (int) divandmod32(ua, ub, &rem);

    if ((a < 0) == (b < 0))
        return result;
    return -result;
}
112 | 118 | ||
113 | /* 64bit integer division */ |
119 | /* 64bit integer division */ |
/** Signed 64-bit division (libgcc entry point).
 *
 * Divides using unsigned magnitudes, then fixes up the sign: the quotient
 * is negative iff the operands' signs differ (truncation toward zero).
 *
 * The magnitudes are computed with unsigned negation rather than ABSVAL()
 * because -LLONG_MIN overflows a signed long long, which is undefined
 * behavior.
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a / b (0 when b == 0, inherited from divandmod64()).
 */
long long __divdi3(long long a, long long b)
{
    unsigned long long rem;
    unsigned long long ua = (a < 0) ? (0ULL - (unsigned long long) a) : (unsigned long long) a;
    unsigned long long ub = (b < 0) ? (0ULL - (unsigned long long) b) : (unsigned long long) b;
    long long result = (long long) divandmod64(ua, ub, &rem);

    if ((a < 0) == (b < 0))
        return result;
    return -result;
}
124 | 130 | ||
125 | /* 32bit unsigned integer division */ |
131 | /* 32bit unsigned integer division */ |
/** Unsigned 32-bit division (libgcc entry point).
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a / b (0 when b == 0, inherited from divandmod32()).
 */
unsigned int __udivsi3(unsigned int a, unsigned int b)
{
    /* The remainder is computed as a by-product and discarded. */
    unsigned int ignored;

    return divandmod32(a, b, &ignored);
}
131 | 137 | ||
132 | /* 64bit unsigned integer division */ |
138 | /* 64bit unsigned integer division */ |
/** Unsigned 64-bit division (libgcc entry point).
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a / b (0 when b == 0, inherited from divandmod64()).
 */
unsigned long long __udivdi3(unsigned long long a, unsigned long long b)
{
    /* The remainder is computed as a by-product and discarded. */
    unsigned long long ignored;

    return divandmod64(a, b, &ignored);
}
138 | 144 | ||
139 | /* 32bit remainder of the signed division */ |
145 | /* 32bit remainder of the signed division */ |
/** Remainder of signed 32-bit division (libgcc entry point).
 *
 * BUG FIX: the original passed the signed operands straight into
 * divandmod32(); a negative value implicitly converts to a huge unsigned
 * magnitude, so the remainder was computed for the wrong dividend
 * (e.g. -7 % 3 came out as 0 instead of -1). Divide the magnitudes
 * instead, then apply the dividend's sign, per C truncating-division
 * semantics. Unsigned negation avoids -INT_MIN overflow.
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a % b, carrying the sign of the dividend.
 */
int __modsi3(int a, int b)
{
    unsigned int rem;
    unsigned int ua = (a < 0) ? (0U - (unsigned int) a) : (unsigned int) a;
    unsigned int ub = (b < 0) ? (0U - (unsigned int) b) : (unsigned int) b;

    divandmod32(ua, ub, &rem);

    /* If the dividend is negative, the remainder must be too. */
    if (a < 0)
        return -(int) rem;
    return (int) rem;
}
152 | 158 | ||
153 | /* 64bit remainder of the signed division */ |
159 | /* 64bit remainder of the signed division */ |
/** Remainder of signed 64-bit division (libgcc entry point).
 *
 * BUG FIX: the original passed the signed operands straight into
 * divandmod64(); a negative value implicitly converts to a huge unsigned
 * magnitude, so the remainder was computed for the wrong dividend.
 * Divide the magnitudes instead, then apply the dividend's sign, per C
 * truncating-division semantics. Unsigned negation avoids -LLONG_MIN
 * overflow.
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a % b, carrying the sign of the dividend.
 */
long long __moddi3(long long a, long long b)
{
    unsigned long long rem;
    unsigned long long ua = (a < 0) ? (0ULL - (unsigned long long) a) : (unsigned long long) a;
    unsigned long long ub = (b < 0) ? (0ULL - (unsigned long long) b) : (unsigned long long) b;

    divandmod64(ua, ub, &rem);

    /* If the dividend is negative, the remainder must be too. */
    if (a < 0)
        return -(long long) rem;
    return (long long) rem;
}
166 | 172 | ||
167 | /* 32bit remainder of the unsigned division */ |
173 | /* 32bit remainder of the unsigned division */ |
/** Remainder of unsigned 32-bit division (libgcc entry point).
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a % b (0 when b == 0, inherited from divandmod32()).
 */
unsigned int __umodsi3(unsigned int a, unsigned int b)
{
    unsigned int rem;

    /* Discard the quotient; only the remainder is wanted. */
    (void) divandmod32(a, b, &rem);
    return rem;
}
174 | 180 | ||
175 | /* 64bit remainder of the unsigned division */ |
181 | /* 64bit remainder of the unsigned division */ |
/** Remainder of unsigned 64-bit division (libgcc entry point).
 *
 * @param a Dividend.
 * @param b Divisor.
 *
 * @return a % b (0 when b == 0, inherited from divandmod64()).
 */
unsigned long long __umoddi3(unsigned long long a, unsigned long long b)
{
    unsigned long long rem;

    /* Discard the quotient; only the remainder is wanted. */
    (void) divandmod64(a, b, &rem);
    return rem;
}
182 | 188 | ||
/** Combined unsigned 64-bit division and modulo (libgcc entry point).
 *
 * @param a Dividend.
 * @param b Divisor.
 * @param c Out-parameter receiving a % b.
 *
 * @return a / b (0 when b == 0, inherited from divandmod64()).
 */
unsigned long long __udivmoddi3(unsigned long long a, unsigned long long b, unsigned long long *c)
{
    unsigned long long quotient = divandmod64(a, b, c);

    return quotient;
}
187 | 193 | ||
188 | 194 | ||
- | 195 | ||
- | 196 | /** @} |
|
- | 197 | */ |
|
- | 198 | ||
189 | 199 |