/* Annotation of OpenXM_contrib/gmp/mpq/get_d.c, Revision 1.1.1.2 */
/* double mpq_get_d (mpq_t src) -- Return the double approximation to SRC.

Copyright (C) 1995, 1996 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */
21:
22: #include "gmp.h"
23: #include "gmp-impl.h"
24: #include "longlong.h"
25:
/* Algorithm:
   1. Develop >= n bits of src.num / src.den, where n is the number of bits
      in a double.  This (partial) division will use all bits from the
      denominator.
   2. Use the remainder to determine how to round the result.
   3. Assign the integral result to a temporary double.
   4. Scale the temporary double, and return the result.

   An alternative algorithm, that would be faster:
   0. Let n be somewhat larger than the number of significant bits in a double.
   1. Extract the most significant n bits of the denominator, and an equal
      number of bits from the numerator.
   2. Interpret the extracted numbers as integers, call them a and b
      respectively, and develop n bits of the fractions ((a + 1) / b) and
      (a / (b + 1)) using mpn_divrem.
   3. If the computed values are identical UP TO THE POSITION WE CARE ABOUT,
      we are done.  If they are different, repeat the algorithm from step 1,
      but first let n = n * 2.
   4. If we end up using all bits from the numerator and denominator, fall
      back to the first algorithm above.
   5. Just to make life harder, the computation of a + 1 and b + 1 above
      might give carry-out...  Needs special handling.  It might work to
      subtract 1 in both cases instead.
*/
50:
51: double
52: #if __STDC__
53: mpq_get_d (const MP_RAT *src)
54: #else
55: mpq_get_d (src)
56: const MP_RAT *src;
57: #endif
58: {
59: mp_ptr np, dp;
60: mp_ptr rp;
61: mp_size_t nsize = src->_mp_num._mp_size;
62: mp_size_t dsize = src->_mp_den._mp_size;
63: mp_size_t qsize, rsize;
64: mp_size_t sign_quotient = nsize ^ dsize;
65: unsigned normalization_steps;
66: mp_limb_t qlimb;
67: #define N_QLIMBS (1 + (sizeof (double) + BYTES_PER_MP_LIMB-1) / BYTES_PER_MP_LIMB)
1.1.1.2 ! maekawa 68: mp_limb_t qarr[N_QLIMBS + 1];
! 69: mp_ptr qp = qarr;
1.1 maekawa 70: TMP_DECL (marker);
71:
72: if (nsize == 0)
73: return 0.0;
74:
75: TMP_MARK (marker);
76: nsize = ABS (nsize);
77: dsize = ABS (dsize);
78: np = src->_mp_num._mp_d;
79: dp = src->_mp_den._mp_d;
80:
81: rsize = dsize + N_QLIMBS;
82: rp = (mp_ptr) TMP_ALLOC ((rsize + 1) * BYTES_PER_MP_LIMB);
83:
84: count_leading_zeros (normalization_steps, dp[dsize - 1]);
85:
86: /* Normalize the denominator, i.e. make its most significant bit set by
87: shifting it NORMALIZATION_STEPS bits to the left. Also shift the
88: numerator the same number of steps (to keep the quotient the same!). */
89: if (normalization_steps != 0)
90: {
91: mp_ptr tp;
92: mp_limb_t nlimb;
93:
94: /* Shift up the denominator setting the most significant bit of
95: the most significant limb. Use temporary storage not to clobber
96: the original contents of the denominator. */
97: tp = (mp_ptr) TMP_ALLOC (dsize * BYTES_PER_MP_LIMB);
98: mpn_lshift (tp, dp, dsize, normalization_steps);
99: dp = tp;
100:
101: if (rsize > nsize)
102: {
103: MPN_ZERO (rp, rsize - nsize);
104: nlimb = mpn_lshift (rp + (rsize - nsize),
105: np, nsize, normalization_steps);
106: }
107: else
108: {
109: nlimb = mpn_lshift (rp, np + (nsize - rsize),
110: rsize, normalization_steps);
111: }
112: if (nlimb != 0)
113: {
114: rp[rsize] = nlimb;
115: rsize++;
116: }
117: }
118: else
119: {
120: if (rsize > nsize)
121: {
122: MPN_ZERO (rp, rsize - nsize);
123: MPN_COPY (rp + (rsize - nsize), np, nsize);
124: }
125: else
126: {
127: MPN_COPY (rp, np + (nsize - rsize), rsize);
128: }
129: }
130:
131: qlimb = mpn_divmod (qp, rp, rsize, dp, dsize);
132: qsize = rsize - dsize;
133: if (qlimb)
134: {
135: qp[qsize] = qlimb;
136: qsize++;
137: }
138:
139: {
140: double res;
141: mp_size_t i;
1.1.1.2 ! maekawa 142: int scale = nsize - dsize - N_QLIMBS;
! 143:
! 144: #if defined (__vax__)
! 145: /* Ignore excess quotient limbs. This is necessary on a vax
! 146: with its small double exponent, since we'd otherwise get
! 147: exponent overflow while forming RES. */
! 148: if (qsize > N_QLIMBS)
! 149: {
! 150: qp += qsize - N_QLIMBS;
! 151: scale += qsize - N_QLIMBS;
! 152: qsize = N_QLIMBS;
! 153: }
! 154: #endif
1.1 maekawa 155:
156: res = qp[qsize - 1];
157: for (i = qsize - 2; i >= 0; i--)
158: res = res * MP_BASE_AS_DOUBLE + qp[i];
159:
1.1.1.2 ! maekawa 160: res = __gmp_scale2 (res, BITS_PER_MP_LIMB * scale);
1.1 maekawa 161:
162: TMP_FREE (marker);
163: return sign_quotient >= 0 ? res : -res;
164: }
165: }
/* FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org> */