===================================================================
RCS file: /home/cvs/OpenXM_contrib/gmp/mpn/generic/Attic/mod_1.c,v
retrieving revision 1.1.1.2
retrieving revision 1.1.1.3
diff -u -p -r1.1.1.2 -r1.1.1.3
--- OpenXM_contrib/gmp/mpn/generic/Attic/mod_1.c	2000/09/09 14:12:26	1.1.1.2
+++ OpenXM_contrib/gmp/mpn/generic/Attic/mod_1.c	2003/08/25 16:06:20	1.1.1.3
@@ -3,7 +3,7 @@
    Return the single-limb remainder.
    There are no constraints on the value of the divisor.
 
-Copyright (C) 1991, 1993, 1994, 1999 Free Software Foundation, Inc.
+Copyright 1991, 1993, 1994, 1999, 2000, 2002 Free Software Foundation, Inc.
 
 This file is part of the GNU MP Library.
 
@@ -26,150 +26,149 @@ MA 02111-1307, USA. */
 
 #include "gmp-impl.h"
 #include "longlong.h"
 
-#ifndef UMUL_TIME
-#define UMUL_TIME 1
-#endif
-
-#ifndef UDIV_TIME
-#define UDIV_TIME UMUL_TIME
+/* The size where udiv_qrnnd_preinv should be used rather than udiv_qrnnd,
+   meaning the quotient size where that should happen, the quotient size
+   being how many udiv divisions will be done.
+
+   The default is to use preinv always, CPUs where this doesn't suit have
+   tuned thresholds.  Note in particular that preinv should certainly be
+   used if that's the only division available (USE_PREINV_ALWAYS).  */
+
+#ifndef MOD_1_NORM_THRESHOLD
+#define MOD_1_NORM_THRESHOLD 0
 #endif
+#ifndef MOD_1_UNNORM_THRESHOLD
+#define MOD_1_UNNORM_THRESHOLD 0
+#endif
+
+/* The comments in mpn/generic/divrem_1.c apply here too.
+
+   As noted in the algorithms section of the manual, the shifts in the loop
+   for the unnorm case can be avoided by calculating r = a%(d*2^n), followed
+   by a final (r*2^n)%(d*2^n).  In fact if it happens that a%(d*2^n) can
+   skip a division where (a*2^n)%(d*2^n) can't then there's the same number
+   of divide steps, though how often that happens depends on the assumed
+   distributions of dividend and divisor.  In any case this idea is left to
+   CPU specific implementations to consider.  */
 
 mp_limb_t
-#if __STDC__
-mpn_mod_1 (mp_srcptr dividend_ptr, mp_size_t dividend_size,
-           mp_limb_t divisor_limb)
-#else
-mpn_mod_1 (dividend_ptr, dividend_size, divisor_limb)
-     mp_srcptr dividend_ptr;
-     mp_size_t dividend_size;
-     mp_limb_t divisor_limb;
-#endif
+mpn_mod_1 (mp_srcptr up, mp_size_t un, mp_limb_t d)
 {
-  mp_size_t i;
-  mp_limb_t n1, n0, r;
-  int dummy;
+  mp_size_t  i;
+  mp_limb_t  n1, n0, r;
+  mp_limb_t  dummy;
 
-  /* Botch: Should this be handled at all?  Rely on callers?  */
-  if (dividend_size == 0)
+  ASSERT (un >= 0);
+  ASSERT (d != 0);
+
+  /* Botch: Should this be handled at all?  Rely on callers?
+     But note un==0 is currently required by mpz/fdiv_r_ui.c and possibly
+     other places.  */
+  if (un == 0)
     return 0;
 
-  /* If multiplication is much faster than division, and the
-     dividend is large, pre-invert the divisor, and use
-     only multiplications in the inner loop.  */
+  d <<= GMP_NAIL_BITS;
 
-  /* This test should be read:
-       Does it ever help to use udiv_qrnnd_preinv?
-         && Does what we save compensate for the inversion overhead?  */
-  if (UDIV_TIME > (2 * UMUL_TIME + 6)
-      && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME)
+  if ((d & GMP_LIMB_HIGHBIT) != 0)
     {
-      int normalization_steps;
+      /* High limb is initial remainder, possibly with one subtract of
+         d to get r<d.  */
+      r = up[un - 1] << GMP_NAIL_BITS;
+      if (r >= d)
+        r -= d;
+      r >>= GMP_NAIL_BITS;
+      un--;
+      if (un == 0)
+        return r;
 
-      count_leading_zeros (normalization_steps, divisor_limb);
-      if (normalization_steps != 0)
+      if (BELOW_THRESHOLD (un, MOD_1_NORM_THRESHOLD))
         {
-          mp_limb_t divisor_limb_inverted;
-
-          divisor_limb <<= normalization_steps;
-          invert_limb (divisor_limb_inverted, divisor_limb);
-
-          n1 = dividend_ptr[dividend_size - 1];
-          r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);
-
-          /* Possible optimization:
-             if (r == 0
-                 && divisor_limb > ((n1 << normalization_steps)
-                                    | (dividend_ptr[dividend_size - 2] >> ...)))
-             ...one division less... */
-
-          for (i = dividend_size - 2; i >= 0; i--)
+        plain:
+          for (i = un - 1; i >= 0; i--)
             {
-              n0 = dividend_ptr[i];
-              udiv_qrnnd_preinv (dummy, r, r,
-                                 ((n1 << normalization_steps)
-                                  | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
-                                 divisor_limb, divisor_limb_inverted);
-              n1 = n0;
+              n0 = up[i] << GMP_NAIL_BITS;
+              udiv_qrnnd (dummy, r, r, n0, d);
+              r >>= GMP_NAIL_BITS;
            }
-          udiv_qrnnd_preinv (dummy, r, r,
-                             n1 << normalization_steps,
-                             divisor_limb, divisor_limb_inverted);
-          return r >> normalization_steps;
+          return r;
        }
      else
        {
-          mp_limb_t divisor_limb_inverted;
-
-          invert_limb (divisor_limb_inverted, divisor_limb);
-
-          i = dividend_size - 1;
-          r = dividend_ptr[i];
-
-          if (r >= divisor_limb)
-            r = 0;
-          else
-            i--;
-
-          for (; i >= 0; i--)
+          mp_limb_t inv;
+          invert_limb (inv, d);
+          for (i = un - 1; i >= 0; i--)
            {
-              n0 = dividend_ptr[i];
-              udiv_qrnnd_preinv (dummy, r, r,
-                                 n0, divisor_limb, divisor_limb_inverted);
+              n0 = up[i] << GMP_NAIL_BITS;
+              udiv_qrnnd_preinv (dummy, r, r, n0, d, inv);
+              r >>= GMP_NAIL_BITS;
            }
          return r;
        }
    }
  else
    {
-      if (UDIV_NEEDS_NORMALIZATION)
+      int norm;
+
+      /* Skip a division if high < divisor.  Having the test here before
+         normalizing will still skip as often as possible.  */
+      r = up[un - 1] << GMP_NAIL_BITS;
+      if (r < d)
        {
-          int normalization_steps;
+          r >>= GMP_NAIL_BITS;
+          un--;
+          if (un == 0)
+            return r;
+        }
+      else
+        r = 0;
 
-          count_leading_zeros (normalization_steps, divisor_limb);
-          if (normalization_steps != 0)
-            {
-              divisor_limb <<= normalization_steps;
+      /* If udiv_qrnnd doesn't need a normalized divisor, can use the simple
+         code above. */
+      if (! UDIV_NEEDS_NORMALIZATION
+          && BELOW_THRESHOLD (un, MOD_1_UNNORM_THRESHOLD))
+        goto plain;
 
-              n1 = dividend_ptr[dividend_size - 1];
-              r = n1 >> (BITS_PER_MP_LIMB - normalization_steps);
+      count_leading_zeros (norm, d);
+      d <<= norm;
 
-              /* Possible optimization:
-                 if (r == 0
-                     && divisor_limb > ((n1 << normalization_steps)
-                                        | (dividend_ptr[dividend_size - 2] >> ...)))
-                 ...one division less... */
+      n1 = up[un - 1] << GMP_NAIL_BITS;
+      r = (r << norm) | (n1 >> (GMP_LIMB_BITS - norm));
 
-              for (i = dividend_size - 2; i >= 0; i--)
-                {
-                  n0 = dividend_ptr[i];
-                  udiv_qrnnd (dummy, r, r,
-                              ((n1 << normalization_steps)
-                               | (n0 >> (BITS_PER_MP_LIMB - normalization_steps))),
-                              divisor_limb);
-                  n1 = n0;
-                }
+      if (UDIV_NEEDS_NORMALIZATION
+          && BELOW_THRESHOLD (un, MOD_1_UNNORM_THRESHOLD))
+        {
+          for (i = un - 2; i >= 0; i--)
+            {
+              n0 = up[i] << GMP_NAIL_BITS;
              udiv_qrnnd (dummy, r, r,
-                          n1 << normalization_steps,
-                          divisor_limb);
-              return r >> normalization_steps;
+                          (n1 << norm) | (n0 >> (GMP_NUMB_BITS - norm)),
+                          d);
+              r >>= GMP_NAIL_BITS;
+              n1 = n0;
            }
+          udiv_qrnnd (dummy, r, r, n1 << norm, d);
+          r >>= GMP_NAIL_BITS;
+          return r >> norm;
        }
-      /* No normalization needed, either because udiv_qrnnd doesn't require
-         it, or because DIVISOR_LIMB is already normalized.  */
-
-      i = dividend_size - 1;
-      r = dividend_ptr[i];
-
-      if (r >= divisor_limb)
-        r = 0;
      else
-        i--;
-
-      for (; i >= 0; i--)
        {
-          n0 = dividend_ptr[i];
-          udiv_qrnnd (dummy, r, r, n0, divisor_limb);
+          mp_limb_t inv;
+          invert_limb (inv, d);
+
+          for (i = un - 2; i >= 0; i--)
+            {
+              n0 = up[i] << GMP_NAIL_BITS;
+              udiv_qrnnd_preinv (dummy, r, r,
+                                 (n1 << norm) | (n0 >> (GMP_NUMB_BITS - norm)),
+                                 d, inv);
+              r >>= GMP_NAIL_BITS;
+              n1 = n0;
+            }
+          udiv_qrnnd_preinv (dummy, r, r, n1 << norm, d, inv);
+          r >>= GMP_NAIL_BITS;
+          return r >> norm;
        }
-      return r;
    }
 }
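
The choice the new MOD_1_NORM_THRESHOLD / MOD_1_UNNORM_THRESHOLD constants control is between a plain udiv_qrnnd per limb and udiv_qrnnd_preinv, which trades each hardware division for multiplications by a precomputed inverse of the normalized divisor.  The self-contained sketch below illustrates that idea outside GMP, using 32-bit limbs, 64-bit intermediates and no nails.  It is only an illustration: limb_t, precompute_inverse, reduce_step and mod_1_preinv are names invented for the example, and the small correction loop stands in for the tighter adjustment steps inside GMP's real udiv_qrnnd_preinv macro.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t limb_t;            /* a 32-bit "limb" for this example */
#define LIMB_BITS 32

/* inv = floor((B^2 - 1) / d) - B with B = 2^32, the quantity GMP's
   invert_limb computes; d must be normalized (high bit set).  */
static limb_t
precompute_inverse (limb_t d)
{
  return (limb_t) (UINT64_MAX / d - ((uint64_t) 1 << LIMB_BITS));
}

/* Remainder of the two-limb value (n1:n0) divided by d, using only
   multiplications plus a short correction loop; this is the shape of a
   udiv_qrnnd_preinv step, without GMP's exact adjustment logic.
   Requires n1 < d and d normalized.  */
static limb_t
reduce_step (limb_t n1, limb_t n0, limb_t d, limb_t inv)
{
  uint64_t n = ((uint64_t) n1 << LIMB_BITS) | n0;
  limb_t q = n1 + (limb_t) (((uint64_t) n1 * inv) >> LIMB_BITS);
  uint64_t r = n - (uint64_t) q * d;    /* q never exceeds the true quotient */
  while (r >= d)                        /* at most a few iterations */
    r -= d;
  return (limb_t) r;
}

/* r = {up,un} mod d for a normalized d, high limb first, mirroring the
   preinv branch of the MOD_1_NORM_THRESHOLD case above.  */
static limb_t
mod_1_preinv (const limb_t *up, int un, limb_t d)
{
  limb_t inv = precompute_inverse (d);
  limb_t r = 0;
  for (int i = un - 1; i >= 0; i--)
    r = reduce_step (r, up[i], d, inv);
  return r;
}

int
main (void)
{
  limb_t u[3] = { 0x89abcdefu, 0x01234567u, 0xfedcba98u };  /* least limb first */
  limb_t d = 0x80000007u;                                   /* normalized */
  printf ("%#lx\n", (unsigned long) mod_1_preinv (u, 3, d));
  return 0;
}

Whether precomputing the inverse pays off depends on how many limbs remain once the high limb has been absorbed, which is exactly what the new thresholds are tuned to decide; the default of 0 means preinv is always used.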
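The new comment about the unnorm case relies on the identity a mod d = (((a mod (d*2^n)) * 2^n) mod (d*2^n)) / 2^n, where n is the number of leading zero bits of d, so the per-limb shifting in the main loop can be traded for one extra reduction at the end.  Below is a small check of that identity on 64-bit operands; it is a demonstration only, not GMP code, and it assumes the GCC/Clang __builtin_clz builtin (mod_via_normalized is a made-up name).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* a mod d computed through the normalized divisor D = d << n: reduce modulo
   D first, then reduce (r << n) modulo D once more and undo the scaling.
   d must be nonzero; here d is a 32-bit divisor and a a 64-bit dividend.  */
static uint64_t
mod_via_normalized (uint64_t a, uint32_t d)
{
  int n = __builtin_clz (d);        /* leading zeros of d (GCC/Clang builtin) */
  uint64_t D = (uint64_t) d << n;   /* normalized divisor, high bit set */
  uint64_t r1 = a % D;              /* congruent to a mod d, since d divides D */
  uint64_t r2 = (r1 << n) % D;      /* a multiple of 2^n by construction */
  return r2 >> n;                   /* the remainder a mod d */
}

int
main (void)
{
  uint64_t a = 0x0123456789abcdefULL;
  uint32_t d = 12345;               /* an unnormalized divisor */
  assert (mod_via_normalized (a, d) == a % d);
  printf ("%llu\n", (unsigned long long) mod_via_normalized (a, d));
  return 0;
}

As the source comment notes, this reorganisation does not necessarily save divide steps, so it is left to CPU-specific implementations to decide whether avoiding the shifts is worth it.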