[BACK]Return to dive_1.c CVS log [TXT][DIR] Up to [local] / OpenXM_contrib / gmp / mpn / generic

File: [local] / OpenXM_contrib / gmp / mpn / generic / Attic / dive_1.c (download)

Revision 1.1.1.1 (vendor branch), Mon Aug 25 16:06:20 2003 UTC (20 years, 10 months ago) by ohara
Branch: GMP
CVS Tags: VERSION_4_1_2, RELEASE_1_2_3, RELEASE_1_2_2_KNOPPIX_b, RELEASE_1_2_2_KNOPPIX
Changes since 1.1: +0 -0 lines

Import gmp 4.1.2

/* mpn_divexact_1 -- mpn by limb exact division.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"



/* Divide a={src,size} by d=divisor and store the quotient in q={dst,size}.
   q will only be correct if d divides a exactly.

   A separate loop is used for shift==0 because n<<BITS_PER_MP_LIMB doesn't
   give zero on all CPUs (for instance it doesn't on the x86s).  This
   separate loop might run faster too, helping odd divisors.

   Possibilities:

   mpn_divexact_1c could be created, accepting and returning c.  This would
   let a long calculation be done piece by piece.  Currently there's no
   particular need for that, and not returning c means that a final umul can
   be skipped.

   Another use for returning c would be letting the caller know whether the
   division was in fact exact.  It would work just to return the carry bit
   "c=(l>s)" and let the caller do a final umul if interested.

   When the divisor is even, the factors of two could be handled with a
   separate mpn_rshift, instead of shifting on the fly.  That might be
   faster on some CPUs and would mean just the shift==0 style loop would be
   needed.

   If n<<BITS_PER_MP_LIMB gives zero on a particular CPU then the separate
   shift==0 loop is unnecessary, and could be eliminated if there's no great
   speed difference.

   It's not clear whether "/" is the best way to handle size==1.  Alpha gcc
   2.95 for instance has a poor "/" and might prefer the modular method.
   Perhaps a tuned parameter should control this.

   If src[size-1] < divisor then dst[size-1] will be zero, and one divide
   step could be skipped.  A test at last step for s<divisor (or ls in the
   even case) might be a good way to do that.  But if this code is often
   used with small divisors then it might not be worth bothering.  */

/* Exact division of {src,size} by a single limb, replacing hardware division
   with multiplication by the divisor's inverse modulo 2^GMP_NUMB_BITS.
   The quotient is correct only when the division is exact (see the design
   notes in the comment above).  */
void
mpn_divexact_1 (mp_ptr dst, mp_srcptr src, mp_size_t size, mp_limb_t divisor)
{
  mp_size_t  i;
  mp_limb_t  c, l, ls, s, s_next, inverse, dummy;
  unsigned   shift;

  ASSERT (size >= 1);
  ASSERT (divisor != 0);
  ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size));
  ASSERT_MPN (src, size);
  ASSERT_LIMB (divisor);

  /* Single limb: a plain "/" suffices (the comment above notes this may not
     be optimal on every CPU).  */
  if (size == 1)
    {
      dst[0] = src[0] / divisor;
      return;
    }

  /* Strip factors of 2 from an even divisor; "shift" counts the trailing
     zero bits removed.  They're compensated for by right-shifting src on
     the fly in the shift!=0 loop below.  After this, divisor is odd, which
     modlimb_invert requires.  */
  if ((divisor & 1) == 0)
    {
      count_trailing_zeros (shift, divisor);
      divisor >>= shift;
    }
  else
    shift = 0;

  /* inverse = 1/divisor mod 2^GMP_NUMB_BITS (divisor odd here).
     The shift up by GMP_NAIL_BITS positions divisor so that umul_ppmm
     below yields the wanted high part in its high limb (no-op when
     nails are not in use).  */
  modlimb_invert (inverse, divisor);
  divisor <<= GMP_NAIL_BITS;

  if (shift != 0)
    {
      /* Even-divisor case: each dividend limb "ls" is assembled from two
	 adjacent src limbs, shifted right on the fly.  "c" carries the
	 borrow from the previous subtraction plus the high limb of the
	 previous quotient*divisor product.  Note size is decremented: the
	 loop handles limbs 0..size-2, the final limb is done after it.  */
      s = src[0];
      c = 0;
      i = 0;
      size--;
      /* No previous quotient limb exists on the first iteration, so skip
	 the umul at the top of the loop.  */
      goto even_entry;

      do
	{
	  /* l = high limb of (previous quotient limb) * divisor;
	     add it into the running carry.  */
	  umul_ppmm (l, dummy, l, divisor);
	  c += l;

	even_entry:
	  /* Assemble the next shifted dividend limb from s and src[i+1].  */
	  s_next = src[i+1];
	  ls = ((s >> shift) | (s_next << (GMP_NUMB_BITS-shift))) & GMP_NUMB_MASK;
	  s = s_next;

	  /* l = ls - c, with the borrow recorded back into c.  */
          SUBC_LIMB (c, l, ls, c);

	  /* Quotient limb = l * inverse mod 2^GMP_NUMB_BITS.  */
	  l = (l * inverse) & GMP_NUMB_MASK;
	  dst[i] = l;
	  i++;
	}
      while (i < size);

      /* Final (most significant) limb: no s_next beyond it, so only the
	 remaining bits of s after the shift; no borrow propagates out.  */
      umul_ppmm (l, dummy, l, divisor);
      c += l;
      ls = s >> shift;
      l = ls - c;
      l = (l * inverse) & GMP_NUMB_MASK;
      dst[i] = l;
    }
  else
    {
      /* Odd-divisor case: no shifting needed.  The first quotient limb is
	 simply src[0] * inverse, since there's no incoming borrow.  */
      l = src[0];
      l = (l * inverse) & GMP_NUMB_MASK;
      dst[0] = l;
      i = 1;
      c = 0;

      do
	{
	  /* Accumulate the high limb of (previous quotient limb)*divisor
	     into the carry, subtract from the next src limb with borrow,
	     then multiply by the inverse for the next quotient limb.  */
	  umul_ppmm (l, dummy, l, divisor);
	  c += l;

	  s = src[i];
          SUBC_LIMB (c, l, s, c);

          l = (l * inverse) & GMP_NUMB_MASK;
	  dst[i] = l;
	  i++;
	}
      while (i < size);
    }
}