root/lib/mini-gmp.c


DEFINITIONS

This source file includes the following definitions.
  1. gmp_die
  2. gmp_default_alloc
  3. gmp_default_realloc
  4. gmp_default_free
  5. mp_get_memory_functions
  6. mp_set_memory_functions
  7. gmp_alloc_limbs
  8. gmp_realloc_limbs
  9. gmp_free_limbs
  10. mpn_copyi
  11. mpn_copyd
  12. mpn_cmp
  13. mpn_cmp4
  14. mpn_normalized_size
  15. mpn_zero_p
  16. mpn_zero
  17. mpn_add_1
  18. mpn_add_n
  19. mpn_add
  20. mpn_sub_1
  21. mpn_sub_n
  22. mpn_sub
  23. mpn_mul_1
  24. mpn_addmul_1
  25. mpn_submul_1
  26. mpn_mul
  27. mpn_mul_n
  28. mpn_sqr
  29. mpn_lshift
  30. mpn_rshift
  31. mpn_common_scan
  32. mpn_scan1
  33. mpn_scan0
  34. mpn_com
  35. mpn_neg
  36. mpn_invert_3by2
  37. mpn_div_qr_1_invert
  38. mpn_div_qr_2_invert
  39. mpn_div_qr_invert
  40. mpn_div_qr_1_preinv
  41. mpn_div_qr_2_preinv
  42. mpn_div_qr_pi1
  43. mpn_div_qr_preinv
  44. mpn_div_qr
  45. mpn_base_power_of_two_p
  46. mpn_get_base_info
  47. mpn_limb_size_in_base_2
  48. mpn_get_str_bits
  49. mpn_limb_get_str
  50. mpn_get_str_other
  51. mpn_get_str
  52. mpn_set_str_bits
  53. mpn_set_str_other
  54. mpn_set_str
  55. mpz_init
  56. mpz_init2
  57. mpz_clear
  58. mpz_realloc
  59. mpz_set_si
  60. mpz_set_ui
  61. mpz_set
  62. mpz_init_set_si
  63. mpz_init_set_ui
  64. mpz_init_set
  65. mpz_fits_slong_p
  66. mpn_absfits_ulong_p
  67. mpz_fits_ulong_p
  68. mpz_fits_sint_p
  69. mpz_fits_uint_p
  70. mpz_fits_sshort_p
  71. mpz_fits_ushort_p
  72. mpz_get_si
  73. mpz_get_ui
  74. mpz_size
  75. mpz_getlimbn
  76. mpz_realloc2
  77. mpz_limbs_read
  78. mpz_limbs_modify
  79. mpz_limbs_write
  80. mpz_limbs_finish
  81. mpz_roinit_normal_n
  82. mpz_roinit_n
  83. mpz_set_d
  84. mpz_init_set_d
  85. mpz_get_d
  86. mpz_cmpabs_d
  87. mpz_cmp_d
  88. mpz_sgn
  89. mpz_cmp_si
  90. mpz_cmp_ui
  91. mpz_cmp
  92. mpz_cmpabs_ui
  93. mpz_cmpabs
  94. mpz_abs
  95. mpz_neg
  96. mpz_swap
  97. mpz_add_ui
  98. mpz_sub_ui
  99. mpz_ui_sub
  100. mpz_abs_add
  101. mpz_abs_sub
  102. mpz_add
  103. mpz_sub
  104. mpz_mul_si
  105. mpz_mul_ui
  106. mpz_mul
  107. mpz_mul_2exp
  108. mpz_addmul_ui
  109. mpz_submul_ui
  110. mpz_addmul
  111. mpz_submul
  112. mpz_div_qr
  113. mpz_cdiv_qr
  114. mpz_fdiv_qr
  115. mpz_tdiv_qr
  116. mpz_cdiv_q
  117. mpz_fdiv_q
  118. mpz_tdiv_q
  119. mpz_cdiv_r
  120. mpz_fdiv_r
  121. mpz_tdiv_r
  122. mpz_mod
  123. mpz_div_q_2exp
  124. mpz_div_r_2exp
  125. mpz_cdiv_q_2exp
  126. mpz_fdiv_q_2exp
  127. mpz_tdiv_q_2exp
  128. mpz_cdiv_r_2exp
  129. mpz_fdiv_r_2exp
  130. mpz_tdiv_r_2exp
  131. mpz_divexact
  132. mpz_divisible_p
  133. mpz_congruent_p
  134. mpz_div_qr_ui
  135. mpz_cdiv_qr_ui
  136. mpz_fdiv_qr_ui
  137. mpz_tdiv_qr_ui
  138. mpz_cdiv_q_ui
  139. mpz_fdiv_q_ui
  140. mpz_tdiv_q_ui
  141. mpz_cdiv_r_ui
  142. mpz_fdiv_r_ui
  143. mpz_tdiv_r_ui
  144. mpz_cdiv_ui
  145. mpz_fdiv_ui
  146. mpz_tdiv_ui
  147. mpz_mod_ui
  148. mpz_divexact_ui
  149. mpz_divisible_ui_p
  150. mpn_gcd_11
  151. mpz_gcd_ui
  152. mpz_make_odd
  153. mpz_gcd
  154. mpz_gcdext
  155. mpz_lcm
  156. mpz_lcm_ui
  157. mpz_invert
  158. mpz_pow_ui
  159. mpz_ui_pow_ui
  160. mpz_powm
  161. mpz_powm_ui
  162. mpz_rootrem
  163. mpz_root
  164. mpz_sqrtrem
  165. mpz_sqrt
  166. mpz_perfect_square_p
  167. mpn_perfect_square_p
  168. mpn_sqrtrem
  169. mpz_mfac_uiui
  170. mpz_2fac_ui
  171. mpz_fac_ui
  172. mpz_bin_uiui
  173. gmp_jacobi_coprime
  174. gmp_lucas_step_k_2k
  175. gmp_lucas_mod
  176. gmp_stronglucas
  177. gmp_millerrabin
  178. mpz_probab_prime_p
  179. mpz_tstbit
  180. mpz_abs_add_bit
  181. mpz_abs_sub_bit
  182. mpz_setbit
  183. mpz_clrbit
  184. mpz_combit
  185. mpz_com
  186. mpz_and
  187. mpz_ior
  188. mpz_xor
  189. gmp_popcount_limb
  190. mpn_popcount
  191. mpz_popcount
  192. mpz_hamdist
  193. mpz_scan1
  194. mpz_scan0
  195. mpz_sizeinbase
  196. mpz_get_str
  197. mpz_set_str
  198. mpz_init_set_str
  199. mpz_out_str
  200. gmp_detect_endian
  201. mpz_import
  202. mpz_export

     1 /* mini-gmp, a minimalistic implementation of a GNU GMP subset.
     2 
     3    Contributed to the GNU project by Niels Möller
     4    Additional functionalities and improvements by Marco Bodrato.
     5 
     6 Copyright 1991-1997, 1999-2022 Free Software Foundation, Inc.
     7 
     8 This file is part of the GNU MP Library.
     9 
    10 The GNU MP Library is free software; you can redistribute it and/or modify
    11 it under the terms of either:
    12 
    13   * the GNU Lesser General Public License as published by the Free
    14     Software Foundation; either version 3 of the License, or (at your
    15     option) any later version.
    16 
    17 or
    18 
    19   * the GNU General Public License as published by the Free Software
    20     Foundation; either version 2 of the License, or (at your option) any
    21     later version.
    22 
    23 or both in parallel, as here.
    24 
    25 The GNU MP Library is distributed in the hope that it will be useful, but
    26 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
    27 or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    28 for more details.
    29 
    30 You should have received copies of the GNU General Public License and the
    31 GNU Lesser General Public License along with the GNU MP Library.  If not,
    32 see https://www.gnu.org/licenses/.  */
    33 
    34 /* NOTE: All functions in this file which are not declared in
    35    mini-gmp.h are internal, and are not intended to be compatible
    36    with GMP or with future versions of mini-gmp. */
    37 
    38 /* Much of the material copied from GMP files, including: gmp-impl.h,
    39    longlong.h, mpn/generic/add_n.c, mpn/generic/addmul_1.c,
    40    mpn/generic/lshift.c, mpn/generic/mul_1.c,
    41    mpn/generic/mul_basecase.c, mpn/generic/rshift.c,
    42    mpn/generic/sbpi1_div_qr.c, mpn/generic/sub_n.c,
    43    mpn/generic/submul_1.c. */
    44 
    45 #include <assert.h>
    46 #include <ctype.h>
    47 #include <limits.h>
    48 #include <stdio.h>
    49 #include <stdlib.h>
    50 #include <string.h>
    51 
    52 #include "mini-gmp.h"
    53 
    54 #if !defined(MINI_GMP_DONT_USE_FLOAT_H)
    55 #include <float.h>
    56 #endif
    57 
    58 
    59 /* Macros */
    60 #define GMP_LIMB_BITS (sizeof(mp_limb_t) * CHAR_BIT)
    61 
    62 #define GMP_LIMB_MAX ((mp_limb_t) ~ (mp_limb_t) 0)
    63 #define GMP_LIMB_HIGHBIT ((mp_limb_t) 1 << (GMP_LIMB_BITS - 1))
    64 
    65 #define GMP_HLIMB_BIT ((mp_limb_t) 1 << (GMP_LIMB_BITS / 2))
    66 #define GMP_LLIMB_MASK (GMP_HLIMB_BIT - 1)
    67 
    68 #define GMP_ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)
    69 #define GMP_ULONG_HIGHBIT ((unsigned long) 1 << (GMP_ULONG_BITS - 1))
    70 
    71 #define GMP_ABS(x) ((x) >= 0 ? (x) : -(x))
    72 #define GMP_NEG_CAST(T,x) (-((T)((x) + 1) - 1))
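
/* For illustration: GMP_NEG_CAST yields the absolute value of a negative
   signed x as the unsigned type T without ever negating x itself, so it is
   well defined even for the most negative value.  Assuming a two's-complement
   LP64 target, GMP_NEG_CAST (unsigned long, LONG_MIN) evaluates to
   (unsigned long) LONG_MAX + 1, whereas plain -LONG_MIN would overflow. */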
    73 
    74 #define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
    75 #define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))
    76 
    77 #define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
    78 
    79 #if defined(DBL_MANT_DIG) && FLT_RADIX == 2
    80 #define GMP_DBL_MANT_BITS DBL_MANT_DIG
    81 #else
    82 #define GMP_DBL_MANT_BITS (53)
    83 #endif
    84 
    85 /* Return non-zero if xp,xsize and yp,ysize overlap.
    86    If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
    87    overlap.  If both these are false, there's an overlap. */
    88 #define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize)                         \
    89   ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
    90 
    91 #define gmp_assert_nocarry(x) do { \
    92     mp_limb_t __cy = (x);          \
    93     assert (__cy == 0);            \
    94     (void) (__cy);                 \
    95   } while (0)
    96 
    97 #define gmp_clz(count, x) do {                                          \
    98     mp_limb_t __clz_x = (x);                                            \
    99     unsigned __clz_c = 0;                                               \
   100     int LOCAL_SHIFT_BITS = 8;                                           \
   101     if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)                               \
   102       for (;                                                            \
   103            (__clz_x & ((mp_limb_t) 0xff << (GMP_LIMB_BITS - 8))) == 0;  \
   104            __clz_c += 8)                                                \
   105         { __clz_x <<= LOCAL_SHIFT_BITS; }                               \
   106     for (; (__clz_x & GMP_LIMB_HIGHBIT) == 0; __clz_c++)                \
   107       __clz_x <<= 1;                                                    \
   108     (count) = __clz_c;                                                  \
   109   } while (0)
   110 
   111 #define gmp_ctz(count, x) do {                                          \
   112     mp_limb_t __ctz_x = (x);                                            \
   113     unsigned __ctz_c = 0;                                               \
   114     gmp_clz (__ctz_c, __ctz_x & - __ctz_x);                             \
   115     (count) = GMP_LIMB_BITS - 1 - __ctz_c;                              \
   116   } while (0)
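
/* Worked example, assuming 32-bit limbs: for x = 0x50 (binary 0101 0000),
   x & -x isolates the lowest set bit, 0x10.  gmp_clz skips whole zero bytes
   first and then counts bit by bit, reporting 27 leading zeros, so gmp_ctz
   yields 32 - 1 - 27 = 4, the number of trailing zeros.  Both macros require
   a nonzero argument. */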
   117 
   118 #define gmp_add_ssaaaa(sh, sl, ah, al, bh, bl) \
   119   do {                                                                  \
   120     mp_limb_t __x;                                                      \
   121     __x = (al) + (bl);                                                  \
   122     (sh) = (ah) + (bh) + (__x < (al));                                  \
   123     (sl) = __x;                                                         \
   124   } while (0)
   125 
   126 #define gmp_sub_ddmmss(sh, sl, ah, al, bh, bl) \
   127   do {                                                                  \
   128     mp_limb_t __x;                                                      \
   129     __x = (al) - (bl);                                                  \
   130     (sh) = (ah) - (bh) - ((al) < (bl));                                 \
   131     (sl) = __x;                                                         \
   132   } while (0)
   133 
   134 #define gmp_umul_ppmm(w1, w0, u, v)                                     \
   135   do {                                                                  \
   136     int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;                            \
   137     if (sizeof(unsigned int) * CHAR_BIT >= 2 * GMP_LIMB_BITS)           \
   138       {                                                                 \
   139         unsigned int __ww = (unsigned int) (u) * (v);                   \
   140         w0 = (mp_limb_t) __ww;                                          \
   141         w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS);                 \
   142       }                                                                 \
   143     else if (GMP_ULONG_BITS >= 2 * GMP_LIMB_BITS)                       \
   144       {                                                                 \
   145         unsigned long int __ww = (unsigned long int) (u) * (v);         \
   146         w0 = (mp_limb_t) __ww;                                          \
   147         w1 = (mp_limb_t) (__ww >> LOCAL_GMP_LIMB_BITS);                 \
   148       }                                                                 \
   149     else {                                                              \
   150       mp_limb_t __x0, __x1, __x2, __x3;                                 \
   151       unsigned __ul, __vl, __uh, __vh;                                  \
   152       mp_limb_t __u = (u), __v = (v);                                   \
   153       assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t));             \
   154                                                                         \
   155       __ul = __u & GMP_LLIMB_MASK;                                      \
   156       __uh = __u >> (GMP_LIMB_BITS / 2);                                \
   157       __vl = __v & GMP_LLIMB_MASK;                                      \
   158       __vh = __v >> (GMP_LIMB_BITS / 2);                                \
   159                                                                         \
   160       __x0 = (mp_limb_t) __ul * __vl;                                   \
   161       __x1 = (mp_limb_t) __ul * __vh;                                   \
   162       __x2 = (mp_limb_t) __uh * __vl;                                   \
   163       __x3 = (mp_limb_t) __uh * __vh;                                   \
   164                                                                         \
   165       __x1 += __x0 >> (GMP_LIMB_BITS / 2);/* this can't give carry */   \
   166       __x1 += __x2;             /* but this indeed can */               \
   167       if (__x1 < __x2)          /* did we get it? */                    \
   168         __x3 += GMP_HLIMB_BIT;  /* yes, add it in the proper pos. */    \
   169                                                                         \
   170       (w1) = __x3 + (__x1 >> (GMP_LIMB_BITS / 2));                      \
   171       (w0) = (__x1 << (GMP_LIMB_BITS / 2)) + (__x0 & GMP_LLIMB_MASK);   \
   172     }                                                                   \
   173   } while (0)
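
/* When no integer type of at least twice the limb width is available, the
   last branch of gmp_umul_ppmm builds the double-limb product from four
   half-limb partial products, schoolbook style, with one explicit carry
   fix-up when the two cross products are summed.  Worked example, assuming
   8-bit limbs (4-bit half limbs): u = 0xAB and v = 0xCD split into
   (0xA, 0xB) and (0xC, 0xD), and recombining the partial products gives
   w1 = 0x88, w0 = 0xEF, i.e. 0xAB * 0xCD = 0x88EF. */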
   174 
   175 /* If mp_limb_t is of size smaller than int, plain u*v implies
   176    automatic promotion to *signed* int, and then multiply may overflow
   177    and cause undefined behavior. Explicitly cast to unsigned int for
   178    that case. */
   179 #define gmp_umullo_limb(u, v) \
   180   ((sizeof(mp_limb_t) >= sizeof(int)) ? (u)*(v) : (unsigned int)(u) * (v))
   181 
   182 #define gmp_udiv_qrnnd_preinv(q, r, nh, nl, d, di)                      \
   183   do {                                                                  \
   184     mp_limb_t _qh, _ql, _r, _mask;                                      \
   185     gmp_umul_ppmm (_qh, _ql, (nh), (di));                               \
   186     gmp_add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));                \
   187     _r = (nl) - gmp_umullo_limb (_qh, (d));                             \
   188     _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */         \
   189     _qh += _mask;                                                       \
   190     _r += _mask & (d);                                                  \
   191     if (_r >= (d))                                                      \
   192       {                                                                 \
   193         _r -= (d);                                                      \
   194         _qh++;                                                          \
   195       }                                                                 \
   196                                                                         \
   197     (r) = _r;                                                           \
   198     (q) = _qh;                                                          \
   199   } while (0)
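
/* gmp_udiv_qrnnd_preinv divides the two-limb value B*nh + nl, where
   B = 2^GMP_LIMB_BITS, by a normalized divisor d (high bit set), using the
   precomputed reciprocal di (effectively floor ((B^2 - 1) / d) - B), and
   delivers the quotient q and remainder r < d.  Callers keep nh < d so that
   the quotient fits in one limb; the estimate _qh from the high product is
   then corrected by the branch-free _mask step and the final "if (_r >= d)"
   adjustment. */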
   200 
   201 #define gmp_udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)           \
   202   do {                                                                  \
   203     mp_limb_t _q0, _t1, _t0, _mask;                                     \
   204     gmp_umul_ppmm ((q), _q0, (n2), (dinv));                             \
   205     gmp_add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));                    \
   206                                                                         \
   207     /* Compute the two most significant limbs of n - q'd */             \
   208     (r1) = (n1) - gmp_umullo_limb ((d1), (q));                          \
   209     gmp_sub_ddmmss ((r1), (r0), (r1), (n0), (d1), (d0));                \
   210     gmp_umul_ppmm (_t1, _t0, (d0), (q));                                \
   211     gmp_sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);                  \
   212     (q)++;                                                              \
   213                                                                         \
   214     /* Conditionally adjust q and the remainders */                     \
   215     _mask = - (mp_limb_t) ((r1) >= _q0);                                \
   216     (q) += _mask;                                                       \
   217     gmp_add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0)); \
   218     if ((r1) >= (d1))                                                   \
   219       {                                                                 \
   220         if ((r1) > (d1) || (r0) >= (d0))                                \
   221           {                                                             \
   222             (q)++;                                                      \
   223             gmp_sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));        \
   224           }                                                             \
   225       }                                                                 \
   226   } while (0)
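
/* gmp_udiv_qr_3by2 is the 3/2 analogue: it divides the three-limb value
   <n2, n1, n0> by the normalized two-limb divisor <d1, d0>, with dinv as
   produced by mpn_invert_3by2, yielding a one-limb quotient q and a two-limb
   remainder <r1, r0>.  The candidate quotient is corrected by the masked
   add-back and, when the remainder still reaches <d1, d0>, by one final
   increment and subtraction. */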
   227 
   228 /* Swap macros. */
   229 #define MP_LIMB_T_SWAP(x, y)                                            \
   230   do {                                                                  \
   231     mp_limb_t __mp_limb_t_swap__tmp = (x);                              \
   232     (x) = (y);                                                          \
   233     (y) = __mp_limb_t_swap__tmp;                                        \
   234   } while (0)
   235 #define MP_SIZE_T_SWAP(x, y)                                            \
   236   do {                                                                  \
   237     mp_size_t __mp_size_t_swap__tmp = (x);                              \
   238     (x) = (y);                                                          \
   239     (y) = __mp_size_t_swap__tmp;                                        \
   240   } while (0)
   241 #define MP_BITCNT_T_SWAP(x,y)                   \
   242   do {                                          \
   243     mp_bitcnt_t __mp_bitcnt_t_swap__tmp = (x);  \
   244     (x) = (y);                                  \
   245     (y) = __mp_bitcnt_t_swap__tmp;              \
   246   } while (0)
   247 #define MP_PTR_SWAP(x, y)                                               \
   248   do {                                                                  \
   249     mp_ptr __mp_ptr_swap__tmp = (x);                                    \
   250     (x) = (y);                                                          \
   251     (y) = __mp_ptr_swap__tmp;                                           \
   252   } while (0)
   253 #define MP_SRCPTR_SWAP(x, y)                                            \
   254   do {                                                                  \
   255     mp_srcptr __mp_srcptr_swap__tmp = (x);                              \
   256     (x) = (y);                                                          \
   257     (y) = __mp_srcptr_swap__tmp;                                        \
   258   } while (0)
   259 
   260 #define MPN_PTR_SWAP(xp,xs, yp,ys)                                      \
   261   do {                                                                  \
   262     MP_PTR_SWAP (xp, yp);                                               \
   263     MP_SIZE_T_SWAP (xs, ys);                                            \
   264   } while(0)
   265 #define MPN_SRCPTR_SWAP(xp,xs, yp,ys)                                   \
   266   do {                                                                  \
   267     MP_SRCPTR_SWAP (xp, yp);                                            \
   268     MP_SIZE_T_SWAP (xs, ys);                                            \
   269   } while(0)
   270 
   271 #define MPZ_PTR_SWAP(x, y)                                              \
   272   do {                                                                  \
   273     mpz_ptr __mpz_ptr_swap__tmp = (x);                                  \
   274     (x) = (y);                                                          \
   275     (y) = __mpz_ptr_swap__tmp;                                          \
   276   } while (0)
   277 #define MPZ_SRCPTR_SWAP(x, y)                                           \
   278   do {                                                                  \
   279     mpz_srcptr __mpz_srcptr_swap__tmp = (x);                            \
   280     (x) = (y);                                                          \
   281     (y) = __mpz_srcptr_swap__tmp;                                       \
   282   } while (0)
   283 
   284 const int mp_bits_per_limb = GMP_LIMB_BITS;
   285 
   286 
   287 /* Memory allocation and other helper functions. */
   288 static void
   289 gmp_die (const char *msg)
   290 {
   291   fprintf (stderr, "%s\n", msg);
   292   abort();
   293 }
   294 
   295 static void *
   296 gmp_default_alloc (size_t size)
   297 {
   298   void *p;
   299 
   300   assert (size > 0);
   301 
   302   p = malloc (size);
   303   if (!p)
   304     gmp_die("gmp_default_alloc: Virtual memory exhausted.");
   305 
   306   return p;
   307 }
   308 
   309 static void *
   310 gmp_default_realloc (void *old, size_t unused_old_size, size_t new_size)
   311 {
   312   void * p;
   313 
   314   p = realloc (old, new_size);
   315 
   316   if (!p)
   317     gmp_die("gmp_default_realloc: Virtual memory exhausted.");
   318 
   319   return p;
   320 }
   321 
   322 static void
   323 gmp_default_free (void *p, size_t unused_size)
   324 {
   325   free (p);
   326 }
   327 
   328 static void * (*gmp_allocate_func) (size_t) = gmp_default_alloc;
   329 static void * (*gmp_reallocate_func) (void *, size_t, size_t) = gmp_default_realloc;
   330 static void (*gmp_free_func) (void *, size_t) = gmp_default_free;
   331 
   332 void
   333 mp_get_memory_functions (void *(**alloc_func) (size_t),
   334                          void *(**realloc_func) (void *, size_t, size_t),
   335                          void (**free_func) (void *, size_t))
   336 {
   337   if (alloc_func)
   338     *alloc_func = gmp_allocate_func;
   339 
   340   if (realloc_func)
   341     *realloc_func = gmp_reallocate_func;
   342 
   343   if (free_func)
   344     *free_func = gmp_free_func;
   345 }
   346 
   347 void
   348 mp_set_memory_functions (void *(*alloc_func) (size_t),
   349                          void *(*realloc_func) (void *, size_t, size_t),
   350                          void (*free_func) (void *, size_t))
   351 {
   352   if (!alloc_func)
   353     alloc_func = gmp_default_alloc;
   354   if (!realloc_func)
   355     realloc_func = gmp_default_realloc;
   356   if (!free_func)
   357     free_func = gmp_default_free;
   358 
   359   gmp_allocate_func = alloc_func;
   360   gmp_reallocate_func = realloc_func;
   361   gmp_free_func = free_func;
   362 }
   363 
   364 #define gmp_alloc(size) ((*gmp_allocate_func)((size)))
   365 #define gmp_free(p, size) ((*gmp_free_func) ((p), (size)))
   366 #define gmp_realloc(ptr, old_size, size) ((*gmp_reallocate_func)(ptr, old_size, size))
   367 
   368 static mp_ptr
   369 gmp_alloc_limbs (mp_size_t size)
   370 {
   371   return (mp_ptr) gmp_alloc (size * sizeof (mp_limb_t));
   372 }
   373 
   374 static mp_ptr
   375 gmp_realloc_limbs (mp_ptr old, mp_size_t old_size, mp_size_t size)
   376 {
   377   assert (size > 0);
   378   return (mp_ptr) gmp_realloc (old, old_size * sizeof (mp_limb_t), size * sizeof (mp_limb_t));
   379 }
   380 
   381 static void
   382 gmp_free_limbs (mp_ptr old, mp_size_t size)
   383 {
   384   gmp_free (old, size * sizeof (mp_limb_t));
   385 }
   386 
   387 
   388 /* MPN interface */
   389 
   390 void
   391 mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
   392 {
   393   mp_size_t i;
   394   for (i = 0; i < n; i++)
   395     d[i] = s[i];
   396 }
   397 
   398 void
   399 mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
   400 {
   401   while (--n >= 0)
   402     d[n] = s[n];
   403 }
   404 
   405 int
   406 mpn_cmp (mp_srcptr ap, mp_srcptr bp, mp_size_t n)
   407 {
   408   while (--n >= 0)
   409     {
   410       if (ap[n] != bp[n])
   411         return ap[n] > bp[n] ? 1 : -1;
   412     }
   413   return 0;
   414 }
   415 
   416 static int
   417 mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
   418 {
   419   if (an != bn)
   420     return an < bn ? -1 : 1;
   421   else
   422     return mpn_cmp (ap, bp, an);
   423 }
   424 
   425 static mp_size_t
   426 mpn_normalized_size (mp_srcptr xp, mp_size_t n)
   427 {
   428   while (n > 0 && xp[n-1] == 0)
   429     --n;
   430   return n;
   431 }
   432 
   433 int
   434 mpn_zero_p(mp_srcptr rp, mp_size_t n)
   435 {
   436   return mpn_normalized_size (rp, n) == 0;
   437 }
   438 
   439 void
   440 mpn_zero (mp_ptr rp, mp_size_t n)
   441 {
   442   while (--n >= 0)
   443     rp[n] = 0;
   444 }
   445 
   446 mp_limb_t
   447 mpn_add_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
   448 {
   449   mp_size_t i;
   450 
   451   assert (n > 0);
   452   i = 0;
   453   do
   454     {
   455       mp_limb_t r = ap[i] + b;
   456       /* Carry out */
   457       b = (r < b);
   458       rp[i] = r;
   459     }
   460   while (++i < n);
   461 
   462   return b;
   463 }
   464 
   465 mp_limb_t
   466 mpn_add_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
   467 {
   468   mp_size_t i;
   469   mp_limb_t cy;
   470 
   471   for (i = 0, cy = 0; i < n; i++)
   472     {
   473       mp_limb_t a, b, r;
   474       a = ap[i]; b = bp[i];
   475       r = a + cy;
   476       cy = (r < cy);
   477       r += b;
   478       cy += (r < b);
   479       rp[i] = r;
   480     }
   481   return cy;
   482 }
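
/* The carry handling above relies on unsigned wrap-around: r = a + cy wraps
   exactly when r < cy, and the second test r < b catches a wrap when b is
   added.  The two tests can never both be true in one iteration, so cy stays
   0 or 1.  For example, with 8-bit limbs, a = 0xFF, b = 0x01 and cy = 0
   produce the limb 0x00 with a carry of 1 into the next position. */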
   483 
   484 mp_limb_t
   485 mpn_add (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
   486 {
   487   mp_limb_t cy;
   488 
   489   assert (an >= bn);
   490 
   491   cy = mpn_add_n (rp, ap, bp, bn);
   492   if (an > bn)
   493     cy = mpn_add_1 (rp + bn, ap + bn, an - bn, cy);
   494   return cy;
   495 }
   496 
   497 mp_limb_t
   498 mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
   499 {
   500   mp_size_t i;
   501 
   502   assert (n > 0);
   503 
   504   i = 0;
   505   do
   506     {
   507       mp_limb_t a = ap[i];
   508       /* Carry out */
   509       mp_limb_t cy = a < b;
   510       rp[i] = a - b;
   511       b = cy;
   512     }
   513   while (++i < n);
   514 
   515   return b;
   516 }
   517 
   518 mp_limb_t
   519 mpn_sub_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
   520 {
   521   mp_size_t i;
   522   mp_limb_t cy;
   523 
   524   for (i = 0, cy = 0; i < n; i++)
   525     {
   526       mp_limb_t a, b;
   527       a = ap[i]; b = bp[i];
   528       b += cy;
   529       cy = (b < cy);
   530       cy += (a < b);
   531       rp[i] = a - b;
   532     }
   533   return cy;
   534 }
   535 
   536 mp_limb_t
   537 mpn_sub (mp_ptr rp, mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
   538 {
   539   mp_limb_t cy;
   540 
   541   assert (an >= bn);
   542 
   543   cy = mpn_sub_n (rp, ap, bp, bn);
   544   if (an > bn)
   545     cy = mpn_sub_1 (rp + bn, ap + bn, an - bn, cy);
   546   return cy;
   547 }
   548 
   549 mp_limb_t
   550 mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
   551 {
   552   mp_limb_t ul, cl, hpl, lpl;
   553 
   554   assert (n >= 1);
   555 
   556   cl = 0;
   557   do
   558     {
   559       ul = *up++;
   560       gmp_umul_ppmm (hpl, lpl, ul, vl);
   561 
   562       lpl += cl;
   563       cl = (lpl < cl) + hpl;
   564 
   565       *rp++ = lpl;
   566     }
   567   while (--n != 0);
   568 
   569   return cl;
   570 }
   571 
   572 mp_limb_t
   573 mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
   574 {
   575   mp_limb_t ul, cl, hpl, lpl, rl;
   576 
   577   assert (n >= 1);
   578 
   579   cl = 0;
   580   do
   581     {
   582       ul = *up++;
   583       gmp_umul_ppmm (hpl, lpl, ul, vl);
   584 
   585       lpl += cl;
   586       cl = (lpl < cl) + hpl;
   587 
   588       rl = *rp;
   589       lpl = rl + lpl;
   590       cl += lpl < rl;
   591       *rp++ = lpl;
   592     }
   593   while (--n != 0);
   594 
   595   return cl;
   596 }
   597 
   598 mp_limb_t
   599 mpn_submul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
   600 {
   601   mp_limb_t ul, cl, hpl, lpl, rl;
   602 
   603   assert (n >= 1);
   604 
   605   cl = 0;
   606   do
   607     {
   608       ul = *up++;
   609       gmp_umul_ppmm (hpl, lpl, ul, vl);
   610 
   611       lpl += cl;
   612       cl = (lpl < cl) + hpl;
   613 
   614       rl = *rp;
   615       lpl = rl - lpl;
   616       cl += lpl > rl;
   617       *rp++ = lpl;
   618     }
   619   while (--n != 0);
   620 
   621   return cl;
   622 }
   623 
   624 mp_limb_t
   625 mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
   626 {
   627   assert (un >= vn);
   628   assert (vn >= 1);
   629   assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un));
   630   assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn));
   631 
   632   /* We first multiply by the low order limb. This result can be
   633      stored, not added, to rp. We also avoid a loop for zeroing this
   634      way. */
   635 
   636   rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
   637 
   638   /* Now accumulate the product of up[] and the next higher limb from
   639      vp[]. */
   640 
   641   while (--vn >= 1)
   642     {
   643       rp += 1, vp += 1;
   644       rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
   645     }
   646   return rp[un];
   647 }
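
/* mpn_mul is plain schoolbook multiplication: the row for the low limb of vp
   is written with mpn_mul_1 and every further row is accumulated with
   mpn_addmul_1, so the cost is O(un*vn) limb products.  The result occupies
   un + vn limbs at rp, which must not overlap either input, and the return
   value is its most significant limb.  A minimal usage sketch, with
   hypothetical 4-limb operands a[] and b[]:

       mp_limb_t p[8];              // un + vn = 4 + 4 result limbs
       mpn_mul (p, a, 4, b, 4);     // p now holds the full 8-limb product
*/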
   648 
   649 void
   650 mpn_mul_n (mp_ptr rp, mp_srcptr ap, mp_srcptr bp, mp_size_t n)
   651 {
   652   mpn_mul (rp, ap, n, bp, n);
   653 }
   654 
   655 void
   656 mpn_sqr (mp_ptr rp, mp_srcptr ap, mp_size_t n)
   657 {
   658   mpn_mul (rp, ap, n, ap, n);
   659 }
   660 
   661 mp_limb_t
   662 mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
   663 {
   664   mp_limb_t high_limb, low_limb;
   665   unsigned int tnc;
   666   mp_limb_t retval;
   667 
   668   assert (n >= 1);
   669   assert (cnt >= 1);
   670   assert (cnt < GMP_LIMB_BITS);
   671 
   672   up += n;
   673   rp += n;
   674 
   675   tnc = GMP_LIMB_BITS - cnt;
   676   low_limb = *--up;
   677   retval = low_limb >> tnc;
   678   high_limb = (low_limb << cnt);
   679 
   680   while (--n != 0)
   681     {
   682       low_limb = *--up;
   683       *--rp = high_limb | (low_limb >> tnc);
   684       high_limb = (low_limb << cnt);
   685     }
   686   *--rp = high_limb;
   687 
   688   return retval;
   689 }
   690 
   691 mp_limb_t
   692 mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
   693 {
   694   mp_limb_t high_limb, low_limb;
   695   unsigned int tnc;
   696   mp_limb_t retval;
   697 
   698   assert (n >= 1);
   699   assert (cnt >= 1);
   700   assert (cnt < GMP_LIMB_BITS);
   701 
   702   tnc = GMP_LIMB_BITS - cnt;
   703   high_limb = *up++;
   704   retval = (high_limb << tnc);
   705   low_limb = high_limb >> cnt;
   706 
   707   while (--n != 0)
   708     {
   709       high_limb = *up++;
   710       *rp++ = low_limb | (high_limb << tnc);
   711       low_limb = high_limb >> cnt;
   712     }
   713   *rp = low_limb;
   714 
   715   return retval;
   716 }
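
/* Both shift routines require 1 <= cnt < GMP_LIMB_BITS and return the bits
   shifted out: mpn_lshift returns the top cnt bits of the operand,
   right-adjusted, while mpn_rshift returns the bottom cnt bits,
   left-adjusted.  mpn_lshift walks from the most significant limb downward,
   so rp at or above up is safe; mpn_rshift walks upward, so rp at or below
   up is safe.  Example, assuming 8-bit limbs: shifting the single limb 0xB4
   left by 2 stores 0xD0 and returns 0x02. */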
   717 
   718 static mp_bitcnt_t
   719 mpn_common_scan (mp_limb_t limb, mp_size_t i, mp_srcptr up, mp_size_t un,
   720                  mp_limb_t ux)
   721 {
   722   unsigned cnt;
   723 
   724   assert (ux == 0 || ux == GMP_LIMB_MAX);
   725   assert (0 <= i && i <= un );
   726 
   727   while (limb == 0)
   728     {
   729       i++;
   730       if (i == un)
   731         return (ux == 0 ? ~(mp_bitcnt_t) 0 : un * GMP_LIMB_BITS);
   732       limb = ux ^ up[i];
   733     }
   734   gmp_ctz (cnt, limb);
   735   return (mp_bitcnt_t) i * GMP_LIMB_BITS + cnt;
   736 }
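
/* mpn_common_scan serves both bit searches: ux == 0 when looking for a set
   bit (mpn_scan1) and ux == GMP_LIMB_MAX when looking for a clear bit
   (mpn_scan0, where each limb is complemented via ux ^ up[i]).  If the scan
   runs off the end of the un limbs it returns ~(mp_bitcnt_t) 0 for the
   set-bit case and un * GMP_LIMB_BITS for the clear-bit case, as if the
   operand were extended with zero bits. */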
   737 
   738 mp_bitcnt_t
   739 mpn_scan1 (mp_srcptr ptr, mp_bitcnt_t bit)
   740 {
   741   mp_size_t i;
   742   i = bit / GMP_LIMB_BITS;
   743 
   744   return mpn_common_scan ( ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
   745                           i, ptr, i, 0);
   746 }
   747 
   748 mp_bitcnt_t
   749 mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
   750 {
   751   mp_size_t i;
   752   i = bit / GMP_LIMB_BITS;
   753 
   754   return mpn_common_scan (~ptr[i] & (GMP_LIMB_MAX << (bit % GMP_LIMB_BITS)),
   755                           i, ptr, i, GMP_LIMB_MAX);
   756 }
   757 
   758 void
   759 mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n)
   760 {
   761   while (--n >= 0)
   762     *rp++ = ~ *up++;
   763 }
   764 
   765 mp_limb_t
   766 mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n)
   767 {
   768   while (*up == 0)
   769     {
   770       *rp = 0;
   771       if (!--n)
   772         return 0;
   773       ++up; ++rp;
   774     }
   775   *rp = - *up;
   776   mpn_com (++rp, ++up, --n);
   777   return 1;
   778 }
   779 
   780 
   781 /* MPN division interface. */
   782 
   783 /* The 3/2 inverse is defined as
   784 
   785      m = floor( (B^3-1) / (B u1 + u0)) - B
   786 */
   787 mp_limb_t
   788 mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
   789 {
   790   mp_limb_t r, m;
   791 
   792   {
   793     mp_limb_t p, ql;
   794     unsigned ul, uh, qh;
   795 
   796     assert (sizeof (unsigned) * 2 >= sizeof (mp_limb_t));
   797     /* For notation, let b denote the half-limb base, so that B = b^2.
   798        Split u1 = b uh + ul. */
   799     ul = u1 & GMP_LLIMB_MASK;
   800     uh = u1 >> (GMP_LIMB_BITS / 2);
   801 
   802     /* Approximation of the high half of quotient. Differs from the 2/1
   803        inverse of the half limb uh, since we have already subtracted
   804        u0. */
   805     qh = (u1 ^ GMP_LIMB_MAX) / uh;
   806 
   807     /* Adjust to get a half-limb 3/2 inverse, i.e., we want
   808 
   809        qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u)
   810            = floor( (b (~u) + b-1) / u),
   811 
   812        and the remainder
   813 
   814        r = b (~u) + b-1 - qh (b uh + ul)
   815        = b (~u - qh uh) + b-1 - qh ul
   816 
   817        Subtraction of qh ul may underflow, which implies adjustments.
   818        But by normalization, 2 u >= B > qh ul, so we need to adjust by
   819        at most 2.
   820     */
   821 
   822     r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;
   823 
   824     p = (mp_limb_t) qh * ul;
   825     /* Adjustment steps taken from udiv_qrnnd_c */
   826     if (r < p)
   827       {
   828         qh--;
   829         r += u1;
   830         if (r >= u1) /* i.e. we didn't get carry when adding to r */
   831           if (r < p)
   832             {
   833               qh--;
   834               r += u1;
   835             }
   836       }
   837     r -= p;
   838 
   839     /* Low half of the quotient is
   840 
   841        ql = floor ( (b r + b-1) / u1).
   842 
   843        This is a 3/2 division (on half-limbs), for which qh is a
   844        suitable inverse. */
   845 
   846     p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
   847     /* Unlike full-limb 3/2, we can add 1 without overflow. For this to
   848        work, it is essential that ql is a full mp_limb_t. */
   849     ql = (p >> (GMP_LIMB_BITS / 2)) + 1;
   850 
   851     /* By the 3/2 trick, we don't need the high half limb. */
   852     r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;
   853 
   854     if (r >= (GMP_LIMB_MAX & (p << (GMP_LIMB_BITS / 2))))
   855       {
   856         ql--;
   857         r += u1;
   858       }
   859     m = ((mp_limb_t) qh << (GMP_LIMB_BITS / 2)) + ql;
   860     if (r >= u1)
   861       {
   862         m++;
   863         r -= u1;
   864       }
   865   }
   866 
   867   /* Now m is the 2/1 inverse of u1. If u0 > 0, adjust it to become a
   868      3/2 inverse. */
   869   if (u0 > 0)
   870     {
   871       mp_limb_t th, tl;
   872       r = ~r;
   873       r += u0;
   874       if (r < u0)
   875         {
   876           m--;
   877           if (r >= u1)
   878             {
   879               m--;
   880               r -= u1;
   881             }
   882           r -= u1;
   883         }
   884       gmp_umul_ppmm (th, tl, u0, m);
   885       r += th;
   886       if (r < th)
   887         {
   888           m--;
   889           m -= ((r > u1) | ((r == u1) & (tl > u0)));
   890         }
   891     }
   892 
   893   return m;
   894 }
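
/* Worked instance of the defining formula, assuming 8-bit limbs (B = 256):
   for the normalized limb u1 = 0xC0 and u0 = 0,
   m = floor ((B^3 - 1) / (B*u1 + u0)) - B = floor (16777215 / 49152) - 256
     = 341 - 256 = 85 = 0x55.  With u0 = 0 this coincides with the ordinary
   2/1 reciprocal floor ((B^2 - 1) / u1) - B of a single normalized limb. */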
   895 
   896 struct gmp_div_inverse
   897 {
   898   /* Normalization shift count. */
   899   unsigned shift;
   900   /* Normalized divisor (d0 unused for mpn_div_qr_1) */
   901   mp_limb_t d1, d0;
   902   /* Inverse, for 2/1 or 3/2. */
   903   mp_limb_t di;
   904 };
   905 
   906 static void
   907 mpn_div_qr_1_invert (struct gmp_div_inverse *inv, mp_limb_t d)
   908 {
   909   unsigned shift;
   910 
   911   assert (d > 0);
   912   gmp_clz (shift, d);
   913   inv->shift = shift;
   914   inv->d1 = d << shift;
   915   inv->di = mpn_invert_limb (inv->d1);
   916 }
   917 
   918 static void
   919 mpn_div_qr_2_invert (struct gmp_div_inverse *inv,
   920                      mp_limb_t d1, mp_limb_t d0)
   921 {
   922   unsigned shift;
   923 
   924   assert (d1 > 0);
   925   gmp_clz (shift, d1);
   926   inv->shift = shift;
   927   if (shift > 0)
   928     {
   929       d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
   930       d0 <<= shift;
   931     }
   932   inv->d1 = d1;
   933   inv->d0 = d0;
   934   inv->di = mpn_invert_3by2 (d1, d0);
   935 }
   936 
   937 static void
   938 mpn_div_qr_invert (struct gmp_div_inverse *inv,
   939                    mp_srcptr dp, mp_size_t dn)
   940 {
   941   assert (dn > 0);
   942 
   943   if (dn == 1)
   944     mpn_div_qr_1_invert (inv, dp[0]);
   945   else if (dn == 2)
   946     mpn_div_qr_2_invert (inv, dp[1], dp[0]);
   947   else
   948     {
   949       unsigned shift;
   950       mp_limb_t d1, d0;
   951 
   952       d1 = dp[dn-1];
   953       d0 = dp[dn-2];
   954       assert (d1 > 0);
   955       gmp_clz (shift, d1);
   956       inv->shift = shift;
   957       if (shift > 0)
   958         {
   959           d1 = (d1 << shift) | (d0 >> (GMP_LIMB_BITS - shift));
   960           d0 = (d0 << shift) | (dp[dn-3] >> (GMP_LIMB_BITS - shift));
   961         }
   962       inv->d1 = d1;
   963       inv->d0 = d0;
   964       inv->di = mpn_invert_3by2 (d1, d0);
   965     }
   966 }
   967 
   968 /* Not matching current public gmp interface, rather corresponding to
   969    the sbpi1_div_* functions. */
   970 static mp_limb_t
   971 mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
   972                      const struct gmp_div_inverse *inv)
   973 {
   974   mp_limb_t d, di;
   975   mp_limb_t r;
   976   mp_ptr tp = NULL;
   977   mp_size_t tn = 0;
   978 
   979   if (inv->shift > 0)
   980     {
   981       /* Shift, reusing qp area if possible. In-place shift if qp == np. */
   982       tp = qp;
   983       if (!tp)
   984         {
   985            tn = nn;
   986            tp = gmp_alloc_limbs (tn);
   987         }
   988       r = mpn_lshift (tp, np, nn, inv->shift);
   989       np = tp;
   990     }
   991   else
   992     r = 0;
   993 
   994   d = inv->d1;
   995   di = inv->di;
   996   while (--nn >= 0)
   997     {
   998       mp_limb_t q;
   999 
  1000       gmp_udiv_qrnnd_preinv (q, r, r, np[nn], d, di);
  1001       if (qp)
  1002         qp[nn] = q;
  1003     }
  1004   if (tn)
  1005     gmp_free_limbs (tp, tn);
  1006 
  1007   return r >> inv->shift;
  1008 }
  1009 
  1010 static void
  1011 mpn_div_qr_2_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
  1012                      const struct gmp_div_inverse *inv)
  1013 {
  1014   unsigned shift;
  1015   mp_size_t i;
  1016   mp_limb_t d1, d0, di, r1, r0;
  1017 
  1018   assert (nn >= 2);
  1019   shift = inv->shift;
  1020   d1 = inv->d1;
  1021   d0 = inv->d0;
  1022   di = inv->di;
  1023 
  1024   if (shift > 0)
  1025     r1 = mpn_lshift (np, np, nn, shift);
  1026   else
  1027     r1 = 0;
  1028 
  1029   r0 = np[nn - 1];
  1030 
  1031   i = nn - 2;
  1032   do
  1033     {
  1034       mp_limb_t n0, q;
  1035       n0 = np[i];
  1036       gmp_udiv_qr_3by2 (q, r1, r0, r1, r0, n0, d1, d0, di);
  1037 
  1038       if (qp)
  1039         qp[i] = q;
  1040     }
  1041   while (--i >= 0);
  1042 
  1043   if (shift > 0)
  1044     {
  1045       assert ((r0 & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - shift))) == 0);
  1046       r0 = (r0 >> shift) | (r1 << (GMP_LIMB_BITS - shift));
  1047       r1 >>= shift;
  1048     }
  1049 
  1050   np[1] = r1;
  1051   np[0] = r0;
  1052 }
  1053 
  1054 static void
  1055 mpn_div_qr_pi1 (mp_ptr qp,
  1056                 mp_ptr np, mp_size_t nn, mp_limb_t n1,
  1057                 mp_srcptr dp, mp_size_t dn,
  1058                 mp_limb_t dinv)
  1059 {
  1060   mp_size_t i;
  1061 
  1062   mp_limb_t d1, d0;
  1063   mp_limb_t cy, cy1;
  1064   mp_limb_t q;
  1065 
  1066   assert (dn > 2);
  1067   assert (nn >= dn);
  1068 
  1069   d1 = dp[dn - 1];
  1070   d0 = dp[dn - 2];
  1071 
  1072   assert ((d1 & GMP_LIMB_HIGHBIT) != 0);
  1073   /* Iteration variable is the index of the q limb.
  1074    *
  1075    * We divide <n1, np[dn-1+i], np[dn-2+i], np[dn-3+i],..., np[i]>
  1076    * by            <d1,          d0,        dp[dn-3],  ..., dp[0] >
  1077    */
  1078 
  1079   i = nn - dn;
  1080   do
  1081     {
  1082       mp_limb_t n0 = np[dn-1+i];
  1083 
  1084       if (n1 == d1 && n0 == d0)
  1085         {
  1086           q = GMP_LIMB_MAX;
  1087           mpn_submul_1 (np+i, dp, dn, q);
  1088           n1 = np[dn-1+i];      /* update n1, last loop's value will now be invalid */
  1089         }
  1090       else
  1091         {
  1092           gmp_udiv_qr_3by2 (q, n1, n0, n1, n0, np[dn-2+i], d1, d0, dinv);
  1093 
  1094           cy = mpn_submul_1 (np + i, dp, dn-2, q);
  1095 
  1096           cy1 = n0 < cy;
  1097           n0 = n0 - cy;
  1098           cy = n1 < cy1;
  1099           n1 = n1 - cy1;
  1100           np[dn-2+i] = n0;
  1101 
  1102           if (cy != 0)
  1103             {
  1104               n1 += d1 + mpn_add_n (np + i, np + i, dp, dn - 1);
  1105               q--;
  1106             }
  1107         }
  1108 
  1109       if (qp)
  1110         qp[i] = q;
  1111     }
  1112   while (--i >= 0);
  1113 
  1114   np[dn - 1] = n1;
  1115 }
  1116 
  1117 static void
  1118 mpn_div_qr_preinv (mp_ptr qp, mp_ptr np, mp_size_t nn,
  1119                    mp_srcptr dp, mp_size_t dn,
  1120                    const struct gmp_div_inverse *inv)
  1121 {
  1122   assert (dn > 0);
  1123   assert (nn >= dn);
  1124 
  1125   if (dn == 1)
  1126     np[0] = mpn_div_qr_1_preinv (qp, np, nn, inv);
  1127   else if (dn == 2)
  1128     mpn_div_qr_2_preinv (qp, np, nn, inv);
  1129   else
  1130     {
  1131       mp_limb_t nh;
  1132       unsigned shift;
  1133 
  1134       assert (inv->d1 == dp[dn-1]);
  1135       assert (inv->d0 == dp[dn-2]);
  1136       assert ((inv->d1 & GMP_LIMB_HIGHBIT) != 0);
  1137 
  1138       shift = inv->shift;
  1139       if (shift > 0)
  1140         nh = mpn_lshift (np, np, nn, shift);
  1141       else
  1142         nh = 0;
  1143 
  1144       mpn_div_qr_pi1 (qp, np, nn, nh, dp, dn, inv->di);
  1145 
  1146       if (shift > 0)
  1147         gmp_assert_nocarry (mpn_rshift (np, np, dn, shift));
  1148     }
  1149 }
  1150 
  1151 static void
  1152 mpn_div_qr (mp_ptr qp, mp_ptr np, mp_size_t nn, mp_srcptr dp, mp_size_t dn)
  1153 {
  1154   struct gmp_div_inverse inv;
  1155   mp_ptr tp = NULL;
  1156 
  1157   assert (dn > 0);
  1158   assert (nn >= dn);
  1159 
  1160   mpn_div_qr_invert (&inv, dp, dn);
  1161   if (dn > 2 && inv.shift > 0)
  1162     {
  1163       tp = gmp_alloc_limbs (dn);
  1164       gmp_assert_nocarry (mpn_lshift (tp, dp, dn, inv.shift));
  1165       dp = tp;
  1166     }
  1167   mpn_div_qr_preinv (qp, np, nn, dp, dn, &inv);
  1168   if (tp)
  1169     gmp_free_limbs (tp, dn);
  1170 }
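
/* mpn_div_qr ties the cases above together: it builds the divisor inverse,
   shifts a temporary copy of dp when normalization is needed for dn > 2, and
   then divides in place.  On return the quotient (nn - dn + 1 limbs) is
   stored at qp unless qp is NULL, and the remainder is left in the low dn
   limbs of np, while the higher limbs of np are in general destroyed.
   A minimal usage sketch, with hypothetical limb arrays n[] (5 limbs) and
   d[] (2 limbs, d[1] != 0):

       mp_limb_t q[4];              // nn - dn + 1 quotient limbs
       mpn_div_qr (q, n, 5, d, 2);  // n[0..1] now holds the remainder
*/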
  1171 
  1172 
  1173 /* MPN base conversion. */
  1174 static unsigned
  1175 mpn_base_power_of_two_p (unsigned b)
  1176 {
  1177   switch (b)
  1178     {
  1179     case 2: return 1;
  1180     case 4: return 2;
  1181     case 8: return 3;
  1182     case 16: return 4;
  1183     case 32: return 5;
  1184     case 64: return 6;
  1185     case 128: return 7;
  1186     case 256: return 8;
  1187     default: return 0;
  1188     }
  1189 }
  1190 
  1191 struct mpn_base_info
  1192 {
  1193   /* bb is the largest power of the base which fits in one limb, and
  1194      exp is the corresponding exponent. */
  1195   unsigned exp;
  1196   mp_limb_t bb;
  1197 };
  1198 
  1199 static void
  1200 mpn_get_base_info (struct mpn_base_info *info, mp_limb_t b)
  1201 {
  1202   mp_limb_t m;
  1203   mp_limb_t p;
  1204   unsigned exp;
  1205 
  1206   m = GMP_LIMB_MAX / b;
  1207   for (exp = 1, p = b; p <= m; exp++)
  1208     p *= b;
  1209 
  1210   info->exp = exp;
  1211   info->bb = p;
  1212 }
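
/* Example, assuming 64-bit limbs and b = 10: the loop stops at
   p = 10^19 = 10000000000000000000, the largest power of ten that still fits
   in a limb, so info->exp = 19 and info->bb = 10^19.  The string conversion
   routines below then handle 19 decimal digits per big division or
   multiplication step. */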
  1213 
  1214 static mp_bitcnt_t
  1215 mpn_limb_size_in_base_2 (mp_limb_t u)
  1216 {
  1217   unsigned shift;
  1218 
  1219   assert (u > 0);
  1220   gmp_clz (shift, u);
  1221   return GMP_LIMB_BITS - shift;
  1222 }
  1223 
  1224 static size_t
  1225 mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
  1226 {
  1227   unsigned char mask;
  1228   size_t sn, j;
  1229   mp_size_t i;
  1230   unsigned shift;
  1231 
  1232   sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
  1233         + bits - 1) / bits;
  1234 
  1235   mask = (1U << bits) - 1;
  1236 
  1237   for (i = 0, j = sn, shift = 0; j-- > 0;)
  1238     {
  1239       unsigned char digit = up[i] >> shift;
  1240 
  1241       shift += bits;
  1242 
  1243       if (shift >= GMP_LIMB_BITS && ++i < un)
  1244         {
  1245           shift -= GMP_LIMB_BITS;
  1246           digit |= up[i] << (bits - shift);
  1247         }
  1248       sp[j] = digit & mask;
  1249     }
  1250   return sn;
  1251 }
  1252 
  1253 /* We generate digits from the least significant end, and reverse at
  1254    the end. */
  1255 static size_t
  1256 mpn_limb_get_str (unsigned char *sp, mp_limb_t w,
  1257                   const struct gmp_div_inverse *binv)
  1258 {
  1259   mp_size_t i;
  1260   for (i = 0; w > 0; i++)
  1261     {
  1262       mp_limb_t h, l, r;
  1263 
  1264       h = w >> (GMP_LIMB_BITS - binv->shift);
  1265       l = w << binv->shift;
  1266 
  1267       gmp_udiv_qrnnd_preinv (w, r, h, l, binv->d1, binv->di);
  1268       assert ((r & (GMP_LIMB_MAX >> (GMP_LIMB_BITS - binv->shift))) == 0);
  1269       r >>= binv->shift;
  1270 
  1271       sp[i] = r;
  1272     }
  1273   return i;
  1274 }
  1275 
  1276 static size_t
  1277 mpn_get_str_other (unsigned char *sp,
  1278                    int base, const struct mpn_base_info *info,
  1279                    mp_ptr up, mp_size_t un)
  1280 {
  1281   struct gmp_div_inverse binv;
  1282   size_t sn;
  1283   size_t i;
  1284 
  1285   mpn_div_qr_1_invert (&binv, base);
  1286 
  1287   sn = 0;
  1288 
  1289   if (un > 1)
  1290     {
  1291       struct gmp_div_inverse bbinv;
  1292       mpn_div_qr_1_invert (&bbinv, info->bb);
  1293 
  1294       do
  1295         {
  1296           mp_limb_t w;
  1297           size_t done;
  1298           w = mpn_div_qr_1_preinv (up, up, un, &bbinv);
  1299           un -= (up[un-1] == 0);
  1300           done = mpn_limb_get_str (sp + sn, w, &binv);
  1301 
  1302           for (sn += done; done < info->exp; done++)
  1303             sp[sn++] = 0;
  1304         }
  1305       while (un > 1);
  1306     }
  1307   sn += mpn_limb_get_str (sp + sn, up[0], &binv);
  1308 
  1309   /* Reverse order */
  1310   for (i = 0; 2*i + 1 < sn; i++)
  1311     {
  1312       unsigned char t = sp[i];
  1313       sp[i] = sp[sn - i - 1];
  1314       sp[sn - i - 1] = t;
  1315     }
  1316 
  1317   return sn;
  1318 }
  1319 
  1320 size_t
  1321 mpn_get_str (unsigned char *sp, int base, mp_ptr up, mp_size_t un)
  1322 {
  1323   unsigned bits;
  1324 
  1325   assert (un > 0);
  1326   assert (up[un-1] > 0);
  1327 
  1328   bits = mpn_base_power_of_two_p (base);
  1329   if (bits)
  1330     return mpn_get_str_bits (sp, bits, up, un);
  1331   else
  1332     {
  1333       struct mpn_base_info info;
  1334 
  1335       mpn_get_base_info (&info, base);
  1336       return mpn_get_str_other (sp, base, &info, up, un);
  1337     }
  1338 }
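
/* Note that mpn_get_str produces raw digit values in the range 0 .. base-1,
   most significant digit first, rather than ASCII characters; mapping to a
   printable alphabet is left to the mpz_get_str layer.  The input {up, un}
   must be normalized (up[un-1] nonzero), and for bases that are not a power
   of two it is destroyed, since mpn_get_str_other divides it in place. */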
  1339 
  1340 static mp_size_t
  1341 mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
  1342                   unsigned bits)
  1343 {
  1344   mp_size_t rn;
  1345   mp_limb_t limb;
  1346   unsigned shift;
  1347 
  1348   for (limb = 0, rn = 0, shift = 0; sn-- > 0; )
  1349     {
  1350       limb |= (mp_limb_t) sp[sn] << shift;
  1351       shift += bits;
  1352       if (shift >= GMP_LIMB_BITS)
  1353         {
  1354           shift -= GMP_LIMB_BITS;
  1355           rp[rn++] = limb;
  1356           /* Next line is correct also if shift == 0,
  1357              bits == 8, and mp_limb_t == unsigned char. */
  1358           limb = (unsigned int) sp[sn] >> (bits - shift);
  1359         }
  1360     }
  1361   if (limb != 0)
  1362     rp[rn++] = limb;
  1363   else
  1364     rn = mpn_normalized_size (rp, rn);
  1365   return rn;
  1366 }
  1367 
  1368 /* Result is usually normalized, except for all-zero input, in which
  1369    case a single zero limb is written at *RP, and 1 is returned. */
  1370 static mp_size_t
  1371 mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
  1372                    mp_limb_t b, const struct mpn_base_info *info)
  1373 {
  1374   mp_size_t rn;
  1375   mp_limb_t w;
  1376   unsigned k;
  1377   size_t j;
  1378 
  1379   assert (sn > 0);
  1380 
  1381   k = 1 + (sn - 1) % info->exp;
  1382 
  1383   j = 0;
  1384   w = sp[j++];
  1385   while (--k != 0)
  1386     w = w * b + sp[j++];
  1387 
  1388   rp[0] = w;
  1389 
  1390   for (rn = 1; j < sn;)
  1391     {
  1392       mp_limb_t cy;
  1393 
  1394       w = sp[j++];
  1395       for (k = 1; k < info->exp; k++)
  1396         w = w * b + sp[j++];
  1397 
  1398       cy = mpn_mul_1 (rp, rp, rn, info->bb);
  1399       cy += mpn_add_1 (rp, rp, rn, w);
  1400       if (cy > 0)
  1401         rp[rn++] = cy;
  1402     }
  1403   assert (j == sn);
  1404 
  1405   return rn;
  1406 }
  1407 
  1408 mp_size_t
  1409 mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
  1410 {
  1411   unsigned bits;
  1412 
  1413   if (sn == 0)
  1414     return 0;
  1415 
  1416   bits = mpn_base_power_of_two_p (base);
  1417   if (bits)
  1418     return mpn_set_str_bits (rp, sp, sn, bits);
  1419   else
  1420     {
  1421       struct mpn_base_info info;
  1422 
  1423       mpn_get_base_info (&info, base);
  1424       return mpn_set_str_other (rp, sp, sn, base, &info);
  1425     }
  1426 }
  1427 
  1428 
  1429 /* MPZ interface */
  1430 void
  1431 mpz_init (mpz_t r)
  1432 {
  1433   static const mp_limb_t dummy_limb = GMP_LIMB_MAX & 0xc1a0;
  1434 
  1435   r->_mp_alloc = 0;
  1436   r->_mp_size = 0;
  1437   r->_mp_d = (mp_ptr) &dummy_limb;
  1438 }
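
/* The static dummy_limb gives a freshly initialized mpz_t a valid, read-only
   limb pointer even though nothing has been allocated; _mp_alloc == 0 marks
   this lazy state, and real limb storage only appears once MPZ_REALLOC (or
   mpz_init2) is used.  The 0xc1a0 value appears to be nothing more than a
   recognizable filler pattern. */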
  1439 
  1440 /* The utility of this function is a bit limited, since many functions
  1441    assign the result variable using mpz_swap. */
  1442 void
  1443 mpz_init2 (mpz_t r, mp_bitcnt_t bits)
  1444 {
  1445   mp_size_t rn;
  1446 
  1447   bits -= (bits != 0);          /* Round down, except if 0 */
  1448   rn = 1 + bits / GMP_LIMB_BITS;
  1449 
  1450   r->_mp_alloc = rn;
  1451   r->_mp_size = 0;
  1452   r->_mp_d = gmp_alloc_limbs (rn);
  1453 }
  1454 
  1455 void
  1456 mpz_clear (mpz_t r)
  1457 {
  1458   if (r->_mp_alloc)
  1459     gmp_free_limbs (r->_mp_d, r->_mp_alloc);
  1460 }
  1461 
  1462 static mp_ptr
  1463 mpz_realloc (mpz_t r, mp_size_t size)
  1464 {
  1465   size = GMP_MAX (size, 1);
  1466 
  1467   if (r->_mp_alloc)
  1468     r->_mp_d = gmp_realloc_limbs (r->_mp_d, r->_mp_alloc, size);
  1469   else
  1470     r->_mp_d = gmp_alloc_limbs (size);
  1471   r->_mp_alloc = size;
  1472 
  1473   if (GMP_ABS (r->_mp_size) > size)
  1474     r->_mp_size = 0;
  1475 
  1476   return r->_mp_d;
  1477 }
  1478 
  1479 /* Realloc for an mpz_t WHAT if it has less than NEEDED limbs.  */
  1480 #define MPZ_REALLOC(z,n) ((n) > (z)->_mp_alloc                  \
  1481                           ? mpz_realloc(z,n)                    \
  1482                           : (z)->_mp_d)
  1483 
  1484 /* MPZ assignment and basic conversions. */
  1485 void
  1486 mpz_set_si (mpz_t r, signed long int x)
  1487 {
  1488   if (x >= 0)
  1489     mpz_set_ui (r, x);
  1490   else /* (x < 0) */
  1491     if (GMP_LIMB_BITS < GMP_ULONG_BITS)
  1492       {
  1493         mpz_set_ui (r, GMP_NEG_CAST (unsigned long int, x));
  1494         mpz_neg (r, r);
  1495       }
  1496   else
  1497     {
  1498       r->_mp_size = -1;
  1499       MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x);
  1500     }
  1501 }
  1502 
  1503 void
  1504 mpz_set_ui (mpz_t r, unsigned long int x)
  1505 {
  1506   if (x > 0)
  1507     {
  1508       r->_mp_size = 1;
  1509       MPZ_REALLOC (r, 1)[0] = x;
  1510       if (GMP_LIMB_BITS < GMP_ULONG_BITS)
  1511         {
  1512           int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
  1513           while (x >>= LOCAL_GMP_LIMB_BITS)
  1514             {
  1515               ++ r->_mp_size;
  1516               MPZ_REALLOC (r, r->_mp_size)[r->_mp_size - 1] = x;
  1517             }
  1518         }
  1519     }
  1520   else
  1521     r->_mp_size = 0;
  1522 }
  1523 
  1524 void
  1525 mpz_set (mpz_t r, const mpz_t x)
  1526 {
  1527   /* Allow the NOP r == x */
  1528   if (r != x)
  1529     {
  1530       mp_size_t n;
  1531       mp_ptr rp;
  1532 
  1533       n = GMP_ABS (x->_mp_size);
  1534       rp = MPZ_REALLOC (r, n);
  1535 
  1536       mpn_copyi (rp, x->_mp_d, n);
  1537       r->_mp_size = x->_mp_size;
  1538     }
  1539 }
  1540 
  1541 void
  1542 mpz_init_set_si (mpz_t r, signed long int x)
  1543 {
  1544   mpz_init (r);
  1545   mpz_set_si (r, x);
  1546 }
  1547 
  1548 void
  1549 mpz_init_set_ui (mpz_t r, unsigned long int x)
  1550 {
  1551   mpz_init (r);
  1552   mpz_set_ui (r, x);
  1553 }
  1554 
  1555 void
  1556 mpz_init_set (mpz_t r, const mpz_t x)
  1557 {
  1558   mpz_init (r);
  1559   mpz_set (r, x);
  1560 }
  1561 
  1562 int
  1563 mpz_fits_slong_p (const mpz_t u)
  1564 {
  1565   return mpz_cmp_si (u, LONG_MAX) <= 0 && mpz_cmp_si (u, LONG_MIN) >= 0;
  1566 }
  1567 
  1568 static int
  1569 mpn_absfits_ulong_p (mp_srcptr up, mp_size_t un)
  1570 {
  1571   int ulongsize = GMP_ULONG_BITS / GMP_LIMB_BITS;
  1572   mp_limb_t ulongrem = 0;
  1573 
  1574   if (GMP_ULONG_BITS % GMP_LIMB_BITS != 0)
  1575     ulongrem = (mp_limb_t) (ULONG_MAX >> GMP_LIMB_BITS * ulongsize) + 1;
  1576 
  1577   return un <= ulongsize || (up[ulongsize] < ulongrem && un == ulongsize + 1);
  1578 }
  1579 
  1580 int
  1581 mpz_fits_ulong_p (const mpz_t u)
  1582 {
  1583   mp_size_t us = u->_mp_size;
  1584 
  1585   return us >= 0 && mpn_absfits_ulong_p (u->_mp_d, us);
  1586 }
  1587 
  1588 int
  1589 mpz_fits_sint_p (const mpz_t u)
  1590 {
  1591   return mpz_cmp_si (u, INT_MAX) <= 0 && mpz_cmp_si (u, INT_MIN) >= 0;
  1592 }
  1593 
  1594 int
  1595 mpz_fits_uint_p (const mpz_t u)
  1596 {
  1597   return u->_mp_size >= 0 && mpz_cmpabs_ui (u, UINT_MAX) <= 0;
  1598 }
  1599 
  1600 int
  1601 mpz_fits_sshort_p (const mpz_t u)
  1602 {
  1603   return mpz_cmp_si (u, SHRT_MAX) <= 0 && mpz_cmp_si (u, SHRT_MIN) >= 0;
  1604 }
  1605 
  1606 int
  1607 mpz_fits_ushort_p (const mpz_t u)
  1608 {
  1609   return u->_mp_size >= 0 && mpz_cmpabs_ui (u, USHRT_MAX) <= 0;
  1610 }
  1611 
  1612 long int
  1613 mpz_get_si (const mpz_t u)
  1614 {
  1615   unsigned long r = mpz_get_ui (u);
  1616   unsigned long c = -LONG_MAX - LONG_MIN;
  1617 
  1618   if (u->_mp_size < 0)
  1619     /* This expression is necessary to properly handle -LONG_MIN */
  1620     return -(long) c - (long) ((r - c) & LONG_MAX);
  1621   else
  1622     return (long) (r & LONG_MAX);
  1623 }
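
The negative branch above reconstructs the value without ever forming -LONG_MIN in signed arithmetic. A minimal check of that corner case, assuming a two's-complement long and the companion mini-gmp.h header:

    #include <assert.h>
    #include <limits.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t u;
      /* c = -LONG_MAX - LONG_MIN is 1 on two's complement, so for
         |u| = LONG_MAX + 1 the expression yields -1 - LONG_MAX = LONG_MIN
         with no overflowing negation. */
      mpz_init_set_si (u, LONG_MIN);
      assert (mpz_get_si (u) == LONG_MIN);
      mpz_clear (u);
      return 0;
    }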
  1624 
  1625 unsigned long int
  1626 mpz_get_ui (const mpz_t u)
  1627 {
  1628   if (GMP_LIMB_BITS < GMP_ULONG_BITS)
  1629     {
  1630       int LOCAL_GMP_LIMB_BITS = GMP_LIMB_BITS;
  1631       unsigned long r = 0;
  1632       mp_size_t n = GMP_ABS (u->_mp_size);
  1633       n = GMP_MIN (n, 1 + (mp_size_t) (GMP_ULONG_BITS - 1) / GMP_LIMB_BITS);
  1634       while (--n >= 0)
  1635         r = (r << LOCAL_GMP_LIMB_BITS) + u->_mp_d[n];
  1636       return r;
  1637     }
  1638 
  1639   return u->_mp_size == 0 ? 0 : u->_mp_d[0];
  1640 }
  1641 
  1642 size_t
  1643 mpz_size (const mpz_t u)
  1644 {
  1645   return GMP_ABS (u->_mp_size);
  1646 }
  1647 
  1648 mp_limb_t
  1649 mpz_getlimbn (const mpz_t u, mp_size_t n)
  1650 {
  1651   if (n >= 0 && n < GMP_ABS (u->_mp_size))
  1652     return u->_mp_d[n];
  1653   else
  1654     return 0;
  1655 }
  1656 
  1657 void
  1658 mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
  1659 {
  1660   mpz_realloc (x, 1 + (n - (n != 0)) / GMP_LIMB_BITS);
  1661 }
  1662 
  1663 mp_srcptr
  1664 mpz_limbs_read (mpz_srcptr x)
  1665 {
  1666   return x->_mp_d;
  1667 }
  1668 
  1669 mp_ptr
  1670 mpz_limbs_modify (mpz_t x, mp_size_t n)
  1671 {
  1672   assert (n > 0);
  1673   return MPZ_REALLOC (x, n);
  1674 }
  1675 
  1676 mp_ptr
  1677 mpz_limbs_write (mpz_t x, mp_size_t n)
  1678 {
  1679   return mpz_limbs_modify (x, n);
  1680 }
  1681 
  1682 void
  1683 mpz_limbs_finish (mpz_t x, mp_size_t xs)
  1684 {
  1685   mp_size_t xn;
  1686   xn = mpn_normalized_size (x->_mp_d, GMP_ABS (xs));
  1687   x->_mp_size = xs < 0 ? -xn : xn;
  1688 }
  1689 
  1690 static mpz_srcptr
  1691 mpz_roinit_normal_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
  1692 {
  1693   x->_mp_alloc = 0;
  1694   x->_mp_d = (mp_ptr) xp;
  1695   x->_mp_size = xs;
  1696   return x;
  1697 }
  1698 
  1699 mpz_srcptr
  1700 mpz_roinit_n (mpz_t x, mp_srcptr xp, mp_size_t xs)
  1701 {
  1702   mpz_roinit_normal_n (x, xp, xs);
  1703   mpz_limbs_finish (x, xs);
  1704   return x;
  1705 }
  1706 
  1707 
  1708 /* Conversions and comparison to double. */
  1709 void
  1710 mpz_set_d (mpz_t r, double x)
  1711 {
  1712   int sign;
  1713   mp_ptr rp;
  1714   mp_size_t rn, i;
  1715   double B;
  1716   double Bi;
  1717   mp_limb_t f;
  1718 
  1719   /* x != x is true when x is a NaN, and x == x * 0.5 is true when x is
  1720      zero or infinity. */
  1721   if (x != x || x == x * 0.5)
  1722     {
  1723       r->_mp_size = 0;
  1724       return;
  1725     }
  1726 
  1727   sign = x < 0.0 ;
  1728   if (sign)
  1729     x = - x;
  1730 
  1731   if (x < 1.0)
  1732     {
  1733       r->_mp_size = 0;
  1734       return;
  1735     }
  1736   B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
  1737   Bi = 1.0 / B;
  1738   for (rn = 1; x >= B; rn++)
  1739     x *= Bi;
  1740 
  1741   rp = MPZ_REALLOC (r, rn);
  1742 
  1743   f = (mp_limb_t) x;
  1744   x -= f;
  1745   assert (x < 1.0);
  1746   i = rn-1;
  1747   rp[i] = f;
  1748   while (--i >= 0)
  1749     {
  1750       x = B * x;
  1751       f = (mp_limb_t) x;
  1752       x -= f;
  1753       assert (x < 1.0);
  1754       rp[i] = f;
  1755     }
  1756 
  1757   r->_mp_size = sign ? - rn : rn;
  1758 }
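
mpz_set_d keeps only the integer part of its argument, and the guard at the top maps NaN, infinities and anything with |x| < 1 to zero. A short sketch of that behaviour, assuming IEEE doubles and the companion mini-gmp.h header:

    #include <assert.h>
    #include <math.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t r;
      mpz_init (r);

      mpz_set_d (r, -7.9);      /* the fractional part is discarded */
      assert (mpz_get_si (r) == -7);

      mpz_set_d (r, 0.999);     /* |x| < 1 yields zero */
      assert (mpz_sgn (r) == 0);

      mpz_set_d (r, HUGE_VAL);  /* infinities (and NaN) are mapped to zero here */
      assert (mpz_sgn (r) == 0);

      mpz_clear (r);
      return 0;
    }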
  1759 
  1760 void
  1761 mpz_init_set_d (mpz_t r, double x)
  1762 {
  1763   mpz_init (r);
  1764   mpz_set_d (r, x);
  1765 }
  1766 
  1767 double
  1768 mpz_get_d (const mpz_t u)
  1769 {
  1770   int m;
  1771   mp_limb_t l;
  1772   mp_size_t un;
  1773   double x;
  1774   double B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
  1775 
  1776   un = GMP_ABS (u->_mp_size);
  1777 
  1778   if (un == 0)
  1779     return 0.0;
  1780 
  1781   l = u->_mp_d[--un];
  1782   gmp_clz (m, l);
  1783   m = m + GMP_DBL_MANT_BITS - GMP_LIMB_BITS;
  1784   if (m < 0)
  1785     l &= GMP_LIMB_MAX << -m;
  1786 
  1787   for (x = l; --un >= 0;)
  1788     {
  1789       x = B*x;
  1790       if (m > 0) {
  1791         l = u->_mp_d[un];
  1792         m -= GMP_LIMB_BITS;
  1793         if (m < 0)
  1794           l &= GMP_LIMB_MAX << -m;
  1795         x += l;
  1796       }
  1797     }
  1798 
  1799   if (u->_mp_size < 0)
  1800     x = -x;
  1801 
  1802   return x;
  1803 }
  1804 
  1805 int
  1806 mpz_cmpabs_d (const mpz_t x, double d)
  1807 {
  1808   mp_size_t xn;
  1809   double B, Bi;
  1810   mp_size_t i;
  1811 
  1812   xn = x->_mp_size;
  1813   d = GMP_ABS (d);
  1814 
  1815   if (xn != 0)
  1816     {
  1817       xn = GMP_ABS (xn);
  1818 
  1819       B = 4.0 * (double) (GMP_LIMB_HIGHBIT >> 1);
  1820       Bi = 1.0 / B;
  1821 
  1822       /* Scale d so it can be compared with the top limb. */
  1823       for (i = 1; i < xn; i++)
  1824         d *= Bi;
  1825 
  1826       if (d >= B)
  1827         return -1;
  1828 
  1829       /* Compare floor(d) to top limb, subtract and cancel when equal. */
  1830       for (i = xn; i-- > 0;)
  1831         {
  1832           mp_limb_t f, xl;
  1833 
  1834           f = (mp_limb_t) d;
  1835           xl = x->_mp_d[i];
  1836           if (xl > f)
  1837             return 1;
  1838           else if (xl < f)
  1839             return -1;
  1840           d = B * (d - f);
  1841         }
  1842     }
  1843   return - (d > 0.0);
  1844 }
  1845 
  1846 int
  1847 mpz_cmp_d (const mpz_t x, double d)
  1848 {
  1849   if (x->_mp_size < 0)
  1850     {
  1851       if (d >= 0.0)
  1852         return -1;
  1853       else
  1854         return -mpz_cmpabs_d (x, d);
  1855     }
  1856   else
  1857     {
  1858       if (d < 0.0)
  1859         return 1;
  1860       else
  1861         return mpz_cmpabs_d (x, d);
  1862     }
  1863 }
  1864 
  1865 
  1866 /* MPZ comparisons and the like. */
  1867 int
  1868 mpz_sgn (const mpz_t u)
  1869 {
  1870   return GMP_CMP (u->_mp_size, 0);
  1871 }
  1872 
  1873 int
  1874 mpz_cmp_si (const mpz_t u, long v)
  1875 {
  1876   mp_size_t usize = u->_mp_size;
  1877 
  1878   if (v >= 0)
  1879     return mpz_cmp_ui (u, v);
  1880   else if (usize >= 0)
  1881     return 1;
  1882   else
  1883     return - mpz_cmpabs_ui (u, GMP_NEG_CAST (unsigned long int, v));
  1884 }
  1885 
  1886 int
  1887 mpz_cmp_ui (const mpz_t u, unsigned long v)
  1888 {
  1889   mp_size_t usize = u->_mp_size;
  1890 
  1891   if (usize < 0)
  1892     return -1;
  1893   else
  1894     return mpz_cmpabs_ui (u, v);
  1895 }
  1896 
  1897 int
  1898 mpz_cmp (const mpz_t a, const mpz_t b)
  1899 {
  1900   mp_size_t asize = a->_mp_size;
  1901   mp_size_t bsize = b->_mp_size;
  1902 
  1903   if (asize != bsize)
  1904     return (asize < bsize) ? -1 : 1;
  1905   else if (asize >= 0)
  1906     return mpn_cmp (a->_mp_d, b->_mp_d, asize);
  1907   else
  1908     return mpn_cmp (b->_mp_d, a->_mp_d, -asize);
  1909 }
  1910 
  1911 int
  1912 mpz_cmpabs_ui (const mpz_t u, unsigned long v)
  1913 {
  1914   mp_size_t un = GMP_ABS (u->_mp_size);
  1915 
  1916   if (! mpn_absfits_ulong_p (u->_mp_d, un))
  1917     return 1;
  1918   else
  1919     {
  1920       unsigned long uu = mpz_get_ui (u);
  1921       return GMP_CMP(uu, v);
  1922     }
  1923 }
  1924 
  1925 int
  1926 mpz_cmpabs (const mpz_t u, const mpz_t v)
  1927 {
  1928   return mpn_cmp4 (u->_mp_d, GMP_ABS (u->_mp_size),
  1929                    v->_mp_d, GMP_ABS (v->_mp_size));
  1930 }
  1931 
  1932 void
  1933 mpz_abs (mpz_t r, const mpz_t u)
  1934 {
  1935   mpz_set (r, u);
  1936   r->_mp_size = GMP_ABS (r->_mp_size);
  1937 }
  1938 
  1939 void
  1940 mpz_neg (mpz_t r, const mpz_t u)
  1941 {
  1942   mpz_set (r, u);
  1943   r->_mp_size = -r->_mp_size;
  1944 }
  1945 
  1946 void
  1947 mpz_swap (mpz_t u, mpz_t v)
  1948 {
  1949   MP_SIZE_T_SWAP (u->_mp_alloc, v->_mp_alloc);
  1950   MPN_PTR_SWAP (u->_mp_d, u->_mp_size, v->_mp_d, v->_mp_size);
  1951 }
  1952 
  1953 
  1954 /* MPZ addition and subtraction */
  1955 
  1956 
  1957 void
  1958 mpz_add_ui (mpz_t r, const mpz_t a, unsigned long b)
  1959 {
  1960   mpz_t bb;
  1961   mpz_init_set_ui (bb, b);
  1962   mpz_add (r, a, bb);
  1963   mpz_clear (bb);
  1964 }
  1965 
  1966 void
  1967 mpz_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
  1968 {
  1969   mpz_ui_sub (r, b, a);
  1970   mpz_neg (r, r);
  1971 }
  1972 
  1973 void
  1974 mpz_ui_sub (mpz_t r, unsigned long a, const mpz_t b)
  1975 {
  1976   mpz_neg (r, b);
  1977   mpz_add_ui (r, r, a);
  1978 }
  1979 
  1980 static mp_size_t
  1981 mpz_abs_add (mpz_t r, const mpz_t a, const mpz_t b)
  1982 {
  1983   mp_size_t an = GMP_ABS (a->_mp_size);
  1984   mp_size_t bn = GMP_ABS (b->_mp_size);
  1985   mp_ptr rp;
  1986   mp_limb_t cy;
  1987 
  1988   if (an < bn)
  1989     {
  1990       MPZ_SRCPTR_SWAP (a, b);
  1991       MP_SIZE_T_SWAP (an, bn);
  1992     }
  1993 
  1994   rp = MPZ_REALLOC (r, an + 1);
  1995   cy = mpn_add (rp, a->_mp_d, an, b->_mp_d, bn);
  1996 
  1997   rp[an] = cy;
  1998 
  1999   return an + cy;
  2000 }
  2001 
  2002 static mp_size_t
  2003 mpz_abs_sub (mpz_t r, const mpz_t a, const mpz_t b)
  2004 {
  2005   mp_size_t an = GMP_ABS (a->_mp_size);
  2006   mp_size_t bn = GMP_ABS (b->_mp_size);
  2007   int cmp;
  2008   mp_ptr rp;
  2009 
  2010   cmp = mpn_cmp4 (a->_mp_d, an, b->_mp_d, bn);
  2011   if (cmp > 0)
  2012     {
  2013       rp = MPZ_REALLOC (r, an);
  2014       gmp_assert_nocarry (mpn_sub (rp, a->_mp_d, an, b->_mp_d, bn));
  2015       return mpn_normalized_size (rp, an);
  2016     }
  2017   else if (cmp < 0)
  2018     {
  2019       rp = MPZ_REALLOC (r, bn);
  2020       gmp_assert_nocarry (mpn_sub (rp, b->_mp_d, bn, a->_mp_d, an));
  2021       return -mpn_normalized_size (rp, bn);
  2022     }
  2023   else
  2024     return 0;
  2025 }
  2026 
  2027 void
  2028 mpz_add (mpz_t r, const mpz_t a, const mpz_t b)
  2029 {
  2030   mp_size_t rn;
  2031 
  2032   if ( (a->_mp_size ^ b->_mp_size) >= 0)
  2033     rn = mpz_abs_add (r, a, b);
  2034   else
  2035     rn = mpz_abs_sub (r, a, b);
  2036 
  2037   r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
  2038 }
  2039 
  2040 void
  2041 mpz_sub (mpz_t r, const mpz_t a, const mpz_t b)
  2042 {
  2043   mp_size_t rn;
  2044 
  2045   if ( (a->_mp_size ^ b->_mp_size) >= 0)
  2046     rn = mpz_abs_sub (r, a, b);
  2047   else
  2048     rn = mpz_abs_add (r, a, b);
  2049 
  2050   r->_mp_size = a->_mp_size >= 0 ? rn : - rn;
  2051 }
  2052 
  2053 
  2054 /* MPZ multiplication */
  2055 void
  2056 mpz_mul_si (mpz_t r, const mpz_t u, long int v)
  2057 {
  2058   if (v < 0)
  2059     {
  2060       mpz_mul_ui (r, u, GMP_NEG_CAST (unsigned long int, v));
  2061       mpz_neg (r, r);
  2062     }
  2063   else
  2064     mpz_mul_ui (r, u, v);
  2065 }
  2066 
  2067 void
  2068 mpz_mul_ui (mpz_t r, const mpz_t u, unsigned long int v)
  2069 {
  2070   mpz_t vv;
  2071   mpz_init_set_ui (vv, v);
  2072   mpz_mul (r, u, vv);
  2073   mpz_clear (vv);
  2074   return;
  2075 }
  2076 
  2077 void
  2078 mpz_mul (mpz_t r, const mpz_t u, const mpz_t v)
  2079 {
  2080   int sign;
  2081   mp_size_t un, vn, rn;
  2082   mpz_t t;
  2083   mp_ptr tp;
  2084 
  2085   un = u->_mp_size;
  2086   vn = v->_mp_size;
  2087 
  2088   if (un == 0 || vn == 0)
  2089     {
  2090       r->_mp_size = 0;
  2091       return;
  2092     }
  2093 
  2094   sign = (un ^ vn) < 0;
  2095 
  2096   un = GMP_ABS (un);
  2097   vn = GMP_ABS (vn);
  2098 
  2099   mpz_init2 (t, (un + vn) * GMP_LIMB_BITS);
  2100 
  2101   tp = t->_mp_d;
  2102   if (un >= vn)
  2103     mpn_mul (tp, u->_mp_d, un, v->_mp_d, vn);
  2104   else
  2105     mpn_mul (tp, v->_mp_d, vn, u->_mp_d, un);
  2106 
  2107   rn = un + vn;
  2108   rn -= tp[rn-1] == 0;
  2109 
  2110   t->_mp_size = sign ? - rn : rn;
  2111   mpz_swap (r, t);
  2112   mpz_clear (t);
  2113 }
  2114 
  2115 void
  2116 mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
  2117 {
  2118   mp_size_t un, rn;
  2119   mp_size_t limbs;
  2120   unsigned shift;
  2121   mp_ptr rp;
  2122 
  2123   un = GMP_ABS (u->_mp_size);
  2124   if (un == 0)
  2125     {
  2126       r->_mp_size = 0;
  2127       return;
  2128     }
  2129 
  2130   limbs = bits / GMP_LIMB_BITS;
  2131   shift = bits % GMP_LIMB_BITS;
  2132 
  2133   rn = un + limbs + (shift > 0);
  2134   rp = MPZ_REALLOC (r, rn);
  2135   if (shift > 0)
  2136     {
  2137       mp_limb_t cy = mpn_lshift (rp + limbs, u->_mp_d, un, shift);
  2138       rp[rn-1] = cy;
  2139       rn -= (cy == 0);
  2140     }
  2141   else
  2142     mpn_copyd (rp + limbs, u->_mp_d, un);
  2143 
  2144   mpn_zero (rp, limbs);
  2145 
  2146   r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
  2147 }
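
mpz_mul_2exp splits the shift into whole limbs plus a sub-limb mpn_lshift; mpz_tdiv_q_2exp (defined further down) undoes it. A small round-trip sketch, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t a, b;
      mpz_init_set_ui (a, 3);
      mpz_init (b);

      mpz_mul_2exp (b, a, 100);     /* b = 3 * 2^100, spilling into new limbs */
      mpz_tdiv_q_2exp (b, b, 100);  /* shift back down */
      assert (mpz_cmp (a, b) == 0);

      mpz_clear (a);
      mpz_clear (b);
      return 0;
    }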
  2148 
  2149 void
  2150 mpz_addmul_ui (mpz_t r, const mpz_t u, unsigned long int v)
  2151 {
  2152   mpz_t t;
  2153   mpz_init_set_ui (t, v);
  2154   mpz_mul (t, u, t);
  2155   mpz_add (r, r, t);
  2156   mpz_clear (t);
  2157 }
  2158 
  2159 void
  2160 mpz_submul_ui (mpz_t r, const mpz_t u, unsigned long int v)
  2161 {
  2162   mpz_t t;
  2163   mpz_init_set_ui (t, v);
  2164   mpz_mul (t, u, t);
  2165   mpz_sub (r, r, t);
  2166   mpz_clear (t);
  2167 }
  2168 
  2169 void
  2170 mpz_addmul (mpz_t r, const mpz_t u, const mpz_t v)
  2171 {
  2172   mpz_t t;
  2173   mpz_init (t);
  2174   mpz_mul (t, u, v);
  2175   mpz_add (r, r, t);
  2176   mpz_clear (t);
  2177 }
  2178 
  2179 void
  2180 mpz_submul (mpz_t r, const mpz_t u, const mpz_t v)
  2181 {
  2182   mpz_t t;
  2183   mpz_init (t);
  2184   mpz_mul (t, u, v);
  2185   mpz_sub (r, r, t);
  2186   mpz_clear (t);
  2187 }
  2188 
  2189 
  2190 /* MPZ division */
  2191 enum mpz_div_round_mode { GMP_DIV_FLOOR, GMP_DIV_CEIL, GMP_DIV_TRUNC };
  2192 
  2193 /* Allows q or r to be NULL. Returns 1 iff the remainder is non-zero. */
  2194 static int
  2195 mpz_div_qr (mpz_t q, mpz_t r,
  2196             const mpz_t n, const mpz_t d, enum mpz_div_round_mode mode)
  2197 {
  2198   mp_size_t ns, ds, nn, dn, qs;
  2199   ns = n->_mp_size;
  2200   ds = d->_mp_size;
  2201 
  2202   if (ds == 0)
  2203     gmp_die("mpz_div_qr: Divide by zero.");
  2204 
  2205   if (ns == 0)
  2206     {
  2207       if (q)
  2208         q->_mp_size = 0;
  2209       if (r)
  2210         r->_mp_size = 0;
  2211       return 0;
  2212     }
  2213 
  2214   nn = GMP_ABS (ns);
  2215   dn = GMP_ABS (ds);
  2216 
  2217   qs = ds ^ ns;
  2218 
  2219   if (nn < dn)
  2220     {
  2221       if (mode == GMP_DIV_CEIL && qs >= 0)
  2222         {
  2223           /* q = 1, r = n - d */
  2224           if (r)
  2225             mpz_sub (r, n, d);
  2226           if (q)
  2227             mpz_set_ui (q, 1);
  2228         }
  2229       else if (mode == GMP_DIV_FLOOR && qs < 0)
  2230         {
  2231           /* q = -1, r = n + d */
  2232           if (r)
  2233             mpz_add (r, n, d);
  2234           if (q)
  2235             mpz_set_si (q, -1);
  2236         }
  2237       else
  2238         {
  2239           /* q = 0, r = n */
  2240           if (r)
  2241             mpz_set (r, n);
  2242           if (q)
  2243             q->_mp_size = 0;
  2244         }
  2245       return 1;
  2246     }
  2247   else
  2248     {
  2249       mp_ptr np, qp;
  2250       mp_size_t qn, rn;
  2251       mpz_t tq, tr;
  2252 
  2253       mpz_init_set (tr, n);
  2254       np = tr->_mp_d;
  2255 
  2256       qn = nn - dn + 1;
  2257 
  2258       if (q)
  2259         {
  2260           mpz_init2 (tq, qn * GMP_LIMB_BITS);
  2261           qp = tq->_mp_d;
  2262         }
  2263       else
  2264         qp = NULL;
  2265 
  2266       mpn_div_qr (qp, np, nn, d->_mp_d, dn);
  2267 
  2268       if (qp)
  2269         {
  2270           qn -= (qp[qn-1] == 0);
  2271 
  2272           tq->_mp_size = qs < 0 ? -qn : qn;
  2273         }
  2274       rn = mpn_normalized_size (np, dn);
  2275       tr->_mp_size = ns < 0 ? - rn : rn;
  2276 
  2277       if (mode == GMP_DIV_FLOOR && qs < 0 && rn != 0)
  2278         {
  2279           if (q)
  2280             mpz_sub_ui (tq, tq, 1);
  2281           if (r)
  2282             mpz_add (tr, tr, d);
  2283         }
  2284       else if (mode == GMP_DIV_CEIL && qs >= 0 && rn != 0)
  2285         {
  2286           if (q)
  2287             mpz_add_ui (tq, tq, 1);
  2288           if (r)
  2289             mpz_sub (tr, tr, d);
  2290         }
  2291 
  2292       if (q)
  2293         {
  2294           mpz_swap (tq, q);
  2295           mpz_clear (tq);
  2296         }
  2297       if (r)
  2298         mpz_swap (tr, r);
  2299 
  2300       mpz_clear (tr);
  2301 
  2302       return rn != 0;
  2303     }
  2304 }
  2305 
  2306 void
  2307 mpz_cdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
  2308 {
  2309   mpz_div_qr (q, r, n, d, GMP_DIV_CEIL);
  2310 }
  2311 
  2312 void
  2313 mpz_fdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
  2314 {
  2315   mpz_div_qr (q, r, n, d, GMP_DIV_FLOOR);
  2316 }
  2317 
  2318 void
  2319 mpz_tdiv_qr (mpz_t q, mpz_t r, const mpz_t n, const mpz_t d)
  2320 {
  2321   mpz_div_qr (q, r, n, d, GMP_DIV_TRUNC);
  2322 }
  2323 
  2324 void
  2325 mpz_cdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
  2326 {
  2327   mpz_div_qr (q, NULL, n, d, GMP_DIV_CEIL);
  2328 }
  2329 
  2330 void
  2331 mpz_fdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
  2332 {
  2333   mpz_div_qr (q, NULL, n, d, GMP_DIV_FLOOR);
  2334 }
  2335 
  2336 void
  2337 mpz_tdiv_q (mpz_t q, const mpz_t n, const mpz_t d)
  2338 {
  2339   mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC);
  2340 }
  2341 
  2342 void
  2343 mpz_cdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
  2344 {
  2345   mpz_div_qr (NULL, r, n, d, GMP_DIV_CEIL);
  2346 }
  2347 
  2348 void
  2349 mpz_fdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
  2350 {
  2351   mpz_div_qr (NULL, r, n, d, GMP_DIV_FLOOR);
  2352 }
  2353 
  2354 void
  2355 mpz_tdiv_r (mpz_t r, const mpz_t n, const mpz_t d)
  2356 {
  2357   mpz_div_qr (NULL, r, n, d, GMP_DIV_TRUNC);
  2358 }
  2359 
  2360 void
  2361 mpz_mod (mpz_t r, const mpz_t n, const mpz_t d)
  2362 {
  2363   mpz_div_qr (NULL, r, n, d, d->_mp_size >= 0 ? GMP_DIV_FLOOR : GMP_DIV_CEIL);
  2364 }
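
The three rounding modes only differ when the division is inexact; mpz_mod picks floor or ceiling so that the remainder takes the sign of d. A small sketch with n = -7 and d = 3, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t n, d, q, r;
      mpz_init_set_si (n, -7);
      mpz_init_set_si (d, 3);
      mpz_init (q);
      mpz_init (r);

      mpz_tdiv_qr (q, r, n, d);  /* truncate: q = -2, r = -1 */
      assert (mpz_cmp_si (q, -2) == 0 && mpz_cmp_si (r, -1) == 0);

      mpz_fdiv_qr (q, r, n, d);  /* floor:    q = -3, r =  2 */
      assert (mpz_cmp_si (q, -3) == 0 && mpz_cmp_si (r, 2) == 0);

      mpz_cdiv_qr (q, r, n, d);  /* ceiling:  q = -2, r = -1 */
      assert (mpz_cmp_si (q, -2) == 0 && mpz_cmp_si (r, -1) == 0);

      mpz_mod (r, n, d);         /* remainder with the sign of d: 2 */
      assert (mpz_cmp_si (r, 2) == 0);

      mpz_clear (n); mpz_clear (d); mpz_clear (q); mpz_clear (r);
      return 0;
    }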
  2365 
  2366 static void
  2367 mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
  2368                 enum mpz_div_round_mode mode)
  2369 {
  2370   mp_size_t un, qn;
  2371   mp_size_t limb_cnt;
  2372   mp_ptr qp;
  2373   int adjust;
  2374 
  2375   un = u->_mp_size;
  2376   if (un == 0)
  2377     {
  2378       q->_mp_size = 0;
  2379       return;
  2380     }
  2381   limb_cnt = bit_index / GMP_LIMB_BITS;
  2382   qn = GMP_ABS (un) - limb_cnt;
  2383   bit_index %= GMP_LIMB_BITS;
  2384 
  2385   if (mode == ((un > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* un != 0 here. */
  2386     /* Note: Below, the final indexing at limb_cnt is valid because at
  2387        that point we have qn > 0. */
  2388     adjust = (qn <= 0
  2389               || !mpn_zero_p (u->_mp_d, limb_cnt)
  2390               || (u->_mp_d[limb_cnt]
  2391                   & (((mp_limb_t) 1 << bit_index) - 1)));
  2392   else
  2393     adjust = 0;
  2394 
  2395   if (qn <= 0)
  2396     qn = 0;
  2397   else
  2398     {
  2399       qp = MPZ_REALLOC (q, qn);
  2400 
  2401       if (bit_index != 0)
  2402         {
  2403           mpn_rshift (qp, u->_mp_d + limb_cnt, qn, bit_index);
  2404           qn -= qp[qn - 1] == 0;
  2405         }
  2406       else
  2407         {
  2408           mpn_copyi (qp, u->_mp_d + limb_cnt, qn);
  2409         }
  2410     }
  2411 
  2412   q->_mp_size = qn;
  2413 
  2414   if (adjust)
  2415     mpz_add_ui (q, q, 1);
  2416   if (un < 0)
  2417     mpz_neg (q, q);
  2418 }
  2419 
  2420 static void
  2421 mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
  2422                 enum mpz_div_round_mode mode)
  2423 {
  2424   mp_size_t us, un, rn;
  2425   mp_ptr rp;
  2426   mp_limb_t mask;
  2427 
  2428   us = u->_mp_size;
  2429   if (us == 0 || bit_index == 0)
  2430     {
  2431       r->_mp_size = 0;
  2432       return;
  2433     }
  2434   rn = (bit_index + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
  2435   assert (rn > 0);
  2436 
  2437   rp = MPZ_REALLOC (r, rn);
  2438   un = GMP_ABS (us);
  2439 
  2440   mask = GMP_LIMB_MAX >> (rn * GMP_LIMB_BITS - bit_index);
  2441 
  2442   if (rn > un)
  2443     {
  2444       /* Quotient (with truncation) is zero, and remainder is
  2445          non-zero */
  2446       if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
  2447         {
  2448           /* Have to negate and sign extend. */
  2449           mp_size_t i;
  2450 
  2451           gmp_assert_nocarry (! mpn_neg (rp, u->_mp_d, un));
  2452           for (i = un; i < rn - 1; i++)
  2453             rp[i] = GMP_LIMB_MAX;
  2454 
  2455           rp[rn-1] = mask;
  2456           us = -us;
  2457         }
  2458       else
  2459         {
  2460           /* Just copy */
  2461           if (r != u)
  2462             mpn_copyi (rp, u->_mp_d, un);
  2463 
  2464           rn = un;
  2465         }
  2466     }
  2467   else
  2468     {
  2469       if (r != u)
  2470         mpn_copyi (rp, u->_mp_d, rn - 1);
  2471 
  2472       rp[rn-1] = u->_mp_d[rn-1] & mask;
  2473 
  2474       if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
  2475         {
  2476           /* If r != 0, compute 2^{bit_count} - r. */
  2477           mpn_neg (rp, rp, rn);
  2478 
  2479           rp[rn-1] &= mask;
  2480 
  2481           /* us is not used for anything else, so we can modify it
  2482              here to indicate flipped sign. */
  2483           us = -us;
  2484         }
  2485     }
  2486   rn = mpn_normalized_size (rp, rn);
  2487   r->_mp_size = us < 0 ? -rn : rn;
  2488 }
  2489 
  2490 void
  2491 mpz_cdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
  2492 {
  2493   mpz_div_q_2exp (r, u, cnt, GMP_DIV_CEIL);
  2494 }
  2495 
  2496 void
  2497 mpz_fdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
  2498 {
  2499   mpz_div_q_2exp (r, u, cnt, GMP_DIV_FLOOR);
  2500 }
  2501 
  2502 void
  2503 mpz_tdiv_q_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
  2504 {
  2505   mpz_div_q_2exp (r, u, cnt, GMP_DIV_TRUNC);
  2506 }
  2507 
  2508 void
  2509 mpz_cdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
  2510 {
  2511   mpz_div_r_2exp (r, u, cnt, GMP_DIV_CEIL);
  2512 }
  2513 
  2514 void
  2515 mpz_fdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
  2516 {
  2517   mpz_div_r_2exp (r, u, cnt, GMP_DIV_FLOOR);
  2518 }
  2519 
  2520 void
  2521 mpz_tdiv_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t cnt)
  2522 {
  2523   mpz_div_r_2exp (r, u, cnt, GMP_DIV_TRUNC);
  2524 }
  2525 
  2526 void
  2527 mpz_divexact (mpz_t q, const mpz_t n, const mpz_t d)
  2528 {
  2529   gmp_assert_nocarry (mpz_div_qr (q, NULL, n, d, GMP_DIV_TRUNC));
  2530 }
  2531 
  2532 int
  2533 mpz_divisible_p (const mpz_t n, const mpz_t d)
  2534 {
  2535   return mpz_div_qr (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
  2536 }
  2537 
  2538 int
  2539 mpz_congruent_p (const mpz_t a, const mpz_t b, const mpz_t m)
  2540 {
  2541   mpz_t t;
  2542   int res;
  2543 
  2544   /* a == b (mod 0) iff a == b */
  2545   if (mpz_sgn (m) == 0)
  2546     return (mpz_cmp (a, b) == 0);
  2547 
  2548   mpz_init (t);
  2549   mpz_sub (t, a, b);
  2550   res = mpz_divisible_p (t, m);
  2551   mpz_clear (t);
  2552 
  2553   return res;
  2554 }
  2555 
  2556 static unsigned long
  2557 mpz_div_qr_ui (mpz_t q, mpz_t r,
  2558                const mpz_t n, unsigned long d, enum mpz_div_round_mode mode)
  2559 {
  2560   unsigned long ret;
  2561   mpz_t rr, dd;
  2562 
  2563   mpz_init (rr);
  2564   mpz_init_set_ui (dd, d);
  2565   mpz_div_qr (q, rr, n, dd, mode);
  2566   mpz_clear (dd);
  2567   ret = mpz_get_ui (rr);
  2568 
  2569   if (r)
  2570     mpz_swap (r, rr);
  2571   mpz_clear (rr);
  2572 
  2573   return ret;
  2574 }
  2575 
  2576 unsigned long
  2577 mpz_cdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
  2578 {
  2579   return mpz_div_qr_ui (q, r, n, d, GMP_DIV_CEIL);
  2580 }
  2581 
  2582 unsigned long
  2583 mpz_fdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
  2584 {
  2585   return mpz_div_qr_ui (q, r, n, d, GMP_DIV_FLOOR);
  2586 }
  2587 
  2588 unsigned long
  2589 mpz_tdiv_qr_ui (mpz_t q, mpz_t r, const mpz_t n, unsigned long d)
  2590 {
  2591   return mpz_div_qr_ui (q, r, n, d, GMP_DIV_TRUNC);
  2592 }
  2593 
  2594 unsigned long
  2595 mpz_cdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
  2596 {
  2597   return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_CEIL);
  2598 }
  2599 
  2600 unsigned long
  2601 mpz_fdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
  2602 {
  2603   return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_FLOOR);
  2604 }
  2605 
  2606 unsigned long
  2607 mpz_tdiv_q_ui (mpz_t q, const mpz_t n, unsigned long d)
  2608 {
  2609   return mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC);
  2610 }
  2611 
  2612 unsigned long
  2613 mpz_cdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
  2614 {
  2615   return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_CEIL);
  2616 }
  2617 unsigned long
  2618 mpz_fdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
  2619 {
  2620   return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
  2621 }
  2622 unsigned long
  2623 mpz_tdiv_r_ui (mpz_t r, const mpz_t n, unsigned long d)
  2624 {
  2625   return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_TRUNC);
  2626 }
  2627 
  2628 unsigned long
  2629 mpz_cdiv_ui (const mpz_t n, unsigned long d)
  2630 {
  2631   return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_CEIL);
  2632 }
  2633 
  2634 unsigned long
  2635 mpz_fdiv_ui (const mpz_t n, unsigned long d)
  2636 {
  2637   return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_FLOOR);
  2638 }
  2639 
  2640 unsigned long
  2641 mpz_tdiv_ui (const mpz_t n, unsigned long d)
  2642 {
  2643   return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC);
  2644 }
  2645 
  2646 unsigned long
  2647 mpz_mod_ui (mpz_t r, const mpz_t n, unsigned long d)
  2648 {
  2649   return mpz_div_qr_ui (NULL, r, n, d, GMP_DIV_FLOOR);
  2650 }
  2651 
  2652 void
  2653 mpz_divexact_ui (mpz_t q, const mpz_t n, unsigned long d)
  2654 {
  2655   gmp_assert_nocarry (mpz_div_qr_ui (q, NULL, n, d, GMP_DIV_TRUNC));
  2656 }
  2657 
  2658 int
  2659 mpz_divisible_ui_p (const mpz_t n, unsigned long d)
  2660 {
  2661   return mpz_div_qr_ui (NULL, NULL, n, d, GMP_DIV_TRUNC) == 0;
  2662 }
  2663 
  2664 
  2665 /* GCD */
  2666 static mp_limb_t
  2667 mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
  2668 {
  2669   unsigned shift;
  2670 
  2671   assert ( (u | v) > 0);
  2672 
  2673   if (u == 0)
  2674     return v;
  2675   else if (v == 0)
  2676     return u;
  2677 
  2678   gmp_ctz (shift, u | v);
  2679 
  2680   u >>= shift;
  2681   v >>= shift;
  2682 
  2683   if ( (u & 1) == 0)
  2684     MP_LIMB_T_SWAP (u, v);
  2685 
  2686   while ( (v & 1) == 0)
  2687     v >>= 1;
  2688 
  2689   while (u != v)
  2690     {
  2691       if (u > v)
  2692         {
  2693           u -= v;
  2694           do
  2695             u >>= 1;
  2696           while ( (u & 1) == 0);
  2697         }
  2698       else
  2699         {
  2700           v -= u;
  2701           do
  2702             v >>= 1;
  2703           while ( (v & 1) == 0);
  2704         }
  2705     }
  2706   return u << shift;
  2707 }
  2708 
  2709 unsigned long
  2710 mpz_gcd_ui (mpz_t g, const mpz_t u, unsigned long v)
  2711 {
  2712   mpz_t t;
  2713   mpz_init_set_ui(t, v);
  2714   mpz_gcd (t, u, t);
  2715   if (v > 0)
  2716     v = mpz_get_ui (t);
  2717 
  2718   if (g)
  2719     mpz_swap (t, g);
  2720 
  2721   mpz_clear (t);
  2722 
  2723   return v;
  2724 }
  2725 
  2726 static mp_bitcnt_t
  2727 mpz_make_odd (mpz_t r)
  2728 {
  2729   mp_bitcnt_t shift;
  2730 
  2731   assert (r->_mp_size > 0);
  2732   /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
  2733   shift = mpn_scan1 (r->_mp_d, 0);
  2734   mpz_tdiv_q_2exp (r, r, shift);
  2735 
  2736   return shift;
  2737 }
  2738 
  2739 void
  2740 mpz_gcd (mpz_t g, const mpz_t u, const mpz_t v)
  2741 {
  2742   mpz_t tu, tv;
  2743   mp_bitcnt_t uz, vz, gz;
  2744 
  2745   if (u->_mp_size == 0)
  2746     {
  2747       mpz_abs (g, v);
  2748       return;
  2749     }
  2750   if (v->_mp_size == 0)
  2751     {
  2752       mpz_abs (g, u);
  2753       return;
  2754     }
  2755 
  2756   mpz_init (tu);
  2757   mpz_init (tv);
  2758 
  2759   mpz_abs (tu, u);
  2760   uz = mpz_make_odd (tu);
  2761   mpz_abs (tv, v);
  2762   vz = mpz_make_odd (tv);
  2763   gz = GMP_MIN (uz, vz);
  2764 
  2765   if (tu->_mp_size < tv->_mp_size)
  2766     mpz_swap (tu, tv);
  2767 
  2768   mpz_tdiv_r (tu, tu, tv);
  2769   if (tu->_mp_size == 0)
  2770     {
  2771       mpz_swap (g, tv);
  2772     }
  2773   else
  2774     for (;;)
  2775       {
  2776         int c;
  2777 
  2778         mpz_make_odd (tu);
  2779         c = mpz_cmp (tu, tv);
  2780         if (c == 0)
  2781           {
  2782             mpz_swap (g, tu);
  2783             break;
  2784           }
  2785         if (c < 0)
  2786           mpz_swap (tu, tv);
  2787 
  2788         if (tv->_mp_size == 1)
  2789           {
  2790             mp_limb_t *gp;
  2791 
  2792             mpz_tdiv_r (tu, tu, tv);
  2793             gp = MPZ_REALLOC (g, 1); /* gp = mpz_limbs_modify (g, 1); */
  2794             *gp = mpn_gcd_11 (tu->_mp_d[0], tv->_mp_d[0]);
  2795 
  2796             g->_mp_size = *gp != 0; /* mpz_limbs_finish (g, 1); */
  2797             break;
  2798           }
  2799         mpz_sub (tu, tu, tv);
  2800       }
  2801   mpz_clear (tu);
  2802   mpz_clear (tv);
  2803   mpz_mul_2exp (g, g, gz);
  2804 }
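
mpz_gcd strips the common power of two with mpz_make_odd, runs the subtraction loop on the odd parts (dropping to mpn_gcd_11 once the smaller operand fits in a single limb), and restores 2^gz at the end. A quick check, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t a, b, g;
      mpz_init_set_si (a, -48);   /* 2^4 * 3; the sign is ignored */
      mpz_init_set_ui (b, 180);   /* 2^2 * 3^2 * 5 */
      mpz_init (g);

      mpz_gcd (g, a, b);          /* gcd = 2^2 * 3 = 12 */
      assert (mpz_cmp_ui (g, 12) == 0);

      mpz_clear (a); mpz_clear (b); mpz_clear (g);
      return 0;
    }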
  2805 
  2806 void
  2807 mpz_gcdext (mpz_t g, mpz_t s, mpz_t t, const mpz_t u, const mpz_t v)
  2808 {
  2809   mpz_t tu, tv, s0, s1, t0, t1;
  2810   mp_bitcnt_t uz, vz, gz;
  2811   mp_bitcnt_t power;
  2812 
  2813   if (u->_mp_size == 0)
  2814     {
  2815       /* g = 0 u + sgn(v) v */
  2816       signed long sign = mpz_sgn (v);
  2817       mpz_abs (g, v);
  2818       if (s)
  2819         s->_mp_size = 0;
  2820       if (t)
  2821         mpz_set_si (t, sign);
  2822       return;
  2823     }
  2824 
  2825   if (v->_mp_size == 0)
  2826     {
  2827       /* g = sgn(u) u + 0 v */
  2828       signed long sign = mpz_sgn (u);
  2829       mpz_abs (g, u);
  2830       if (s)
  2831         mpz_set_si (s, sign);
  2832       if (t)
  2833         t->_mp_size = 0;
  2834       return;
  2835     }
  2836 
  2837   mpz_init (tu);
  2838   mpz_init (tv);
  2839   mpz_init (s0);
  2840   mpz_init (s1);
  2841   mpz_init (t0);
  2842   mpz_init (t1);
  2843 
  2844   mpz_abs (tu, u);
  2845   uz = mpz_make_odd (tu);
  2846   mpz_abs (tv, v);
  2847   vz = mpz_make_odd (tv);
  2848   gz = GMP_MIN (uz, vz);
  2849 
  2850   uz -= gz;
  2851   vz -= gz;
  2852 
  2853   /* Cofactors corresponding to odd gcd. gz handled later. */
  2854   if (tu->_mp_size < tv->_mp_size)
  2855     {
  2856       mpz_swap (tu, tv);
  2857       MPZ_SRCPTR_SWAP (u, v);
  2858       MPZ_PTR_SWAP (s, t);
  2859       MP_BITCNT_T_SWAP (uz, vz);
  2860     }
  2861 
  2862   /* Maintain
  2863    *
  2864    * u = t0 tu + t1 tv
  2865    * v = s0 tu + s1 tv
  2866    *
  2867    * where u and v denote the inputs with common factors of two
  2868    * eliminated, and det (s0, t0; s1, t1) = 2^p. Then
  2869    *
  2870    * 2^p tu =  s1 u - t1 v
  2871    * 2^p tv = -s0 u + t0 v
  2872    */
  2873 
  2874   /* After initial division, tu = q tv + tu', we have
  2875    *
  2876    * u = 2^uz (tu' + q tv)
  2877    * v = 2^vz tv
  2878    *
  2879    * or
  2880    *
  2881    * t0 = 2^uz, t1 = 2^uz q
  2882    * s0 = 0,    s1 = 2^vz
  2883    */
  2884 
  2885   mpz_tdiv_qr (t1, tu, tu, tv);
  2886   mpz_mul_2exp (t1, t1, uz);
  2887 
  2888   mpz_setbit (s1, vz);
  2889   power = uz + vz;
  2890 
  2891   if (tu->_mp_size > 0)
  2892     {
  2893       mp_bitcnt_t shift;
  2894       shift = mpz_make_odd (tu);
  2895       mpz_setbit (t0, uz + shift);
  2896       power += shift;
  2897 
  2898       for (;;)
  2899         {
  2900           int c;
  2901           c = mpz_cmp (tu, tv);
  2902           if (c == 0)
  2903             break;
  2904 
  2905           if (c < 0)
  2906             {
  2907               /* tv = tv' + tu
  2908                *
  2909                * u = t0 tu + t1 (tv' + tu) = (t0 + t1) tu + t1 tv'
  2910                * v = s0 tu + s1 (tv' + tu) = (s0 + s1) tu + s1 tv' */
  2911 
  2912               mpz_sub (tv, tv, tu);
  2913               mpz_add (t0, t0, t1);
  2914               mpz_add (s0, s0, s1);
  2915 
  2916               shift = mpz_make_odd (tv);
  2917               mpz_mul_2exp (t1, t1, shift);
  2918               mpz_mul_2exp (s1, s1, shift);
  2919             }
  2920           else
  2921             {
  2922               mpz_sub (tu, tu, tv);
  2923               mpz_add (t1, t0, t1);
  2924               mpz_add (s1, s0, s1);
  2925 
  2926               shift = mpz_make_odd (tu);
  2927               mpz_mul_2exp (t0, t0, shift);
  2928               mpz_mul_2exp (s0, s0, shift);
  2929             }
  2930           power += shift;
  2931         }
  2932     }
  2933   else
  2934     mpz_setbit (t0, uz);
  2935 
  2936   /* Now tv = odd part of gcd, and -s0 and t0 are corresponding
  2937      cofactors. */
  2938 
  2939   mpz_mul_2exp (tv, tv, gz);
  2940   mpz_neg (s0, s0);
  2941 
  2942   /* 2^p g = s0 u + t0 v. Eliminate one factor of two at a time. To
  2943      adjust cofactors, we need u / g and v / g */
  2944 
  2945   mpz_divexact (s1, v, tv);
  2946   mpz_abs (s1, s1);
  2947   mpz_divexact (t1, u, tv);
  2948   mpz_abs (t1, t1);
  2949 
  2950   while (power-- > 0)
  2951     {
  2952       /* s0 u + t0 v = (s0 - v/g) u - (t0 + u/g) v */
  2953       if (mpz_odd_p (s0) || mpz_odd_p (t0))
  2954         {
  2955           mpz_sub (s0, s0, s1);
  2956           mpz_add (t0, t0, t1);
  2957         }
  2958       assert (mpz_even_p (t0) && mpz_even_p (s0));
  2959       mpz_tdiv_q_2exp (s0, s0, 1);
  2960       mpz_tdiv_q_2exp (t0, t0, 1);
  2961     }
  2962 
  2963   /* Arrange so that |s| < |u| / 2g */
  2964   mpz_add (s1, s0, s1);
  2965   if (mpz_cmpabs (s0, s1) > 0)
  2966     {
  2967       mpz_swap (s0, s1);
  2968       mpz_sub (t0, t0, t1);
  2969     }
  2970   if (u->_mp_size < 0)
  2971     mpz_neg (s0, s0);
  2972   if (v->_mp_size < 0)
  2973     mpz_neg (t0, t0);
  2974 
  2975   mpz_swap (g, tv);
  2976   if (s)
  2977     mpz_swap (s, s0);
  2978   if (t)
  2979     mpz_swap (t, t0);
  2980 
  2981   mpz_clear (tu);
  2982   mpz_clear (tv);
  2983   mpz_clear (s0);
  2984   mpz_clear (s1);
  2985   mpz_clear (t0);
  2986   mpz_clear (t1);
  2987 }
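
The invariants maintained above guarantee that the returned cofactors satisfy g = s u + t v. A minimal verification sketch, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t u, v, g, s, t, chk;
      mpz_init_set_ui (u, 240);
      mpz_init_set_ui (v, 46);
      mpz_init (g); mpz_init (s); mpz_init (t); mpz_init (chk);

      mpz_gcdext (g, s, t, u, v);  /* g = gcd(240, 46) = 2 */
      assert (mpz_cmp_ui (g, 2) == 0);

      mpz_mul (chk, s, u);
      mpz_addmul (chk, t, v);      /* chk = s*u + t*v */
      assert (mpz_cmp (chk, g) == 0);

      mpz_clear (u); mpz_clear (v); mpz_clear (g);
      mpz_clear (s); mpz_clear (t); mpz_clear (chk);
      return 0;
    }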
  2988 
  2989 void
  2990 mpz_lcm (mpz_t r, const mpz_t u, const mpz_t v)
  2991 {
  2992   mpz_t g;
  2993 
  2994   if (u->_mp_size == 0 || v->_mp_size == 0)
  2995     {
  2996       r->_mp_size = 0;
  2997       return;
  2998     }
  2999 
  3000   mpz_init (g);
  3001 
  3002   mpz_gcd (g, u, v);
  3003   mpz_divexact (g, u, g);
  3004   mpz_mul (r, g, v);
  3005 
  3006   mpz_clear (g);
  3007   mpz_abs (r, r);
  3008 }
  3009 
  3010 void
  3011 mpz_lcm_ui (mpz_t r, const mpz_t u, unsigned long v)
  3012 {
  3013   if (v == 0 || u->_mp_size == 0)
  3014     {
  3015       r->_mp_size = 0;
  3016       return;
  3017     }
  3018 
  3019   v /= mpz_gcd_ui (NULL, u, v);
  3020   mpz_mul_ui (r, u, v);
  3021 
  3022   mpz_abs (r, r);
  3023 }
  3024 
  3025 int
  3026 mpz_invert (mpz_t r, const mpz_t u, const mpz_t m)
  3027 {
  3028   mpz_t g, tr;
  3029   int invertible;
  3030 
  3031   if (u->_mp_size == 0 || mpz_cmpabs_ui (m, 1) <= 0)
  3032     return 0;
  3033 
  3034   mpz_init (g);
  3035   mpz_init (tr);
  3036 
  3037   mpz_gcdext (g, tr, NULL, u, m);
  3038   invertible = (mpz_cmp_ui (g, 1) == 0);
  3039 
  3040   if (invertible)
  3041     {
  3042       if (tr->_mp_size < 0)
  3043         {
  3044           if (m->_mp_size >= 0)
  3045             mpz_add (tr, tr, m);
  3046           else
  3047             mpz_sub (tr, tr, m);
  3048         }
  3049       mpz_swap (r, tr);
  3050     }
  3051 
  3052   mpz_clear (g);
  3053   mpz_clear (tr);
  3054   return invertible;
  3055 }
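
mpz_invert succeeds exactly when gcd(u, m) is 1, and normalizes the cofactor from mpz_gcdext into the range [0, |m|). A short sketch, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t u, m, r;
      mpz_init_set_ui (u, 3);
      mpz_init_set_ui (m, 7);
      mpz_init (r);

      assert (mpz_invert (r, u, m) != 0);  /* 3 * 5 = 15 = 1 (mod 7) */
      assert (mpz_cmp_ui (r, 5) == 0);

      mpz_set_ui (u, 6);                   /* gcd(6, 9) = 3: no inverse */
      mpz_set_ui (m, 9);
      assert (mpz_invert (r, u, m) == 0);

      mpz_clear (u); mpz_clear (m); mpz_clear (r);
      return 0;
    }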
  3056 
  3057 
  3058 /* Higher level operations (sqrt, pow and root) */
  3059 
  3060 void
  3061 mpz_pow_ui (mpz_t r, const mpz_t b, unsigned long e)
  3062 {
  3063   unsigned long bit;
  3064   mpz_t tr;
  3065   mpz_init_set_ui (tr, 1);
  3066 
  3067   bit = GMP_ULONG_HIGHBIT;
  3068   do
  3069     {
  3070       mpz_mul (tr, tr, tr);
  3071       if (e & bit)
  3072         mpz_mul (tr, tr, b);
  3073       bit >>= 1;
  3074     }
  3075   while (bit > 0);
  3076 
  3077   mpz_swap (r, tr);
  3078   mpz_clear (tr);
  3079 }
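
mpz_pow_ui is left-to-right binary exponentiation: it scans the exponent from GMP_ULONG_HIGHBIT down, squaring at every step and multiplying in b when the bit is set. A small check, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t b, r;
      mpz_init_set_ui (b, 3);
      mpz_init (r);

      mpz_pow_ui (r, b, 10);               /* 3^10 = 59049 */
      assert (mpz_cmp_ui (r, 59049) == 0);

      mpz_pow_ui (r, b, 0);                /* e = 0 gives 1 */
      assert (mpz_cmp_ui (r, 1) == 0);

      mpz_clear (b); mpz_clear (r);
      return 0;
    }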
  3080 
  3081 void
  3082 mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
  3083 {
  3084   mpz_t b;
  3085 
  3086   mpz_init_set_ui (b, blimb);
  3087   mpz_pow_ui (r, b, e);
  3088   mpz_clear (b);
  3089 }
  3090 
  3091 void
  3092 mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
  3093 {
  3094   mpz_t tr;
  3095   mpz_t base;
  3096   mp_size_t en, mn;
  3097   mp_srcptr mp;
  3098   struct gmp_div_inverse minv;
  3099   unsigned shift;
  3100   mp_ptr tp = NULL;
  3101 
  3102   en = GMP_ABS (e->_mp_size);
  3103   mn = GMP_ABS (m->_mp_size);
  3104   if (mn == 0)
  3105     gmp_die ("mpz_powm: Zero modulo.");
  3106 
  3107   if (en == 0)
  3108     {
  3109       mpz_set_ui (r, mpz_cmpabs_ui (m, 1));
  3110       return;
  3111     }
  3112 
  3113   mp = m->_mp_d;
  3114   mpn_div_qr_invert (&minv, mp, mn);
  3115   shift = minv.shift;
  3116 
  3117   if (shift > 0)
  3118     {
  3119       /* To avoid shifts, we do all our reductions, except the final
  3120          one, using a *normalized* m. */
  3121       minv.shift = 0;
  3122 
  3123       tp = gmp_alloc_limbs (mn);
  3124       gmp_assert_nocarry (mpn_lshift (tp, mp, mn, shift));
  3125       mp = tp;
  3126     }
  3127 
  3128   mpz_init (base);
  3129 
  3130   if (e->_mp_size < 0)
  3131     {
  3132       if (!mpz_invert (base, b, m))
  3133         gmp_die ("mpz_powm: Negative exponent and non-invertible base.");
  3134     }
  3135   else
  3136     {
  3137       mp_size_t bn;
  3138       mpz_abs (base, b);
  3139 
  3140       bn = base->_mp_size;
  3141       if (bn >= mn)
  3142         {
  3143           mpn_div_qr_preinv (NULL, base->_mp_d, base->_mp_size, mp, mn, &minv);
  3144           bn = mn;
  3145         }
  3146 
  3147       /* We have reduced the absolute value. Now take care of the
  3148          sign. Note that we get zero represented non-canonically as
  3149          m. */
  3150       if (b->_mp_size < 0)
  3151         {
  3152           mp_ptr bp = MPZ_REALLOC (base, mn);
  3153           gmp_assert_nocarry (mpn_sub (bp, mp, mn, bp, bn));
  3154           bn = mn;
  3155         }
  3156       base->_mp_size = mpn_normalized_size (base->_mp_d, bn);
  3157     }
  3158   mpz_init_set_ui (tr, 1);
  3159 
  3160   while (--en >= 0)
  3161     {
  3162       mp_limb_t w = e->_mp_d[en];
  3163       mp_limb_t bit;
  3164 
  3165       bit = GMP_LIMB_HIGHBIT;
  3166       do
  3167         {
  3168           mpz_mul (tr, tr, tr);
  3169           if (w & bit)
  3170             mpz_mul (tr, tr, base);
  3171           if (tr->_mp_size > mn)
  3172             {
  3173               mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
  3174               tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
  3175             }
  3176           bit >>= 1;
  3177         }
  3178       while (bit > 0);
  3179     }
  3180 
  3181   /* Final reduction */
  3182   if (tr->_mp_size >= mn)
  3183     {
  3184       minv.shift = shift;
  3185       mpn_div_qr_preinv (NULL, tr->_mp_d, tr->_mp_size, mp, mn, &minv);
  3186       tr->_mp_size = mpn_normalized_size (tr->_mp_d, mn);
  3187     }
  3188   if (tp)
  3189     gmp_free_limbs (tp, mn);
  3190 
  3191   mpz_swap (r, tr);
  3192   mpz_clear (tr);
  3193   mpz_clear (base);
  3194 }
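
mpz_powm reduces intermediate results against a normalized copy of m, applies the shift-adjusted reduction only at the end, and handles a negative exponent by inverting the base first. A short sketch, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t b, e, m, r;
      mpz_init_set_ui (b, 2);
      mpz_init_set_ui (e, 100);
      mpz_init_set_ui (m, 101);  /* prime modulus */
      mpz_init (r);

      mpz_powm (r, b, e, m);     /* Fermat: 2^100 = 1 (mod 101) */
      assert (mpz_cmp_ui (r, 1) == 0);

      mpz_set_si (e, -1);        /* negative exponents need an invertible base */
      mpz_powm (r, b, e, m);     /* 2^-1 = 51 (mod 101), since 2*51 = 102 */
      assert (mpz_cmp_ui (r, 51) == 0);

      mpz_clear (b); mpz_clear (e); mpz_clear (m); mpz_clear (r);
      return 0;
    }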
  3195 
  3196 void
  3197 mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
  3198 {
  3199   mpz_t e;
  3200 
  3201   mpz_init_set_ui (e, elimb);
  3202   mpz_powm (r, b, e, m);
  3203   mpz_clear (e);
  3204 }
  3205 
  3206 /* x=trunc(y^(1/z)), r=y-x^z */
  3207 void
  3208 mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
  3209 {
  3210   int sgn;
  3211   mp_bitcnt_t bc;
  3212   mpz_t t, u;
  3213 
  3214   sgn = y->_mp_size < 0;
  3215   if ((~z & sgn) != 0)
  3216     gmp_die ("mpz_rootrem: Negative argument, with even root.");
  3217   if (z == 0)
  3218     gmp_die ("mpz_rootrem: Zeroth root.");
  3219 
  3220   if (mpz_cmpabs_ui (y, 1) <= 0) {
  3221     if (x)
  3222       mpz_set (x, y);
  3223     if (r)
  3224       r->_mp_size = 0;
  3225     return;
  3226   }
  3227 
  3228   mpz_init (u);
  3229   mpz_init (t);
  3230   bc = (mpz_sizeinbase (y, 2) - 1) / z + 1;
  3231   mpz_setbit (t, bc);
  3232 
  3233   if (z == 2) /* simplify sqrt loop: z-1 == 1 */
  3234     do {
  3235       mpz_swap (u, t);                  /* u = x */
  3236       mpz_tdiv_q (t, y, u);             /* t = y/x */
  3237       mpz_add (t, t, u);                /* t = y/x + x */
  3238       mpz_tdiv_q_2exp (t, t, 1);        /* x'= (y/x + x)/2 */
  3239     } while (mpz_cmpabs (t, u) < 0);    /* |x'| < |x| */
  3240   else /* z != 2 */ {
  3241     mpz_t v;
  3242 
  3243     mpz_init (v);
  3244     if (sgn)
  3245       mpz_neg (t, t);
  3246 
  3247     do {
  3248       mpz_swap (u, t);                  /* u = x */
  3249       mpz_pow_ui (t, u, z - 1);         /* t = x^(z-1) */
  3250       mpz_tdiv_q (t, y, t);             /* t = y/x^(z-1) */
  3251       mpz_mul_ui (v, u, z - 1);         /* v = x*(z-1) */
  3252       mpz_add (t, t, v);                /* t = y/x^(z-1) + x*(z-1) */
  3253       mpz_tdiv_q_ui (t, t, z);          /* x'=(y/x^(z-1) + x*(z-1))/z */
  3254     } while (mpz_cmpabs (t, u) < 0);    /* |x'| < |x| */
  3255 
  3256     mpz_clear (v);
  3257   }
  3258 
  3259   if (r) {
  3260     mpz_pow_ui (t, u, z);
  3261     mpz_sub (r, y, t);
  3262   }
  3263   if (x)
  3264     mpz_swap (x, u);
  3265   mpz_clear (u);
  3266   mpz_clear (t);
  3267 }
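
The loop above is the Newton-style iteration x' = (y/x^(z-1) + (z-1) x) / z, started from a power of two at least as large as the root and stopped once it no longer decreases. A small check, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t y, x, r;
      mpz_init_set_ui (y, 100);
      mpz_init (x);
      mpz_init (r);

      mpz_rootrem (x, r, y, 3);       /* trunc(100^(1/3)) = 4, 100 - 4^3 = 36 */
      assert (mpz_cmp_ui (x, 4) == 0);
      assert (mpz_cmp_ui (r, 36) == 0);

      mpz_set_si (y, -27);
      mpz_rootrem (x, r, y, 3);       /* odd roots of negative values are fine */
      assert (mpz_cmp_si (x, -3) == 0);
      assert (mpz_sgn (r) == 0);

      mpz_clear (y); mpz_clear (x); mpz_clear (r);
      return 0;
    }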
  3268 
  3269 int
  3270 mpz_root (mpz_t x, const mpz_t y, unsigned long z)
  3271 {
  3272   int res;
  3273   mpz_t r;
  3274 
  3275   mpz_init (r);
  3276   mpz_rootrem (x, r, y, z);
  3277   res = r->_mp_size == 0;
  3278   mpz_clear (r);
  3279 
  3280   return res;
  3281 }
  3282 
  3283 /* Compute s = floor(sqrt(u)) and r = u - s^2. Allows r == NULL */
  3284 void
  3285 mpz_sqrtrem (mpz_t s, mpz_t r, const mpz_t u)
  3286 {
  3287   mpz_rootrem (s, r, u, 2);
  3288 }
  3289 
  3290 void
  3291 mpz_sqrt (mpz_t s, const mpz_t u)
  3292 {
  3293   mpz_rootrem (s, NULL, u, 2);
  3294 }
  3295 
  3296 int
  3297 mpz_perfect_square_p (const mpz_t u)
  3298 {
  3299   if (u->_mp_size <= 0)
  3300     return (u->_mp_size == 0);
  3301   else
  3302     return mpz_root (NULL, u, 2);
  3303 }
  3304 
  3305 int
  3306 mpn_perfect_square_p (mp_srcptr p, mp_size_t n)
  3307 {
  3308   mpz_t t;
  3309 
  3310   assert (n > 0);
  3311   assert (p [n-1] != 0);
  3312   return mpz_root (NULL, mpz_roinit_normal_n (t, p, n), 2);
  3313 }
  3314 
  3315 mp_size_t
  3316 mpn_sqrtrem (mp_ptr sp, mp_ptr rp, mp_srcptr p, mp_size_t n)
  3317 {
  3318   mpz_t s, r, u;
  3319   mp_size_t res;
  3320 
  3321   assert (n > 0);
  3322   assert (p [n-1] != 0);
  3323 
  3324   mpz_init (r);
  3325   mpz_init (s);
  3326   mpz_rootrem (s, r, mpz_roinit_normal_n (u, p, n), 2);
  3327 
  3328   assert (s->_mp_size == (n+1)/2);
  3329   mpn_copyd (sp, s->_mp_d, s->_mp_size);
  3330   mpz_clear (s);
  3331   res = r->_mp_size;
  3332   if (rp)
  3333     mpn_copyd (rp, r->_mp_d, res);
  3334   mpz_clear (r);
  3335   return res;
  3336 }
  3337 
  3338 /* Combinatorics */
  3339 
  3340 void
  3341 mpz_mfac_uiui (mpz_t x, unsigned long n, unsigned long m)
  3342 {
  3343   mpz_set_ui (x, n + (n == 0));
  3344   if (m + 1 < 2) return;
  3345   while (n > m + 1)
  3346     mpz_mul_ui (x, x, n -= m);
  3347 }
  3348 
  3349 void
  3350 mpz_2fac_ui (mpz_t x, unsigned long n)
  3351 {
  3352   mpz_mfac_uiui (x, n, 2);
  3353 }
  3354 
  3355 void
  3356 mpz_fac_ui (mpz_t x, unsigned long n)
  3357 {
  3358   mpz_mfac_uiui (x, n, 1);
  3359 }
  3360 
  3361 void
  3362 mpz_bin_uiui (mpz_t r, unsigned long n, unsigned long k)
  3363 {
  3364   mpz_t t;
  3365 
  3366   mpz_set_ui (r, k <= n);
  3367 
  3368   if (k > (n >> 1))
  3369     k = (k <= n) ? n - k : 0;
  3370 
  3371   mpz_init (t);
  3372   mpz_fac_ui (t, k);
  3373 
  3374   for (; k > 0; --k)
  3375     mpz_mul_ui (r, r, n--);
  3376 
  3377   mpz_divexact (r, r, t);
  3378   mpz_clear (t);
  3379 }
  3380 
  3381 
  3382 /* Primality testing */
  3383 
  3384 /* Computes Kronecker (a/b) with odd b, a!=0 and GCD(a,b) = 1 */
  3385 /* Adapted from JACOBI_BASE_METHOD==4 in mpn/generic/jacbase.c */
  3386 static int
  3387 gmp_jacobi_coprime (mp_limb_t a, mp_limb_t b)
  3388 {
  3389   int c, bit = 0;
  3390 
  3391   assert (b & 1);
  3392   assert (a != 0);
  3393   /* assert (mpn_gcd_11 (a, b) == 1); */
  3394 
  3395   /* Below, we represent a and b shifted right so that the least
  3396      significant one bit is implicit. */
  3397   b >>= 1;
  3398 
  3399   gmp_ctz(c, a);
  3400   a >>= 1;
  3401 
  3402   for (;;)
  3403     {
  3404       a >>= c;
  3405       /* (2/b) = -1 if b = 3 or 5 mod 8 */
  3406       bit ^= c & (b ^ (b >> 1));
  3407       if (a < b)
  3408         {
  3409           if (a == 0)
  3410             return bit & 1 ? -1 : 1;
  3411           bit ^= a & b;
  3412           a = b - a;
  3413           b -= a;
  3414         }
  3415       else
  3416         {
  3417           a -= b;
  3418           assert (a != 0);
  3419         }
  3420 
  3421       gmp_ctz(c, a);
  3422       ++c;
  3423     }
  3424 }
  3425 
  3426 static void
  3427 gmp_lucas_step_k_2k (mpz_t V, mpz_t Qk, const mpz_t n)
  3428 {
  3429   mpz_mod (Qk, Qk, n);
  3430   /* V_{2k} <- V_k ^ 2 - 2Q^k */
  3431   mpz_mul (V, V, V);
  3432   mpz_submul_ui (V, Qk, 2);
  3433   mpz_tdiv_r (V, V, n);
  3434   /* Q^{2k} = (Q^k)^2 */
  3435   mpz_mul (Qk, Qk, Qk);
  3436 }
  3437 
  3438 /* Computes V_k, Q^k (mod n) for the Lucas sequence */
  3439 /* with P=1, Q=Q; k = (n>>b0)|1. */
  3440 /* Requires an odd n > 4; b0 > 0; -2*Q must not overflow a long */
  3441 /* Returns (U_k == 0) and sets V=V_k and Qk=Q^k. */
  3442 static int
  3443 gmp_lucas_mod (mpz_t V, mpz_t Qk, long Q,
  3444                mp_bitcnt_t b0, const mpz_t n)
  3445 {
  3446   mp_bitcnt_t bs;
  3447   mpz_t U;
  3448   int res;
  3449 
  3450   assert (b0 > 0);
  3451   assert (Q <= - (LONG_MIN / 2));
  3452   assert (Q >= - (LONG_MAX / 2));
  3453   assert (mpz_cmp_ui (n, 4) > 0);
  3454   assert (mpz_odd_p (n));
  3455 
  3456   mpz_init_set_ui (U, 1); /* U1 = 1 */
  3457   mpz_set_ui (V, 1); /* V1 = 1 */
  3458   mpz_set_si (Qk, Q);
  3459 
  3460   for (bs = mpz_sizeinbase (n, 2) - 1; --bs >= b0;)
  3461     {
  3462       /* U_{2k} <- U_k * V_k */
  3463       mpz_mul (U, U, V);
  3464       /* V_{2k} <- V_k ^ 2 - 2Q^k */
  3465       /* Q^{2k} = (Q^k)^2 */
  3466       gmp_lucas_step_k_2k (V, Qk, n);
  3467 
  3468       /* A step k->k+1 is performed if the bit in $n$ is 1 */
  3469       /* (mpz_tstbit (n, bs)), or if the bit is 0 in $n$ but */
  3470       /* should be 1 in $n+1$ (bs == b0). */
  3471       if (b0 == bs || mpz_tstbit (n, bs))
  3472         {
  3473           /* Q^{k+1} <- Q^k * Q */
  3474           mpz_mul_si (Qk, Qk, Q);
  3475           /* U_{k+1} <- (U_k + V_k) / 2 */
  3476           mpz_swap (U, V); /* Keep in V the old value of U_k */
  3477           mpz_add (U, U, V);
  3478           /* We have to compute U/2, so we need an even value, */
  3479           /* equivalent (mod n) */
  3480           if (mpz_odd_p (U))
  3481             mpz_add (U, U, n);
  3482           mpz_tdiv_q_2exp (U, U, 1);
  3483           /* V_{k+1} <-(D*U_k + V_k) / 2 =
  3484                         U_{k+1} + (D-1)/2*U_k = U_{k+1} - 2Q*U_k */
  3485           mpz_mul_si (V, V, -2*Q);
  3486           mpz_add (V, U, V);
  3487           mpz_tdiv_r (V, V, n);
  3488         }
  3489       mpz_tdiv_r (U, U, n);
  3490     }
  3491 
  3492   res = U->_mp_size == 0;
  3493   mpz_clear (U);
  3494   return res;
  3495 }
  3496 
  3497 /* Performs the strong Lucas test on x, with the parameters suggested */
  3498 /* for the BPSW test. Qk is only passed to recycle a variable. */
  3499 /* Requires GCD (x,6) = 1. */
  3500 static int
  3501 gmp_stronglucas (const mpz_t x, mpz_t Qk)
  3502 {
  3503   mp_bitcnt_t b0;
  3504   mpz_t V, n;
  3505   mp_limb_t maxD, D; /* The absolute value is stored. */
  3506   long Q;
  3507   mp_limb_t tl;
  3508 
  3509   /* Test on the absolute value. */
  3510   mpz_roinit_normal_n (n, x->_mp_d, GMP_ABS (x->_mp_size));
  3511 
  3512   assert (mpz_odd_p (n));
  3513   /* assert (mpz_gcd_ui (NULL, n, 6) == 1); */
  3514   if (mpz_root (Qk, n, 2))
  3515     return 0; /* A square is composite. */
  3516 
  3517   /* Check Ds up to the square root (enough in case n is prime),
  3518      capping at GMP_LIMB_MAX to avoid overflow */
  3519   maxD = (Qk->_mp_size == 1) ? Qk->_mp_d [0] - 1 : GMP_LIMB_MAX;
  3520 
  3521   D = 3;
  3522   /* Search a D such that (D/n) = -1 in the sequence 5,-7,9,-11,.. */
  3523   /* For those Ds we have (D/n) = (n/|D|) */
  3524   do
  3525     {
  3526       if (D >= maxD)
  3527         return 1 + (D != GMP_LIMB_MAX); /* (1 + ! ~ D) */
  3528       D += 2;
  3529       tl = mpz_tdiv_ui (n, D);
  3530       if (tl == 0)
  3531         return 0;
  3532     }
  3533   while (gmp_jacobi_coprime (tl, D) == 1);
  3534 
  3535   mpz_init (V);
  3536 
  3537   /* n-(D/n) = n+1 = d*2^{b0}, with d = (n>>b0) | 1 */
  3538   b0 = mpn_common_scan (~ n->_mp_d[0], 0, n->_mp_d, n->_mp_size, GMP_LIMB_MAX);
  3539   /* b0 = mpz_scan0 (n, 0); */
  3540 
  3541   /* D= P^2 - 4Q; P = 1; Q = (1-D)/4 */
  3542   Q = (D & 2) ? (long) (D >> 2) + 1 : -(long) (D >> 2);
  3543 
  3544   if (! gmp_lucas_mod (V, Qk, Q, b0, n))        /* If Ud != 0 */
  3545     while (V->_mp_size != 0 && --b0 != 0)       /* while Vk != 0 */
  3546       /* V <- V ^ 2 - 2Q^k */
  3547       /* Q^{2k} = (Q^k)^2 */
  3548       gmp_lucas_step_k_2k (V, Qk, n);
  3549 
  3550   mpz_clear (V);
  3551   return (b0 != 0);
  3552 }
  3553 
  3554 static int
  3555 gmp_millerrabin (const mpz_t n, const mpz_t nm1, mpz_t y,
  3556                  const mpz_t q, mp_bitcnt_t k)
  3557 {
  3558   assert (k > 0);
  3559 
  3560   /* Caller must initialize y to the base. */
  3561   mpz_powm (y, y, q, n);
  3562 
  3563   if (mpz_cmp_ui (y, 1) == 0 || mpz_cmp (y, nm1) == 0)
  3564     return 1;
  3565 
  3566   while (--k > 0)
  3567     {
  3568       mpz_powm_ui (y, y, 2, n);
  3569       if (mpz_cmp (y, nm1) == 0)
  3570         return 1;
  3571     }
  3572   return 0;
  3573 }
  3574 
  3575 /* This product is 0xc0cfd797, and fits in 32 bits. */
  3576 #define GMP_PRIME_PRODUCT \
  3577   (3UL*5UL*7UL*11UL*13UL*17UL*19UL*23UL*29UL)
  3578 
  3579 /* Bit (p+1)/2 is set, for each odd prime <= 61 */
  3580 #define GMP_PRIME_MASK 0xc96996dcUL
  3581 
  3582 int
  3583 mpz_probab_prime_p (const mpz_t n, int reps)
  3584 {
  3585   mpz_t nm1;
  3586   mpz_t q;
  3587   mpz_t y;
  3588   mp_bitcnt_t k;
  3589   int is_prime;
  3590   int j;
  3591 
  3592   /* Note that we use the absolute value of n only, for compatibility
  3593      with the real GMP. */
  3594   if (mpz_even_p (n))
  3595     return (mpz_cmpabs_ui (n, 2) == 0) ? 2 : 0;
  3596 
  3597   /* Above test excludes n == 0 */
  3598   assert (n->_mp_size != 0);
  3599 
  3600   if (mpz_cmpabs_ui (n, 64) < 0)
  3601     return (GMP_PRIME_MASK >> (n->_mp_d[0] >> 1)) & 2;
  3602 
  3603   if (mpz_gcd_ui (NULL, n, GMP_PRIME_PRODUCT) != 1)
  3604     return 0;
  3605 
  3606   /* All prime factors are >= 31. */
  3607   if (mpz_cmpabs_ui (n, 31*31) < 0)
  3608     return 2;
  3609 
  3610   mpz_init (nm1);
  3611   mpz_init (q);
  3612 
  3613   /* Find q and k, where q is odd and n = 1 + 2**k * q.  */
  3614   mpz_abs (nm1, n);
  3615   nm1->_mp_d[0] -= 1;
  3616   /* Count trailing zeros, equivalent to mpn_scan1, because we know that there is a 1 */
  3617   k = mpn_scan1 (nm1->_mp_d, 0);
  3618   mpz_tdiv_q_2exp (q, nm1, k);
  3619 
  3620   /* BPSW test */
  3621   mpz_init_set_ui (y, 2);
  3622   is_prime = gmp_millerrabin (n, nm1, y, q, k) && gmp_stronglucas (n, y);
  3623   reps -= 24; /* skip the first 24 repetitions */
  3624 
  3625   /* Use Miller-Rabin, with a deterministic sequence of bases, a[j] =
  3626      j^2 + j + 41 using Euler's polynomial. We potentially stop early,
  3627      if a[j] >= n - 1. Since n >= 31*31, this can happen only if reps >
  3628      30 (a[30] == 971 > 31*31 == 961). */
  3629 
  3630   for (j = 0; is_prime & (j < reps); j++)
  3631     {
  3632       mpz_set_ui (y, (unsigned long) j*j+j+41);
  3633       if (mpz_cmp (y, nm1) >= 0)
  3634         {
  3635           /* Don't try any further bases. This "early" break does not affect
  3636              the result for any reasonable reps value (<=5000 was tested) */
  3637           assert (j >= 30);
  3638           break;
  3639         }
  3640       is_prime = gmp_millerrabin (n, nm1, y, q, k);
  3641     }
  3642   mpz_clear (nm1);
  3643   mpz_clear (q);
  3644   mpz_clear (y);
  3645 
  3646   return is_prime;
  3647 }
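
The test is trial division by the small-prime product, one BPSW round (Miller-Rabin to base 2 plus the strong Lucas test), and then extra Miller-Rabin rounds with bases j^2 + j + 41. As in GMP proper, the return value is 2 for certainly prime, 1 for probably prime and 0 for composite; a small sketch, assuming the companion mini-gmp.h header:

    #include <assert.h>
    #include "mini-gmp.h"

    int
    main (void)
    {
      mpz_t n;
      mpz_init_set_ui (n, 61);
      assert (mpz_probab_prime_p (n, 25) == 2);  /* small primes are certain */

      mpz_set_ui (n, 2147483647UL);              /* 2^31 - 1, a Mersenne prime */
      assert (mpz_probab_prime_p (n, 25) == 1);

      mpz_set_ui (n, 561);                       /* 3 * 11 * 17, a Carmichael number */
      assert (mpz_probab_prime_p (n, 25) == 0);

      mpz_clear (n);
      return 0;
    }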
  3648 
  3649 
  3650 /* Logical operations and bit manipulation. */
  3651 
  3652 /* Numbers are treated as if represented in two's complement (and
  3653    infinitely sign extended). For a negative values we get the two's
  3654    complement from -x = ~x + 1, where ~ is bitwise complement.
  3655    Negation transforms
  3656 
  3657      xxxx10...0
  3658 
  3659    into
  3660 
  3661      yyyy10...0
  3662 
  3663    where yyyy is the bitwise complement of xxxx. So least significant
  3664    bits, up to and including the first one bit, are unchanged, and
  3665    the more significant bits are all complemented.
  3666 
  3667    To change a bit from zero to one in a negative number, subtract the
  3668    corresponding power of two from the absolute value. This can never
  3669    underflow. To change a bit from one to zero, add the corresponding
  3670    power of two, and this might overflow. E.g., if x = -001111, the
  3671    two's complement is 110001. Clearing the least significant bit, we
  3672    get two's complement 110000, and -010000. */
  3673 
  3674 int
  3675 mpz_tstbit (const mpz_t d, mp_bitcnt_t bit_index)
  3676 {
  3677   mp_size_t limb_index;
  3678   unsigned shift;
  3679   mp_size_t ds;
  3680   mp_size_t dn;
  3681   mp_limb_t w;
  3682   int bit;
  3683 
  3684   ds = d->_mp_size;
  3685   dn = GMP_ABS (ds);
  3686   limb_index = bit_index / GMP_LIMB_BITS;
  3687   if (limb_index >= dn)
  3688     return ds < 0;
  3689 
  3690   shift = bit_index % GMP_LIMB_BITS;
  3691   w = d->_mp_d[limb_index];
  3692   bit = (w >> shift) & 1;
  3693 
  3694   if (ds < 0)
  3695     {
  3696       /* d < 0. Check if any of the bits below is set: If so, our bit
  3697          must be complemented. */
  3698       if (shift > 0 && (mp_limb_t) (w << (GMP_LIMB_BITS - shift)) > 0)
  3699         return bit ^ 1;
  3700       while (--limb_index >= 0)
  3701         if (d->_mp_d[limb_index] > 0)
  3702           return bit ^ 1;
  3703     }
  3704   return bit;
  3705 }
  3706 
  3707 static void
  3708 mpz_abs_add_bit (mpz_t d, mp_bitcnt_t bit_index)
  3709 {
  3710   mp_size_t dn, limb_index;
  3711   mp_limb_t bit;
  3712   mp_ptr dp;
  3713 
  3714   dn = GMP_ABS (d->_mp_size);
  3715 
  3716   limb_index = bit_index / GMP_LIMB_BITS;
  3717   bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
  3718 
  3719   if (limb_index >= dn)
  3720     {
  3721       mp_size_t i;
  3722       /* The bit to set lies beyond the current end of the number,
  3723          so we have to increase its size. */
  3724       dp = MPZ_REALLOC (d, limb_index + 1);
  3725 
  3726       dp[limb_index] = bit;
  3727       for (i = dn; i < limb_index; i++)
  3728         dp[i] = 0;
  3729       dn = limb_index + 1;
  3730     }
  3731   else
  3732     {
  3733       mp_limb_t cy;
  3734 
  3735       dp = d->_mp_d;
  3736 
  3737       cy = mpn_add_1 (dp + limb_index, dp + limb_index, dn - limb_index, bit);
  3738       if (cy > 0)
  3739         {
  3740           dp = MPZ_REALLOC (d, dn + 1);
  3741           dp[dn++] = cy;
  3742         }
  3743     }
  3744 
  3745   d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
  3746 }
  3747 
  3748 static void
  3749 mpz_abs_sub_bit (mpz_t d, mp_bitcnt_t bit_index)
  3750 {
  3751   mp_size_t dn, limb_index;
  3752   mp_ptr dp;
  3753   mp_limb_t bit;
  3754 
  3755   dn = GMP_ABS (d->_mp_size);
  3756   dp = d->_mp_d;
  3757 
  3758   limb_index = bit_index / GMP_LIMB_BITS;
  3759   bit = (mp_limb_t) 1 << (bit_index % GMP_LIMB_BITS);
  3760 
  3761   assert (limb_index < dn);
  3762 
  3763   gmp_assert_nocarry (mpn_sub_1 (dp + limb_index, dp + limb_index,
  3764                                  dn - limb_index, bit));
  3765   dn = mpn_normalized_size (dp, dn);
  3766   d->_mp_size = (d->_mp_size < 0) ? - dn : dn;
  3767 }
  3768 
  3769 void
  3770 mpz_setbit (mpz_t d, mp_bitcnt_t bit_index)
  3771 {
  3772   if (!mpz_tstbit (d, bit_index))
  3773     {
  3774       if (d->_mp_size >= 0)
  3775         mpz_abs_add_bit (d, bit_index);
  3776       else
  3777         mpz_abs_sub_bit (d, bit_index);
  3778     }
  3779 }
  3780 
  3781 void
  3782 mpz_clrbit (mpz_t d, mp_bitcnt_t bit_index)
  3783 {
  3784   if (mpz_tstbit (d, bit_index))
  3785     {
  3786       if (d->_mp_size >= 0)
  3787         mpz_abs_sub_bit (d, bit_index);
  3788       else
  3789         mpz_abs_add_bit (d, bit_index);
  3790     }
  3791 }
  3792 
  3793 void
  3794 mpz_combit (mpz_t d, mp_bitcnt_t bit_index)
  3795 {
  3796   if (mpz_tstbit (d, bit_index) ^ (d->_mp_size < 0))
  3797     mpz_abs_sub_bit (d, bit_index);
  3798   else
  3799     mpz_abs_add_bit (d, bit_index);
  3800 }
  3801 
  3802 void
  3803 mpz_com (mpz_t r, const mpz_t u)
  3804 {
  3805   mpz_add_ui (r, u, 1);
  3806   mpz_neg (r, r);
  3807 }
  3808 
  3809 void
  3810 mpz_and (mpz_t r, const mpz_t u, const mpz_t v)
  3811 {
  3812   mp_size_t un, vn, rn, i;
  3813   mp_ptr up, vp, rp;
  3814 
  3815   mp_limb_t ux, vx, rx;
  3816   mp_limb_t uc, vc, rc;
  3817   mp_limb_t ul, vl, rl;
  3818 
  3819   un = GMP_ABS (u->_mp_size);
  3820   vn = GMP_ABS (v->_mp_size);
  3821   if (un < vn)
  3822     {
  3823       MPZ_SRCPTR_SWAP (u, v);
  3824       MP_SIZE_T_SWAP (un, vn);
  3825     }
  3826   if (vn == 0)
  3827     {
  3828       r->_mp_size = 0;
  3829       return;
  3830     }
  3831 
  3832   uc = u->_mp_size < 0;
  3833   vc = v->_mp_size < 0;
  3834   rc = uc & vc;
  3835 
  3836   ux = -uc;
  3837   vx = -vc;
  3838   rx = -rc;
  3839 
  3840   /* If the smaller input is positive, higher limbs don't matter. */
  3841   rn = vx ? un : vn;
  3842 
  3843   rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);
  3844 
  3845   up = u->_mp_d;
  3846   vp = v->_mp_d;
  3847 
  3848   i = 0;
  3849   do
  3850     {
  3851       ul = (up[i] ^ ux) + uc;
  3852       uc = ul < uc;
  3853 
  3854       vl = (vp[i] ^ vx) + vc;
  3855       vc = vl < vc;
  3856 
  3857       rl = ( (ul & vl) ^ rx) + rc;
  3858       rc = rl < rc;
  3859       rp[i] = rl;
  3860     }
  3861   while (++i < vn);
  3862   assert (vc == 0);
  3863 
  3864   for (; i < rn; i++)
  3865     {
  3866       ul = (up[i] ^ ux) + uc;
  3867       uc = ul < uc;
  3868 
  3869       rl = ( (ul & vx) ^ rx) + rc;
  3870       rc = rl < rc;
  3871       rp[i] = rl;
  3872     }
  3873   if (rc)
  3874     rp[rn++] = rc;
  3875   else
  3876     rn = mpn_normalized_size (rp, rn);
  3877 
  3878   r->_mp_size = rx ? -rn : rn;
  3879 }
  3880 
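/* Sketch, not part of mini-gmp: the do-loops in mpz_and (and in mpz_ior,
   mpz_xor and mpz_hamdist below) convert a negative operand to two's
   complement on the fly, one limb at a time, as l = (p[i] ^ mask) + carry
   with mask = GMP_LIMB_MAX and an initial carry of 1, i.e. ~x + 1 with
   carry propagation.  The hypothetical helper below applies the same
   pattern to a small limb array and cross-checks it against mpn_neg. */
static void
example_onthefly_twos_complement (void)
{
  mp_limb_t up[2] = { 123, 456 };
  mp_limb_t t1[2], t2[2];
  mp_limb_t mask = GMP_LIMB_MAX;
  mp_limb_t c = 1;
  mp_size_t i;

  for (i = 0; i < 2; i++)
    {
      mp_limb_t l = (up[i] ^ mask) + c;
      c = l < c;
      t1[i] = l;
    }

  /* mpn_neg computes the same two's complement in a single pass. */
  mpn_neg (t2, up, 2);
  assert (mpn_cmp (t1, t2, 2) == 0);
}
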
  3881 void
  3882 mpz_ior (mpz_t r, const mpz_t u, const mpz_t v)
  3883 {
  3884   mp_size_t un, vn, rn, i;
  3885   mp_ptr up, vp, rp;
  3886 
  3887   mp_limb_t ux, vx, rx;
  3888   mp_limb_t uc, vc, rc;
  3889   mp_limb_t ul, vl, rl;
  3890 
  3891   un = GMP_ABS (u->_mp_size);
  3892   vn = GMP_ABS (v->_mp_size);
  3893   if (un < vn)
  3894     {
  3895       MPZ_SRCPTR_SWAP (u, v);
  3896       MP_SIZE_T_SWAP (un, vn);
  3897     }
  3898   if (vn == 0)
  3899     {
  3900       mpz_set (r, u);
  3901       return;
  3902     }
  3903 
  3904   uc = u->_mp_size < 0;
  3905   vc = v->_mp_size < 0;
  3906   rc = uc | vc;
  3907 
  3908   ux = -uc;
  3909   vx = -vc;
  3910   rx = -rc;
  3911 
  3912   /* If the smaller input is negative, by sign extension higher limbs
  3913      don't matter. */
  3914   rn = vx ? vn : un;
  3915 
  3916   rp = MPZ_REALLOC (r, rn + (mp_size_t) rc);
  3917 
  3918   up = u->_mp_d;
  3919   vp = v->_mp_d;
  3920 
  3921   i = 0;
  3922   do
  3923     {
  3924       ul = (up[i] ^ ux) + uc;
  3925       uc = ul < uc;
  3926 
  3927       vl = (vp[i] ^ vx) + vc;
  3928       vc = vl < vc;
  3929 
  3930       rl = ( (ul | vl) ^ rx) + rc;
  3931       rc = rl < rc;
  3932       rp[i] = rl;
  3933     }
  3934   while (++i < vn);
  3935   assert (vc == 0);
  3936 
  3937   for (; i < rn; i++)
  3938     {
  3939       ul = (up[i] ^ ux) + uc;
  3940       uc = ul < uc;
  3941 
  3942       rl = ( (ul | vx) ^ rx) + rc;
  3943       rc = rl < rc;
  3944       rp[i] = rl;
  3945     }
  3946   if (rc)
  3947     rp[rn++] = rc;
  3948   else
  3949     rn = mpn_normalized_size (rp, rn);
  3950 
  3951   r->_mp_size = rx ? -rn : rn;
  3952 }
  3953 
  3954 void
  3955 mpz_xor (mpz_t r, const mpz_t u, const mpz_t v)
  3956 {
  3957   mp_size_t un, vn, i;
  3958   mp_ptr up, vp, rp;
  3959 
  3960   mp_limb_t ux, vx, rx;
  3961   mp_limb_t uc, vc, rc;
  3962   mp_limb_t ul, vl, rl;
  3963 
  3964   un = GMP_ABS (u->_mp_size);
  3965   vn = GMP_ABS (v->_mp_size);
  3966   if (un < vn)
  3967     {
  3968       MPZ_SRCPTR_SWAP (u, v);
  3969       MP_SIZE_T_SWAP (un, vn);
  3970     }
  3971   if (vn == 0)
  3972     {
  3973       mpz_set (r, u);
  3974       return;
  3975     }
  3976 
  3977   uc = u->_mp_size < 0;
  3978   vc = v->_mp_size < 0;
  3979   rc = uc ^ vc;
  3980 
  3981   ux = -uc;
  3982   vx = -vc;
  3983   rx = -rc;
  3984 
  3985   rp = MPZ_REALLOC (r, un + (mp_size_t) rc);
  3986 
  3987   up = u->_mp_d;
  3988   vp = v->_mp_d;
  3989 
  3990   i = 0;
  3991   do
  3992     {
  3993       ul = (up[i] ^ ux) + uc;
  3994       uc = ul < uc;
  3995 
  3996       vl = (vp[i] ^ vx) + vc;
  3997       vc = vl < vc;
  3998 
  3999       rl = (ul ^ vl ^ rx) + rc;
  4000       rc = rl < rc;
  4001       rp[i] = rl;
  4002     }
  4003   while (++i < vn);
  4004   assert (vc == 0);
  4005 
  4006   for (; i < un; i++)
  4007     {
  4008       ul = (up[i] ^ ux) + uc;
  4009       uc = ul < uc;
  4010 
  4011       rl = (ul ^ ux) + rc;
  4012       rc = rl < rc;
  4013       rp[i] = rl;
  4014     }
  4015   if (rc)
  4016     rp[un++] = rc;
  4017   else
  4018     un = mpn_normalized_size (rp, un);
  4019 
  4020   r->_mp_size = rx ? -un : un;
  4021 }
  4022 
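/* Illustrative check, not part of mini-gmp, of the two's complement
   semantics of mpz_com, mpz_and, mpz_ior and mpz_xor on negative
   operands, e.g. -6 = ...11010 and -5 = ...11011.  The function name is
   hypothetical. */
static void
example_bitwise_signs (void)
{
  mpz_t a, b, r;

  mpz_init_set_si (a, -6);
  mpz_init_set_si (b, -5);
  mpz_init (r);

  mpz_com (r, a);                    /* ~u = -u - 1, so ~(-6) = 5 */
  assert (mpz_cmp_si (r, 5) == 0);

  mpz_and (r, a, b);                 /* ...11010 & ...11011 = ...11010 */
  assert (mpz_cmp_si (r, -6) == 0);

  mpz_ior (r, a, b);                 /* ...11010 | ...11011 = ...11011 */
  assert (mpz_cmp_si (r, -5) == 0);

  mpz_xor (r, a, b);                 /* ...11010 ^ ...11011 = 1 */
  assert (mpz_cmp_si (r, 1) == 0);

  mpz_set_si (b, 5);
  mpz_and (r, a, b);                 /* ...11010 & 00101 = 0 */
  assert (mpz_sgn (r) == 0);

  mpz_clear (a);
  mpz_clear (b);
  mpz_clear (r);
}
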
  4023 static unsigned
  4024 gmp_popcount_limb (mp_limb_t x)
  4025 {
  4026   unsigned c;
  4027 
  4028   /* Do 16 bits at a time, to avoid limb-sized constants. */
  4029   int LOCAL_SHIFT_BITS = 16;
  4030   for (c = 0; x > 0;)
  4031     {
  4032       unsigned w = x - ((x >> 1) & 0x5555);
  4033       w = ((w >> 2) & 0x3333) + (w & 0x3333);
  4034       w =  (w >> 4) + w;
  4035       w = ((w >> 8) & 0x000f) + (w & 0x000f);
  4036       c += w;
  4037       if (GMP_LIMB_BITS > LOCAL_SHIFT_BITS)
  4038         x >>= LOCAL_SHIFT_BITS;
  4039       else
  4040         x = 0;
  4041     }
  4042   return c;
  4043 }
  4044 
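/* Sketch, not part of mini-gmp: gmp_popcount_limb above reduces 16 bits
   at a time with shift-and-mask steps (bit pairs, then nibbles, then the
   two byte counts).  The hypothetical helper below checks it against a
   naive one-bit-at-a-time count for a few sample limbs. */
static void
example_check_popcount_limb (void)
{
  static const mp_limb_t samples[] = { 0, 1, 0x5555, 0xffff, GMP_LIMB_MAX };
  size_t i;

  for (i = 0; i < sizeof (samples) / sizeof (samples[0]); i++)
    {
      mp_limb_t t = samples[i];
      unsigned naive = 0;

      while (t != 0)
        {
          naive += (unsigned) (t & 1);
          t >>= 1;
        }
      assert (gmp_popcount_limb (samples[i]) == naive);
    }
}
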
  4045 mp_bitcnt_t
  4046 mpn_popcount (mp_srcptr p, mp_size_t n)
  4047 {
  4048   mp_size_t i;
  4049   mp_bitcnt_t c;
  4050 
  4051   for (c = 0, i = 0; i < n; i++)
  4052     c += gmp_popcount_limb (p[i]);
  4053 
  4054   return c;
  4055 }
  4056 
  4057 mp_bitcnt_t
  4058 mpz_popcount (const mpz_t u)
  4059 {
  4060   mp_size_t un;
  4061 
  4062   un = u->_mp_size;
  4063 
  4064   if (un < 0)
  4065     return ~(mp_bitcnt_t) 0;
  4066 
  4067   return mpn_popcount (u->_mp_d, un);
  4068 }
  4069 
  4070 mp_bitcnt_t
  4071 mpz_hamdist (const mpz_t u, const mpz_t v)
  4072 {
  4073   mp_size_t un, vn, i;
  4074   mp_limb_t uc, vc, ul, vl, comp;
  4075   mp_srcptr up, vp;
  4076   mp_bitcnt_t c;
  4077 
  4078   un = u->_mp_size;
  4079   vn = v->_mp_size;
  4080 
  4081   if ( (un ^ vn) < 0)
  4082     return ~(mp_bitcnt_t) 0;
  4083 
  4084   comp = - (uc = vc = (un < 0));
  4085   if (uc)
  4086     {
  4087       assert (vn < 0);
  4088       un = -un;
  4089       vn = -vn;
  4090     }
  4091 
  4092   up = u->_mp_d;
  4093   vp = v->_mp_d;
  4094 
  4095   if (un < vn)
  4096     MPN_SRCPTR_SWAP (up, un, vp, vn);
  4097 
  4098   for (i = 0, c = 0; i < vn; i++)
  4099     {
  4100       ul = (up[i] ^ comp) + uc;
  4101       uc = ul < uc;
  4102 
  4103       vl = (vp[i] ^ comp) + vc;
  4104       vc = vl < vc;
  4105 
  4106       c += gmp_popcount_limb (ul ^ vl);
  4107     }
  4108   assert (vc == 0);
  4109 
  4110   for (; i < un; i++)
  4111     {
  4112       ul = (up[i] ^ comp) + uc;
  4113       uc = ul < uc;
  4114 
  4115       c += gmp_popcount_limb (ul ^ comp);
  4116     }
  4117 
  4118   return c;
  4119 }
  4120 
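/* Usage sketch, not part of mini-gmp: mpz_popcount of a negative number,
   and mpz_hamdist of operands with different signs, are infinite and are
   reported as ~(mp_bitcnt_t) 0.  The function name is made up. */
static void
example_popcount_hamdist (void)
{
  mpz_t a, b;

  mpz_init_set_ui (a, 0xff);
  assert (mpz_popcount (a) == 8);

  mpz_set_si (a, -1);                          /* ...11111: infinitely many ones */
  assert (mpz_popcount (a) == ~(mp_bitcnt_t) 0);

  mpz_init_set_si (b, -2);                     /* ...11110: differs in one bit */
  assert (mpz_hamdist (a, b) == 1);

  mpz_set_ui (b, 0);                           /* different signs: infinite distance */
  assert (mpz_hamdist (a, b) == ~(mp_bitcnt_t) 0);

  mpz_clear (a);
  mpz_clear (b);
}
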
  4121 mp_bitcnt_t
  4122 mpz_scan1 (const mpz_t u, mp_bitcnt_t starting_bit)
  4123 {
  4124   mp_ptr up;
  4125   mp_size_t us, un, i;
  4126   mp_limb_t limb, ux;
  4127 
  4128   us = u->_mp_size;
  4129   un = GMP_ABS (us);
  4130   i = starting_bit / GMP_LIMB_BITS;
  4131 
  4132   /* Past the end there are no 1 bits for u>=0, or an immediate 1 bit
  4133      for u<0. Notice this test picks up any u==0 too. */
  4134   if (i >= un)
  4135     return (us >= 0 ? ~(mp_bitcnt_t) 0 : starting_bit);
  4136 
  4137   up = u->_mp_d;
  4138   ux = 0;
  4139   limb = up[i];
  4140 
  4141   if (starting_bit != 0)
  4142     {
  4143       if (us < 0)
  4144         {
  4145           ux = mpn_zero_p (up, i);
  4146           limb = ~ limb + ux;
  4147           ux = - (mp_limb_t) (limb >= ux);
  4148         }
  4149 
  4150       /* Mask to 0 all bits before starting_bit, thus ignoring them. */
  4151       limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);
  4152     }
  4153 
  4154   return mpn_common_scan (limb, i, up, un, ux);
  4155 }
  4156 
  4157 mp_bitcnt_t
  4158 mpz_scan0 (const mpz_t u, mp_bitcnt_t starting_bit)
  4159 {
  4160   mp_ptr up;
  4161   mp_size_t us, un, i;
  4162   mp_limb_t limb, ux;
  4163 
  4164   us = u->_mp_size;
  4165   ux = - (mp_limb_t) (us >= 0);
  4166   un = GMP_ABS (us);
  4167   i = starting_bit / GMP_LIMB_BITS;
  4168 
  4169   /* When past the end, there's an immediate 0 bit for u>=0, or no 0 bits for
  4170      u<0.  Notice this test picks up all cases of u==0 too. */
  4171   if (i >= un)
  4172     return (ux ? starting_bit : ~(mp_bitcnt_t) 0);
  4173 
  4174   up = u->_mp_d;
  4175   limb = up[i] ^ ux;
  4176 
  4177   if (ux == 0)
  4178     limb -= mpn_zero_p (up, i); /* limb = ~(~limb + zero_p) */
  4179 
  4180   /* Mask all bits before starting_bit, thus ignoring them. */
  4181   limb &= GMP_LIMB_MAX << (starting_bit % GMP_LIMB_BITS);
  4182 
  4183   return mpn_common_scan (limb, i, up, un, ux);
  4184 }
  4185 
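/* Usage sketch, not part of mini-gmp, of the scan semantics implemented
   above; the function name is made up.  For u >= 0 there are infinitely
   many 0 bits past the end but no further 1 bits, and vice versa for
   u < 0; "no such bit" is reported as ~(mp_bitcnt_t) 0. */
static void
example_scan (void)
{
  mpz_t x;

  mpz_init_set_ui (x, 0x18);                       /* binary 11000 */
  assert (mpz_scan1 (x, 0) == 3);                  /* first 1 bit at or above bit 0 */
  assert (mpz_scan0 (x, 3) == 5);                  /* first 0 bit at or above bit 3 */
  assert (mpz_scan1 (x, 5) == ~(mp_bitcnt_t) 0);   /* no 1 bits past the end */

  mpz_set_si (x, -4);                              /* two's complement ...11100 */
  assert (mpz_scan1 (x, 0) == 2);
  assert (mpz_scan0 (x, 2) == ~(mp_bitcnt_t) 0);   /* all bits above bit 1 are ones */

  mpz_clear (x);
}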
  4186 
  4187 /* MPZ base conversion. */
  4188 
  4189 size_t
  4190 mpz_sizeinbase (const mpz_t u, int base)
  4191 {
  4192   mp_size_t un, tn;
  4193   mp_srcptr up;
  4194   mp_ptr tp;
  4195   mp_bitcnt_t bits;
  4196   struct gmp_div_inverse bi;
  4197   size_t ndigits;
  4198 
  4199   assert (base >= 2);
  4200   assert (base <= 62);
  4201 
  4202   un = GMP_ABS (u->_mp_size);
  4203   if (un == 0)
  4204     return 1;
  4205 
  4206   up = u->_mp_d;
  4207 
  4208   bits = (un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1]);
  4209   switch (base)
  4210     {
  4211     case 2:
  4212       return bits;
  4213     case 4:
  4214       return (bits + 1) / 2;
  4215     case 8:
  4216       return (bits + 2) / 3;
  4217     case 16:
  4218       return (bits + 3) / 4;
  4219     case 32:
  4220       return (bits + 4) / 5;
  4221       /* FIXME: Do something more clever for the common case of base
  4222          10. */
  4223     }
  4224 
  4225   tp = gmp_alloc_limbs (un);
  4226   mpn_copyi (tp, up, un);
  4227   mpn_div_qr_1_invert (&bi, base);
  4228 
  4229   tn = un;
  4230   ndigits = 0;
  4231   do
  4232     {
  4233       ndigits++;
  4234       mpn_div_qr_1_preinv (tp, tp, tn, &bi);
  4235       tn -= (tp[tn-1] == 0);
  4236     }
  4237   while (tn > 0);
  4238 
  4239   gmp_free_limbs (tp, un);
  4240   return ndigits;
  4241 }
  4242 
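/* Usage sketch, not part of mini-gmp: with the implementation above the
   returned value is the exact number of digits in the given base (power
   of two bases use the bit count, other bases count divisions), and zero
   still needs one digit.  The function name is made up. */
static void
example_sizeinbase (void)
{
  mpz_t x;

  mpz_init_set_ui (x, 1000);
  assert (mpz_sizeinbase (x, 2) == 10);   /* 1111101000 */
  assert (mpz_sizeinbase (x, 16) == 3);   /* 3e8 */
  assert (mpz_sizeinbase (x, 10) == 4);   /* 1000 */

  mpz_set_ui (x, 0);
  assert (mpz_sizeinbase (x, 10) == 1);

  mpz_clear (x);
}
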
  4243 char *
  4244 mpz_get_str (char *sp, int base, const mpz_t u)
  4245 {
  4246   unsigned bits;
  4247   const char *digits;
  4248   mp_size_t un;
  4249   size_t i, sn, osn;
  4250 
  4251   digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
  4252   if (base > 1)
  4253     {
  4254       if (base <= 36)
  4255         digits = "0123456789abcdefghijklmnopqrstuvwxyz";
  4256       else if (base > 62)
  4257         return NULL;
  4258     }
  4259   else if (base >= -1)
  4260     base = 10;
  4261   else
  4262     {
  4263       base = -base;
  4264       if (base > 36)
  4265         return NULL;
  4266     }
  4267 
  4268   sn = 1 + mpz_sizeinbase (u, base);
  4269   if (!sp)
  4270     {
  4271       osn = 1 + sn;
  4272       sp = (char *) gmp_alloc (osn);
  4273     }
  4274   else
  4275     osn = 0;
  4276   un = GMP_ABS (u->_mp_size);
  4277 
  4278   if (un == 0)
  4279     {
  4280       sp[0] = '0';
  4281       sn = 1;
  4282       goto ret;
  4283     }
  4284 
  4285   i = 0;
  4286 
  4287   if (u->_mp_size < 0)
  4288     sp[i++] = '-';
  4289 
  4290   bits = mpn_base_power_of_two_p (base);
  4291 
  4292   if (bits)
  4293     /* u->_mp_d is not modified in this case, so no copy is needed. */
  4294     sn = i + mpn_get_str_bits ((unsigned char *) sp + i, bits, u->_mp_d, un);
  4295   else
  4296     {
  4297       struct mpn_base_info info;
  4298       mp_ptr tp;
  4299 
  4300       mpn_get_base_info (&info, base);
  4301       tp = gmp_alloc_limbs (un);
  4302       mpn_copyi (tp, u->_mp_d, un);
  4303 
  4304       sn = i + mpn_get_str_other ((unsigned char *) sp + i, base, &info, tp, un);
  4305       gmp_free_limbs (tp, un);
  4306     }
  4307 
  4308   for (; i < sn; i++)
  4309     sp[i] = digits[(unsigned char) sp[i]];
  4310 
  4311 ret:
  4312   sp[sn] = '\0';
  4313   if (osn && osn != sn + 1)
  4314     sp = (char*) gmp_realloc (sp, osn, sn + 1);
  4315   return sp;
  4316 }
  4317 
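/* Usage sketch, not part of mini-gmp, of the base handling above: a
   positive base up to 36 uses lower case digits, a negative base (down to
   -36) upper case, and bases 37..62 the mixed 62-character alphabet.
   Passing sp == NULL makes mpz_get_str allocate the string, which is
   freed here with the file-local gmp_free, matching mpz_out_str below.
   The function name is made up. */
static void
example_get_str_bases (void)
{
  mpz_t x;
  char *s;

  mpz_init_set_si (x, -3054);            /* -3054 = -0xbee */

  s = mpz_get_str (NULL, 16, x);
  assert (strcmp (s, "-bee") == 0);
  gmp_free (s, strlen (s) + 1);

  s = mpz_get_str (NULL, -16, x);
  assert (strcmp (s, "-BEE") == 0);
  gmp_free (s, strlen (s) + 1);

  mpz_clear (x);
}
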
  4318 int
  4319 mpz_set_str (mpz_t r, const char *sp, int base)
  4320 {
  4321   unsigned bits, value_of_a;
  4322   mp_size_t rn, alloc;
  4323   mp_ptr rp;
  4324   size_t dn, sn;
  4325   int sign;
  4326   unsigned char *dp;
  4327 
  4328   assert (base == 0 || (base >= 2 && base <= 62));
  4329 
  4330   while (isspace( (unsigned char) *sp))
  4331     sp++;
  4332 
  4333   sign = (*sp == '-');
  4334   sp += sign;
  4335 
  4336   if (base == 0)
  4337     {
  4338       if (sp[0] == '0')
  4339         {
  4340           if (sp[1] == 'x' || sp[1] == 'X')
  4341             {
  4342               base = 16;
  4343               sp += 2;
  4344             }
  4345           else if (sp[1] == 'b' || sp[1] == 'B')
  4346             {
  4347               base = 2;
  4348               sp += 2;
  4349             }
  4350           else
  4351             base = 8;
  4352         }
  4353       else
  4354         base = 10;
  4355     }
  4356 
  4357   if (!*sp)
  4358     {
  4359       r->_mp_size = 0;
  4360       return -1;
  4361     }
  4362   sn = strlen(sp);
  4363   dp = (unsigned char *) gmp_alloc (sn);
  4364 
  4365   value_of_a = (base > 36) ? 36 : 10;
  4366   for (dn = 0; *sp; sp++)
  4367     {
  4368       unsigned digit;
  4369 
  4370       if (isspace ((unsigned char) *sp))
  4371         continue;
  4372       else if (*sp >= '0' && *sp <= '9')
  4373         digit = *sp - '0';
  4374       else if (*sp >= 'a' && *sp <= 'z')
  4375         digit = *sp - 'a' + value_of_a;
  4376       else if (*sp >= 'A' && *sp <= 'Z')
  4377         digit = *sp - 'A' + 10;
  4378       else
  4379         digit = base; /* fail */
  4380 
  4381       if (digit >= (unsigned) base)
  4382         {
  4383           gmp_free (dp, sn);
  4384           r->_mp_size = 0;
  4385           return -1;
  4386         }
  4387 
  4388       dp[dn++] = digit;
  4389     }
  4390 
  4391   if (!dn)
  4392     {
  4393       gmp_free (dp, sn);
  4394       r->_mp_size = 0;
  4395       return -1;
  4396     }
  4397   bits = mpn_base_power_of_two_p (base);
  4398 
  4399   if (bits > 0)
  4400     {
  4401       alloc = (dn * bits + GMP_LIMB_BITS - 1) / GMP_LIMB_BITS;
  4402       rp = MPZ_REALLOC (r, alloc);
  4403       rn = mpn_set_str_bits (rp, dp, dn, bits);
  4404     }
  4405   else
  4406     {
  4407       struct mpn_base_info info;
  4408       mpn_get_base_info (&info, base);
  4409       alloc = (dn + info.exp - 1) / info.exp;
  4410       rp = MPZ_REALLOC (r, alloc);
  4411       rn = mpn_set_str_other (rp, dp, dn, base, &info);
  4412       /* Normalization, needed for all-zero input. */
  4413       assert (rn > 0);
  4414       rn -= rp[rn-1] == 0;
  4415     }
  4416   assert (rn <= alloc);
  4417   gmp_free (dp, sn);
  4418 
  4419   r->_mp_size = sign ? - rn : rn;
  4420 
  4421   return 0;
  4422 }
  4423 
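/* Usage sketch, not part of mini-gmp, of the base == 0 prefix detection
   above ("0x"/"0X" -> 16, "0b"/"0B" -> 2, leading "0" -> 8, otherwise 10)
   and of the error convention: on an invalid digit the result is set to
   zero and -1 is returned.  The function name is made up. */
static void
example_set_str_bases (void)
{
  mpz_t x;

  mpz_init (x);

  assert (mpz_set_str (x, "0x20", 0) == 0 && mpz_cmp_ui (x, 32) == 0);
  assert (mpz_set_str (x, "0b101", 0) == 0 && mpz_cmp_ui (x, 5) == 0);
  assert (mpz_set_str (x, "020", 0) == 0 && mpz_cmp_ui (x, 16) == 0);
  assert (mpz_set_str (x, "-20", 0) == 0 && mpz_cmp_si (x, -20) == 0);

  assert (mpz_set_str (x, "12z", 10) == -1 && mpz_sgn (x) == 0);

  mpz_clear (x);
}
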
  4424 int
  4425 mpz_init_set_str (mpz_t r, const char *sp, int base)
  4426 {
  4427   mpz_init (r);
  4428   return mpz_set_str (r, sp, base);
  4429 }
  4430 
  4431 size_t
  4432 mpz_out_str (FILE *stream, int base, const mpz_t x)
  4433 {
  4434   char *str;
  4435   size_t len, n;
  4436 
  4437   str = mpz_get_str (NULL, base, x);
  4438   if (!str)
  4439     return 0;
  4440   len = strlen (str);
  4441   n = fwrite (str, 1, len, stream);
  4442   gmp_free (str, len + 1);
  4443   return n;
  4444 }
  4445 
  4446 
  4447 static int
  4448 gmp_detect_endian (void)
  4449 {
  4450   static const int i = 2;
  4451   const unsigned char *p = (const unsigned char *) &i;
  4452   return 1 - *p;
  4453 }
  4454 
  4455 /* Import and export. Does not support nails. */
  4456 void
  4457 mpz_import (mpz_t r, size_t count, int order, size_t size, int endian,
  4458             size_t nails, const void *src)
  4459 {
  4460   const unsigned char *p;
  4461   ptrdiff_t word_step;
  4462   mp_ptr rp;
  4463   mp_size_t rn;
  4464 
  4465   /* The current (partial) limb. */
  4466   mp_limb_t limb;
  4467   /* The number of bytes already copied to this limb (starting from
  4468      the low end). */
  4469   size_t bytes;
  4470   /* The index where the limb should be stored, when completed. */
  4471   mp_size_t i;
  4472 
  4473   if (nails != 0)
  4474     gmp_die ("mpz_import: Nails not supported.");
  4475 
  4476   assert (order == 1 || order == -1);
  4477   assert (endian >= -1 && endian <= 1);
  4478 
  4479   if (endian == 0)
  4480     endian = gmp_detect_endian ();
  4481 
  4482   p = (unsigned char *) src;
  4483 
  4484   word_step = (order != endian) ? 2 * size : 0;
  4485 
  4486   /* Process bytes from the least significant end, so point p at the
  4487      least significant word. */
  4488   if (order == 1)
  4489     {
  4490       p += size * (count - 1);
  4491       word_step = - word_step;
  4492     }
  4493 
  4494   /* And at the least significant byte of that word. */
  4495   if (endian == 1)
  4496     p += (size - 1);
  4497 
  4498   rn = (size * count + sizeof(mp_limb_t) - 1) / sizeof(mp_limb_t);
  4499   rp = MPZ_REALLOC (r, rn);
  4500 
  4501   for (limb = 0, bytes = 0, i = 0; count > 0; count--, p += word_step)
  4502     {
  4503       size_t j;
  4504       for (j = 0; j < size; j++, p -= (ptrdiff_t) endian)
  4505         {
  4506           limb |= (mp_limb_t) *p << (bytes++ * CHAR_BIT);
  4507           if (bytes == sizeof(mp_limb_t))
  4508             {
  4509               rp[i++] = limb;
  4510               bytes = 0;
  4511               limb = 0;
  4512             }
  4513         }
  4514     }
  4515   assert (i + (bytes > 0) == rn);
  4516   if (limb != 0)
  4517     rp[i++] = limb;
  4518   else
  4519     i = mpn_normalized_size (rp, i);
  4520 
  4521   r->_mp_size = i;
  4522 }
  4523 
  4524 void *
  4525 mpz_export (void *r, size_t *countp, int order, size_t size, int endian,
  4526             size_t nails, const mpz_t u)
  4527 {
  4528   size_t count;
  4529   mp_size_t un;
  4530 
  4531   if (nails != 0)
  4532     gmp_die ("mpz_export: Nails not supported.");
  4533 
  4534   assert (order == 1 || order == -1);
  4535   assert (endian >= -1 && endian <= 1);
  4536   assert (size > 0 || u->_mp_size == 0);
  4537 
  4538   un = u->_mp_size;
  4539   count = 0;
  4540   if (un != 0)
  4541     {
  4542       size_t k;
  4543       unsigned char *p;
  4544       ptrdiff_t word_step;
  4545       /* The current (partial) limb. */
  4546       mp_limb_t limb;
  4547       /* The number of bytes left to do in this limb. */
  4548       size_t bytes;
  4549       /* The index where the limb was read. */
  4550       mp_size_t i;
  4551 
  4552       un = GMP_ABS (un);
  4553 
  4554       /* Count bytes in top limb. */
  4555       limb = u->_mp_d[un-1];
  4556       assert (limb != 0);
  4557 
  4558       k = (GMP_LIMB_BITS <= CHAR_BIT);
  4559       if (!k)
  4560         {
  4561           do {
  4562             int LOCAL_CHAR_BIT = CHAR_BIT;
  4563             k++; limb >>= LOCAL_CHAR_BIT;
  4564           } while (limb != 0);
  4565         }
  4566       /* else limb = 0; */
  4567 
  4568       count = (k + (un-1) * sizeof (mp_limb_t) + size - 1) / size;
  4569 
  4570       if (!r)
  4571         r = gmp_alloc (count * size);
  4572 
  4573       if (endian == 0)
  4574         endian = gmp_detect_endian ();
  4575 
  4576       p = (unsigned char *) r;
  4577 
  4578       word_step = (order != endian) ? 2 * size : 0;
  4579 
  4580       /* Process bytes from the least significant end, so point p at the
  4581          least significant word. */
  4582       if (order == 1)
  4583         {
  4584           p += size * (count - 1);
  4585           word_step = - word_step;
  4586         }
  4587 
  4588       /* And at the least significant byte of that word. */
  4589       if (endian == 1)
  4590         p += (size - 1);
  4591 
  4592       for (bytes = 0, i = 0, k = 0; k < count; k++, p += word_step)
  4593         {
  4594           size_t j;
  4595           for (j = 0; j < size; ++j, p -= (ptrdiff_t) endian)
  4596             {
  4597               if (sizeof (mp_limb_t) == 1)
  4598                 {
  4599                   if (i < un)
  4600                     *p = u->_mp_d[i++];
  4601                   else
  4602                     *p = 0;
  4603                 }
  4604               else
  4605                 {
  4606                   int LOCAL_CHAR_BIT = CHAR_BIT;
  4607                   if (bytes == 0)
  4608                     {
  4609                       if (i < un)
  4610                         limb = u->_mp_d[i++];
  4611                       bytes = sizeof (mp_limb_t);
  4612                     }
  4613                   *p = limb;
  4614                   limb >>= LOCAL_CHAR_BIT;
  4615                   bytes--;
  4616                 }
  4617             }
  4618         }
  4619       assert (i == un);
  4620       assert (k == count);
  4621     }
  4622 
  4623   if (countp)
  4624     *countp = count;
  4625 
  4626   return r;
  4627 }
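
/* Round-trip sketch, not part of mini-gmp: importing a big-endian byte
   string (order = 1, one byte per word, native endian, no nails) and
   exporting it again with the same parameters recovers the original
   bytes.  The function name is made up. */
static void
example_import_export_roundtrip (void)
{
  static const unsigned char bytes[5] = { 1, 2, 3, 4, 5 }; /* 0x0102030405 */
  unsigned char out[5];
  size_t count;
  mpz_t x, y;

  mpz_init (x);
  mpz_init (y);

  mpz_import (x, 5, 1, 1, 0, 0, bytes);

  /* Expected value, built without mpz_import: (0x01020304 << 8) + 5. */
  mpz_set_ui (y, 0x01020304);
  mpz_mul_2exp (y, y, 8);
  mpz_add_ui (y, y, 5);
  assert (mpz_cmp (x, y) == 0);

  mpz_export (out, &count, 1, 1, 0, 0, x);
  assert (count == 5);
  assert (memcmp (out, bytes, 5) == 0);

  mpz_clear (x);
  mpz_clear (y);
}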
