lexical_parse_float/bigint.rs
//! A simple big-integer type for slow path algorithms.
//!
//! This includes a minimal stack vector for use in big-integer arithmetic.

#![doc(hidden)]

#[cfg(feature = "radix")]
use crate::float::ExtendedFloat80;
use crate::float::RawFloat;
use crate::limits::{u32_power_limit, u64_power_limit};
#[cfg(not(feature = "compact"))]
use crate::table::get_large_int_power;
use core::{cmp, mem, ops, ptr, slice};

// BIGINT
// ------

/// Number of bits in a Bigint.
///
/// This needs to be at least the number of bits required to store
/// a Bigint, which is `log2(radix**digits)`.
/// ≅ 5600 for base-36, rounded up.
#[cfg(feature = "radix")]
const BIGINT_BITS: usize = 6000;

/// ≅ 3600 for base-10, rounded up.
#[cfg(not(feature = "radix"))]
const BIGINT_BITS: usize = 4000;

/// The number of limbs for the bigint.
const BIGINT_LIMBS: usize = BIGINT_BITS / LIMB_BITS;

/// Storage for a big integer type.
///
/// This is used for algorithms when we have a finite number of digits.
/// Specifically, it stores all the significant digits scaled to the
/// proper exponent, as an integral type, and then directly compares
/// these digits.
///
/// This requires us to store the number of significant bits, plus the
/// required number of exponent bits, since we scale everything
/// to the same exponent.
#[derive(Clone, PartialEq, Eq)]
pub struct Bigint {
    /// Significant digits for the float, stored in a big integer in LE order.
    ///
    /// This is pretty much the same number of digits for any radix, since the
    /// significant digits balance out the zeros from the exponent:
    /// 1. Decimal is 1091 digits, or 767 mantissa digits + 324 exponent zeros.
    /// 2. Base 6 is 1097 digits, or 680 mantissa digits + 417 exponent zeros.
    /// 3. Base 36 is 1086 digits, or 877 mantissa digits + 209 exponent zeros.
    ///
    /// However, the number of bits required is larger for large radixes:
    /// for decimal, we need `log2(10**1091) ≅ 3600`, while for base 36
    /// we need `log2(36**1086) ≅ 5600`. Since we use uninitialized data,
    /// we avoid a major performance hit from the large buffer size.
    pub data: StackVec<BIGINT_LIMBS>,
}

impl Bigint {
    /// Construct a bigint representing 0.
    #[inline(always)]
    pub const fn new() -> Self {
        Self {
            data: StackVec::new(),
        }
    }

    /// Construct a bigint from an integer.
    #[inline(always)]
    pub fn from_u32(value: u32) -> Self {
        Self {
            data: StackVec::from_u32(value),
        }
    }

    /// Construct a bigint from an integer.
    #[inline(always)]
    pub fn from_u64(value: u64) -> Self {
        Self {
            data: StackVec::from_u64(value),
        }
    }

    /// Get the high 64 bits, and if any truncated bits were non-zero.
    #[inline(always)]
    pub fn hi64(&self) -> (u64, bool) {
        self.data.hi64()
    }

    /// Multiply in place by `base^exp`.
    #[inline]
    pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> {
        let (odd, shift) = split_radix(base);
        if odd != 0 {
            pow::<BIGINT_LIMBS>(&mut self.data, odd, exp)?;
        }
        if shift != 0 {
            shl(&mut self.data, (exp * shift) as usize)?;
        }
        Some(())
    }

    /// Calculate the bit-length of the big-integer.
    #[inline]
    pub fn bit_length(&self) -> u32 {
        bit_length(&self.data)
    }
}
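
// A minimal usage sketch (illustrative, not part of the upstream test
// suite): `pow` factors the base via `split_radix`, so multiplying by
// 10^2 is a multiply by 5^2 plus a 2-bit shift.
#[cfg(test)]
mod bigint_example {
    use super::*;

    #[test]
    fn pow_and_bit_length() {
        // 3 * 10^2 == 300.
        let mut big = Bigint::from_u64(3);
        big.pow(10, 2).unwrap();
        // 300 needs 9 bits, so `hi64` normalizes it up by 55 bits.
        assert_eq!(big.hi64(), (300u64 << 55, false));
        assert_eq!(big.bit_length(), 9);
    }
}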

impl ops::MulAssign<&Bigint> for Bigint {
    fn mul_assign(&mut self, rhs: &Bigint) {
        self.data *= &rhs.data;
    }
}

/// Number of bits in a Bigfloat.
///
/// This needs to be at least the number of bits required to store
/// a Bigfloat, which is `F::EXPONENT_BIAS + F::BITS`.
/// Bias ≅ 1075, with 64 extra bits for the digits.
#[cfg(feature = "radix")]
const BIGFLOAT_BITS: usize = 1200;

/// The number of limbs for the Bigfloat.
#[cfg(feature = "radix")]
const BIGFLOAT_LIMBS: usize = BIGFLOAT_BITS / LIMB_BITS;

/// Storage for a big floating-point type.
///
/// This is used for the algorithm with a non-finite digit count, which creates
/// a representation of `b+h` and the float scaled into the range `[1, radix)`.
#[cfg(feature = "radix")]
#[derive(Clone, PartialEq, Eq)]
pub struct Bigfloat {
    /// Significant digits for the float, stored in a big integer in LE order.
    ///
    /// This only needs ~1075 bits for the exponent, and ~64 more for the
    /// significant digits, since it's based on a theoretical representation
    /// of the halfway point. This means we can have a significantly smaller
    /// representation. The largest exponent magnitude for a 64-bit float is
    /// 1074 (from the subnormal `2^-1074`), which produces the same number
    /// of bits in any radix.
    pub data: StackVec<BIGFLOAT_LIMBS>,
    /// Binary exponent for the float type.
    pub exp: i32,
}

#[cfg(feature = "radix")]
impl Bigfloat {
    /// Construct a bigfloat representing 0.
    #[inline(always)]
    pub const fn new() -> Self {
        Self {
            data: StackVec::new(),
            exp: 0,
        }
    }

    /// Construct a bigfloat from an extended-precision float.
    #[inline(always)]
    pub fn from_float(fp: ExtendedFloat80) -> Self {
        Self {
            data: StackVec::from_u64(fp.mant),
            exp: fp.exp,
        }
    }

    /// Construct a bigfloat from an integer.
    #[inline(always)]
    pub fn from_u32(value: u32) -> Self {
        Self {
            data: StackVec::from_u32(value),
            exp: 0,
        }
    }

    /// Construct a bigfloat from an integer.
    #[inline(always)]
    pub fn from_u64(value: u64) -> Self {
        Self {
            data: StackVec::from_u64(value),
            exp: 0,
        }
    }

    /// Multiply in place by `base^exp`.
    #[inline]
    pub fn pow(&mut self, base: u32, exp: u32) -> Option<()> {
        let (odd, shift) = split_radix(base);
        if odd != 0 {
            pow::<BIGFLOAT_LIMBS>(&mut self.data, odd, exp)?;
        }
        if shift != 0 {
            self.exp += (exp * shift) as i32;
        }
        Some(())
    }

    /// Shift-left the entire buffer `n` bits, where `n` is less than the limb size.
    #[inline]
    pub fn shl_bits(&mut self, n: usize) -> Option<()> {
        shl_bits(&mut self.data, n)
    }

    /// Shift-left the entire buffer `n` limbs.
    #[inline]
    pub fn shl_limbs(&mut self, n: usize) -> Option<()> {
        shl_limbs(&mut self.data, n)
    }

    /// Shift-left the entire buffer `n` bits.
    #[inline]
    pub fn shl(&mut self, n: usize) -> Option<()> {
        shl(&mut self.data, n)
    }

    /// Get the number of leading zero bits in the storage.
    /// Assumes the value is normalized.
    #[inline]
    pub fn leading_zeros(&self) -> u32 {
        leading_zeros(&self.data)
    }
}

#[cfg(feature = "radix")]
impl ops::MulAssign<&Bigfloat> for Bigfloat {
    #[inline]
    #[allow(clippy::suspicious_op_assign_impl)]
    fn mul_assign(&mut self, rhs: &Bigfloat) {
        large_mul(&mut self.data, &rhs.data).unwrap();
        self.exp += rhs.exp;
    }
}
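
// A minimal usage sketch (illustrative): `Bigfloat::pow` routes the odd
// factor through bigint multiplication and folds the power-of-two factor
// into the binary exponent.
#[cfg(all(test, feature = "radix"))]
mod bigfloat_example {
    use super::*;

    #[test]
    fn pow_tracks_binary_exponent() {
        let mut big = Bigfloat::from_u64(1);
        // 10 == 5 * 2, so `pow(10, 1)` multiplies by 5 and bumps `exp` by 1.
        big.pow(10, 1).unwrap();
        assert_eq!(big.exp, 1);
        assert_eq!(big.data.hi64(), (5u64 << 61, false));
    }
}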

// VEC
// ---

/// Simple stack vector implementation.
#[derive(Clone)]
pub struct StackVec<const SIZE: usize> {
    /// The raw buffer for the elements.
    data: [mem::MaybeUninit<Limb>; SIZE],
    /// The number of elements in the array (we never need more than `u16::MAX`).
    length: u16,
}

/// Extract the hi bits from the buffer.
macro_rules! hi {
    // # Safety
    //
    // Safe as long as `stackvec.len() >= 1`.
    (@1 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        $fn(unsafe { index_unchecked!($rview[0]) as $t })
    }};

    // # Safety
    //
    // Safe as long as `stackvec.len() >= 2`.
    (@2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let r0 = unsafe { index_unchecked!($rview[0]) as $t };
        let r1 = unsafe { index_unchecked!($rview[1]) as $t };
        $fn(r0, r1)
    }};

    // # Safety
    //
    // Safe as long as `stackvec.len() >= 2`.
    (@nonzero2 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let (v, n) = hi!(@2 $self, $rview, $t, $fn);
        (v, n || unsafe { nonzero($self, 2) })
    }};

    // # Safety
    //
    // Safe as long as `stackvec.len() >= 3`.
    (@3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let r0 = unsafe { index_unchecked!($rview[0]) as $t };
        let r1 = unsafe { index_unchecked!($rview[1]) as $t };
        let r2 = unsafe { index_unchecked!($rview[2]) as $t };
        $fn(r0, r1, r2)
    }};

    // # Safety
    //
    // Safe as long as `stackvec.len() >= 3`.
    (@nonzero3 $self:ident, $rview:ident, $t:ident, $fn:ident) => {{
        let (v, n) = hi!(@3 $self, $rview, $t, $fn);
        (v, n || unsafe { nonzero($self, 3) })
    }};
}

impl<const SIZE: usize> StackVec<SIZE> {
    /// Construct an empty vector.
    #[inline]
    pub const fn new() -> Self {
        Self {
            length: 0,
            data: [mem::MaybeUninit::uninit(); SIZE],
        }
    }

    /// Get a mutable ptr to the current start of the big integer.
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut Limb {
        self.data.as_mut_ptr().cast::<Limb>()
    }

    /// Get a ptr to the current start of the big integer.
    #[inline]
    pub fn as_ptr(&self) -> *const Limb {
        self.data.as_ptr().cast::<Limb>()
    }

    /// Construct a vector from an existing slice.
    #[inline]
    pub fn try_from(x: &[Limb]) -> Option<Self> {
        let mut vec = Self::new();
        vec.try_extend(x)?;
        Some(vec)
    }

    /// Sets the length of a vector.
    ///
    /// This will explicitly set the size of the vector, without actually
    /// modifying its buffers, so it is up to the caller to ensure that the
    /// vector is actually the specified size.
    ///
    /// # Safety
    ///
    /// Safe as long as `len` is less than or equal to `SIZE`, and the first
    /// `len` elements are initialized.
    #[inline]
    pub unsafe fn set_len(&mut self, len: usize) {
        debug_assert!(len <= u16::MAX as usize);
        debug_assert!(len <= SIZE);
        self.length = len as u16;
    }

    /// The number of elements stored in the vector.
    #[inline]
    pub const fn len(&self) -> usize {
        self.length as usize
    }

    /// If the vector is empty.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// The number of items the vector can hold.
    #[inline]
    pub const fn capacity(&self) -> usize {
        SIZE
    }

    /// Append an item to the vector, without bounds checking.
    ///
    /// # Safety
    ///
    /// Safe if `self.len() < self.capacity()`.
    #[inline]
    pub unsafe fn push_unchecked(&mut self, value: Limb) {
        debug_assert!(self.len() < self.capacity());
        // SAFETY: safe, since the length is less than the capacity.
        unsafe {
            let len = self.len();
            let ptr = self.as_mut_ptr().add(len);
            ptr.write(value);
            self.length += 1;
        }
    }

    /// Append an item to the vector.
    #[inline]
    pub fn try_push(&mut self, value: Limb) -> Option<()> {
        if self.len() < self.capacity() {
            // SAFETY: safe, since the length is less than the capacity.
            unsafe { self.push_unchecked(value) };
            Some(())
        } else {
            None
        }
    }

    /// Remove an item from the end of a vector, without bounds checking.
    ///
    /// # Safety
    ///
    /// Safe if `self.len() > 0`.
    #[inline]
    pub unsafe fn pop_unchecked(&mut self) -> Limb {
        debug_assert!(!self.is_empty());
        // SAFETY: safe if `self.length > 0`.
        // We have a trivial drop and copy, so this is safe.
        self.length -= 1;
        unsafe { ptr::read(self.as_mut_ptr().add(self.len())) }
    }

    /// Remove an item from the end of the vector and return it, or None if empty.
    #[inline]
    pub fn pop(&mut self) -> Option<Limb> {
        if self.is_empty() {
            None
        } else {
            // SAFETY: safe, since `self.len() > 0`.
            unsafe { Some(self.pop_unchecked()) }
        }
    }

    /// Add items from a slice to the vector, without bounds checking.
    ///
    /// # Safety
    ///
    /// Safe if `self.len() + slc.len() <= self.capacity()`.
    #[inline]
    pub unsafe fn extend_unchecked(&mut self, slc: &[Limb]) {
        let index = self.len();
        let new_len = index + slc.len();
        debug_assert!(self.len() + slc.len() <= self.capacity());
        let src = slc.as_ptr();
        // SAFETY: safe if `self.len() + slc.len() <= self.capacity()`.
        unsafe {
            let dst = self.as_mut_ptr().add(index);
            ptr::copy_nonoverlapping(src, dst, slc.len());
            self.set_len(new_len);
        }
    }

    /// Copy elements from a slice and append them to the vector.
    #[inline]
    pub fn try_extend(&mut self, slc: &[Limb]) -> Option<()> {
        if self.len() + slc.len() <= self.capacity() {
            // SAFETY: safe, since `self.len() + slc.len() <= self.capacity()`.
            unsafe { self.extend_unchecked(slc) };
            Some(())
        } else {
            None
        }
    }

    /// Truncate vector to new length, dropping any items after `len`.
    ///
    /// # Safety
    ///
    /// Safe as long as `len <= self.len()`, so no uninitialized memory
    /// is exposed through the slice view.
    unsafe fn truncate_unchecked(&mut self, len: usize) {
        debug_assert!(len <= self.len());
        self.length = len as u16;
    }

    /// Resize the buffer, without bounds checking.
    ///
    /// # Safety
    ///
    /// Safe as long as `len <= self.capacity()`.
    #[inline]
    pub unsafe fn resize_unchecked(&mut self, len: usize, value: Limb) {
        debug_assert!(len <= self.capacity());
        let old_len = self.len();
        if len > old_len {
            // We have a trivial drop, so there's no worry here.
            // Just don't set the length until all values have been written,
            // so we don't accidentally read uninitialized memory.

            // SAFETY: safe if `len <= self.capacity()`.
            let count = len - old_len;
            for index in 0..count {
                unsafe {
                    let dst = self.as_mut_ptr().add(old_len + index);
                    ptr::write(dst, value);
                }
            }
            self.length = len as u16;
        } else {
            // SAFETY: safe since `len <= self.len()`.
            unsafe { self.truncate_unchecked(len) };
        }
    }

    /// Try to resize the buffer.
    ///
    /// If the new length is smaller than the current length, truncate
    /// the input. If it's larger, then append elements to the buffer.
    #[inline]
    pub fn try_resize(&mut self, len: usize, value: Limb) -> Option<()> {
        if len > self.capacity() {
            None
        } else {
            // SAFETY: safe, since `len <= self.capacity()`.
            unsafe { self.resize_unchecked(len, value) };
            Some(())
        }
    }

    // HI

    /// Get the high 16 bits from the vector.
    #[inline(always)]
    pub fn hi16(&self) -> (u16, bool) {
        let rview = self.rview();
        // SAFETY: the buffer must be at least `self.len()` limbs long.
        match self.len() {
            0 => (0, false),
            1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi16_1),
            1 => hi!(@1 self, rview, u64, u64_to_hi16_1),
            _ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi16_2),
            _ => hi!(@nonzero2 self, rview, u64, u64_to_hi16_2),
        }
    }

    /// Get the high 32 bits from the vector.
    #[inline(always)]
    pub fn hi32(&self) -> (u32, bool) {
        let rview = self.rview();
        // SAFETY: the buffer must be at least `self.len()` limbs long.
        match self.len() {
            0 => (0, false),
            1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi32_1),
            1 => hi!(@1 self, rview, u64, u64_to_hi32_1),
            _ if LIMB_BITS == 32 => hi!(@nonzero2 self, rview, u32, u32_to_hi32_2),
            _ => hi!(@nonzero2 self, rview, u64, u64_to_hi32_2),
        }
    }

    /// Get the high 64 bits from the vector.
    #[inline(always)]
    pub fn hi64(&self) -> (u64, bool) {
        let rview = self.rview();
        // SAFETY: the buffer must be at least `self.len()` limbs long.
        match self.len() {
            0 => (0, false),
            1 if LIMB_BITS == 32 => hi!(@1 self, rview, u32, u32_to_hi64_1),
            1 => hi!(@1 self, rview, u64, u64_to_hi64_1),
            2 if LIMB_BITS == 32 => hi!(@2 self, rview, u32, u32_to_hi64_2),
            2 => hi!(@2 self, rview, u64, u64_to_hi64_2),
            _ if LIMB_BITS == 32 => hi!(@nonzero3 self, rview, u32, u32_to_hi64_3),
            _ => hi!(@nonzero2 self, rview, u64, u64_to_hi64_2),
        }
    }

    // FROM

    /// Create StackVec from u16 value.
    #[inline(always)]
    pub fn from_u16(x: u16) -> Self {
        let mut vec = Self::new();
        assert!(1 <= vec.capacity());
        // SAFETY: safe since we can always add 1 item.
        unsafe { vec.push_unchecked(x as Limb) };
        vec.normalize();
        vec
    }

    /// Create StackVec from u32 value.
    #[inline(always)]
    pub fn from_u32(x: u32) -> Self {
        let mut vec = Self::new();
        assert!(1 <= vec.capacity());
        // SAFETY: safe since we can always add 1 item.
        unsafe { vec.push_unchecked(x as Limb) };
        vec.normalize();
        vec
    }

    /// Create StackVec from u64 value.
    #[inline(always)]
    pub fn from_u64(x: u64) -> Self {
        let mut vec = Self::new();
        assert!(2 <= vec.capacity());
        if LIMB_BITS == 32 {
            // SAFETY: safe since we can always add 2 items.
            unsafe {
                vec.push_unchecked(x as Limb);
                vec.push_unchecked((x >> 32) as Limb);
            }
        } else {
            // SAFETY: safe since we can always add 1 item.
            unsafe { vec.push_unchecked(x as Limb) };
        }
        vec.normalize();
        vec
    }

    // INDEX

    /// Create a reverse view of the vector for indexing.
    #[inline]
    pub fn rview(&self) -> ReverseView<Limb> {
        ReverseView {
            inner: &*self,
        }
    }

    // MATH

    /// Normalize the integer, so any leading zero values are removed.
    #[inline]
    pub fn normalize(&mut self) {
        // We don't care if this wraps: the index is bounds-checked.
        while let Some(&value) = self.get(self.len().wrapping_sub(1)) {
            if value == 0 {
                self.length -= 1;
            } else {
                break;
            }
        }
    }

    /// Get if the big integer is normalized.
    #[inline]
    #[allow(clippy::match_like_matches_macro)]
    pub fn is_normalized(&self) -> bool {
        // We don't care if this wraps: the index is bounds-checked.
        match self.get(self.len().wrapping_sub(1)) {
            Some(&0) => false,
            _ => true,
        }
    }

    /// Calculate the fast quotient for a single limb-bit quotient.
    ///
    /// This requires a non-normalized divisor, where there are at least
    /// `integral_binary_factor` leading zero bits, to ensure at maximum a
    /// single digit will be produced for a single base.
    ///
    /// Warning: this is not a general-purpose division algorithm,
    /// it is highly specialized for peeling off singular digits.
    #[inline]
    #[cfg(feature = "radix")]
    pub fn quorem(&mut self, y: &Self) -> Limb {
        large_quorem(self, y)
    }

    /// AddAssign small integer.
    #[inline]
    pub fn add_small(&mut self, y: Limb) -> Option<()> {
        small_add(self, y)
    }

    /// MulAssign small integer.
    #[inline]
    pub fn mul_small(&mut self, y: Limb) -> Option<()> {
        small_mul(self, y)
    }
}
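
// Minimal usage sketches (illustrative, not part of the upstream test
// suite) for the `StackVec` API: the fallible `try_*` methods report
// overflow by returning `None`, `hi64` reports truncation, and `normalize`
// strips high zero limbs.
#[cfg(test)]
mod stackvec_example {
    use super::*;

    #[test]
    fn try_ops_respect_capacity() {
        let mut vec = StackVec::<2>::new();
        assert_eq!(vec.try_push(1), Some(()));
        assert_eq!(vec.try_push(2), Some(()));
        assert_eq!(vec.try_push(3), None);
        assert_eq!(vec.pop(), Some(2));
        assert_eq!(vec.try_resize(2, 0), Some(()));
        assert_eq!(vec.try_resize(3, 0), None);
        assert_eq!((vec[0], vec[1]), (1, 0));
    }

    #[test]
    fn hi64_reports_truncation() {
        let vec = StackVec::<4>::from_u64(u64::MAX);
        assert_eq!(vec.hi64(), (u64::MAX, false));

        // 3 * (2^64 - 1) needs 66 bits; the two truncated bits are `01`.
        let mut vec = StackVec::<4>::from_u64(u64::MAX);
        vec.mul_small(3).unwrap();
        assert_eq!(vec.hi64(), (0xBFFF_FFFF_FFFF_FFFF, true));
    }

    #[test]
    fn normalize_strips_leading_zero_limbs() {
        let mut vec = StackVec::<4>::from_u64(1);
        vec.try_push(0).unwrap();
        assert!(!vec.is_normalized());
        vec.normalize();
        assert!(vec.is_normalized());
        assert_eq!(vec.len(), 1);
    }
}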

impl<const SIZE: usize> PartialEq for StackVec<SIZE> {
    #[inline]
    #[allow(clippy::op_ref)]
    fn eq(&self, other: &Self) -> bool {
        use core::ops::Deref;
        self.len() == other.len() && self.deref() == other.deref()
    }
}

impl<const SIZE: usize> Eq for StackVec<SIZE> {}

impl<const SIZE: usize> cmp::PartialOrd for StackVec<SIZE> {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(compare(self, other))
    }
}

impl<const SIZE: usize> cmp::Ord for StackVec<SIZE> {
    #[inline]
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        compare(self, other)
    }
}

impl<const SIZE: usize> ops::Deref for StackVec<SIZE> {
    type Target = [Limb];
    #[inline]
    fn deref(&self) -> &[Limb] {
        // SAFETY: safe since `self.data[..self.len()]` must be initialized
        // and `self.len() <= self.capacity()`.
        unsafe {
            let ptr = self.data.as_ptr() as *const Limb;
            slice::from_raw_parts(ptr, self.len())
        }
    }
}

impl<const SIZE: usize> ops::DerefMut for StackVec<SIZE> {
    #[inline]
    fn deref_mut(&mut self) -> &mut [Limb] {
        // SAFETY: safe since `self.data[..self.len()]` must be initialized
        // and `self.len() <= self.capacity()`.
        unsafe {
            let ptr = self.data.as_mut_ptr() as *mut Limb;
            slice::from_raw_parts_mut(ptr, self.len())
        }
    }
}

impl<const SIZE: usize> ops::MulAssign<&[Limb]> for StackVec<SIZE> {
    #[inline]
    fn mul_assign(&mut self, rhs: &[Limb]) {
        large_mul(self, rhs).unwrap();
    }
}

// REVERSE VIEW
// ------------

/// Reverse, immutable view of a sequence.
pub struct ReverseView<'a, T: 'a> {
    inner: &'a [T],
}

impl<'a, T: 'a> ReverseView<'a, T> {
    /// Get a reference to a value, without bounds checking.
    ///
    /// # Safety
    ///
    /// Safe if forward indexing would be safe for the type,
    /// that is, `index < self.inner.len()`.
    #[inline(always)]
    pub unsafe fn get_unchecked(&self, index: usize) -> &T {
        debug_assert!(index < self.inner.len());
        let len = self.inner.len();
        unsafe { self.inner.get_unchecked(len - index - 1) }
    }

    /// Get a reference to a value.
    #[inline(always)]
    pub fn get(&self, index: usize) -> Option<&T> {
        let len = self.inner.len();
        // We don't care if this wraps: the index is bounds-checked.
        self.inner.get(len.wrapping_sub(index + 1))
    }
}

impl<'a, T> ops::Index<usize> for ReverseView<'a, T> {
    type Output = T;

    #[inline]
    fn index(&self, index: usize) -> &T {
        let len = self.inner.len();
        &(*self.inner)[len - index - 1]
    }
}

// HI
// --

/// Check if any of the remaining bits are non-zero.
///
/// # Safety
///
/// Safe as long as `rindex <= x.len()`.
#[inline]
pub unsafe fn nonzero(x: &[Limb], rindex: usize) -> bool {
    debug_assert!(rindex <= x.len());

    let len = x.len();
    // SAFETY: safe if `rindex <= x.len()`, since then `len - rindex <= len`.
    let slc = unsafe { &index_unchecked!(x[..len - rindex]) };
    slc.iter().rev().any(|&x| x != 0)
}

// These return the high X bits and if the bits were truncated.

/// Shift 32-bit integer to high 16-bits.
#[inline]
pub const fn u32_to_hi16_1(r0: u32) -> (u16, bool) {
    let r0 = u32_to_hi32_1(r0).0;
    ((r0 >> 16) as u16, r0 as u16 != 0)
}

/// Shift 2 32-bit integers to high 16-bits.
#[inline]
pub const fn u32_to_hi16_2(r0: u32, r1: u32) -> (u16, bool) {
    let (r0, n) = u32_to_hi32_2(r0, r1);
    ((r0 >> 16) as u16, n || r0 as u16 != 0)
}

/// Shift 32-bit integer to high 32-bits.
#[inline]
pub const fn u32_to_hi32_1(r0: u32) -> (u32, bool) {
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}

/// Shift 2 32-bit integers to high 32-bits.
#[inline]
pub const fn u32_to_hi32_2(r0: u32, r1: u32) -> (u32, bool) {
    let ls = r0.leading_zeros();
    let rs = 32 - ls;
    let v = match ls {
        0 => r0,
        _ => (r0 << ls) | (r1 >> rs),
    };
    let n = r1 << ls != 0;
    (v, n)
}

/// Shift 32-bit integer to high 64-bits.
#[inline]
pub const fn u32_to_hi64_1(r0: u32) -> (u64, bool) {
    u64_to_hi64_1(r0 as u64)
}

/// Shift 2 32-bit integers to high 64-bits.
#[inline]
pub const fn u32_to_hi64_2(r0: u32, r1: u32) -> (u64, bool) {
    let r0 = (r0 as u64) << 32;
    let r1 = r1 as u64;
    u64_to_hi64_1(r0 | r1)
}

/// Shift 3 32-bit integers to high 64-bits.
#[inline]
pub const fn u32_to_hi64_3(r0: u32, r1: u32, r2: u32) -> (u64, bool) {
    let r0 = r0 as u64;
    let r1 = (r1 as u64) << 32;
    let r2 = r2 as u64;
    u64_to_hi64_2(r0, r1 | r2)
}

/// Shift 64-bit integer to high 16-bits.
#[inline]
pub const fn u64_to_hi16_1(r0: u64) -> (u16, bool) {
    let r0 = u64_to_hi64_1(r0).0;
    ((r0 >> 48) as u16, r0 as u16 != 0)
}

/// Shift 2 64-bit integers to high 16-bits.
#[inline]
pub const fn u64_to_hi16_2(r0: u64, r1: u64) -> (u16, bool) {
    let (r0, n) = u64_to_hi64_2(r0, r1);
    ((r0 >> 48) as u16, n || r0 as u16 != 0)
}

/// Shift 64-bit integer to high 32-bits.
#[inline]
pub const fn u64_to_hi32_1(r0: u64) -> (u32, bool) {
    let r0 = u64_to_hi64_1(r0).0;
    ((r0 >> 32) as u32, r0 as u32 != 0)
}

/// Shift 2 64-bit integers to high 32-bits.
#[inline]
pub const fn u64_to_hi32_2(r0: u64, r1: u64) -> (u32, bool) {
    let (r0, n) = u64_to_hi64_2(r0, r1);
    ((r0 >> 32) as u32, n || r0 as u32 != 0)
}

/// Shift 64-bit integer to high 64-bits.
#[inline]
pub const fn u64_to_hi64_1(r0: u64) -> (u64, bool) {
    let ls = r0.leading_zeros();
    (r0 << ls, false)
}

/// Shift 2 64-bit integers to high 64-bits.
#[inline]
pub const fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
    let ls = r0.leading_zeros();
    let rs = 64 - ls;
    let v = match ls {
        0 => r0,
        _ => (r0 << ls) | (r1 >> rs),
    };
    let n = r1 << ls != 0;
    (v, n)
}

// POWERS
// ------

/// MulAssign by a power.
///
/// Theoretically...
///
/// Use an exponentiation by squaring method, since it reduces the time
/// complexity of the multiplication to ~`O(log(n))` for the squaring,
/// and `O(n*m)` for the result. Since `m` is typically a lower-order
/// factor, this significantly reduces the number of multiplications
/// we need to do. Iteratively multiplying by small powers follows
/// the nth triangular number series, which scales as `O(p^2)`, but
/// where `p` is `n+m`. In short, it scales very poorly.
///
/// Practically...
///
/// Exponentiation by Squaring:
///     running 2 tests
///     test bigcomp_f32_lexical ... bench:   1,018 ns/iter (+/- 78)
///     test bigcomp_f64_lexical ... bench:   3,639 ns/iter (+/- 1,007)
///
/// Exponentiation by Iterative Small Powers:
///     running 2 tests
///     test bigcomp_f32_lexical ... bench:   518 ns/iter (+/- 31)
///     test bigcomp_f64_lexical ... bench:   583 ns/iter (+/- 47)
///
/// Exponentiation by Iterative Large Powers (of 2):
///     running 2 tests
///     test bigcomp_f32_lexical ... bench:   671 ns/iter (+/- 31)
///     test bigcomp_f64_lexical ... bench:   1,394 ns/iter (+/- 47)
///
/// The following benchmarks were run on `1 * 5^300`, using native `pow`,
/// a version with only small powers, and one with pre-computed powers
/// of `5^(3 * max_exp)`, rather than `5^(5 * max_exp)`.
///
/// However, using large powers is crucial for good performance for higher
/// powers.
///     pow/default             time:   [426.20 ns 427.96 ns 429.89 ns]
///     pow/small               time:   [2.9270 us 2.9411 us 2.9565 us]
///     pow/large:3             time:   [838.51 ns 842.21 ns 846.27 ns]
///
/// Even using worst-case scenarios, exponentiation by squaring is
/// significantly slower for our workloads. Just multiply by small powers,
/// in simple cases, and use precalculated large powers in other cases.
///
/// Furthermore, using sufficiently big large powers is also crucial for
/// performance. This is a tradeoff of binary size and performance, and
/// using a single value at ~`5^(5 * max_exp)` seems optimal.
pub fn pow<const SIZE: usize>(x: &mut StackVec<SIZE>, base: u32, mut exp: u32) -> Option<()> {
    // Minimize the number of iterations for large exponents: just
    // do a few steps with large powers.
    #[cfg(not(feature = "compact"))]
    {
        let (large, step) = get_large_int_power(base);
        while exp >= step {
            large_mul(x, large)?;
            exp -= step;
        }
    }

    // Now use our pre-computed small powers iteratively.
    let small_step = if LIMB_BITS == 32 {
        u32_power_limit(base)
    } else {
        u64_power_limit(base)
    };
    let max_native = (base as Limb).pow(small_step);
    while exp >= small_step {
        small_mul(x, max_native)?;
        exp -= small_step;
    }
    if exp != 0 {
        // SAFETY: safe, since `exp < small_step`.
        let small_power = unsafe { f64::int_pow_fast_path(exp as usize, base) };
        small_mul(x, small_power as Limb)?;
    }
    Some(())
}
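
// A minimal usage sketch (illustrative): `pow` is fed the odd part of a
// radix (`split_radix` peels off the powers of two), so we exercise it
// with base 5 here, assuming the small-power tables cover the exponent.
#[cfg(test)]
mod pow_example {
    use super::*;

    #[test]
    fn pow_small_odd_base() {
        let mut x = StackVec::<8>::from_u32(1);
        pow(&mut x, 5, 20).unwrap();
        // 5^20 == 95367431640625, a 47-bit value.
        assert_eq!(x.hi64(), (95367431640625u64 << 17, false));
    }
}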

// SCALAR
// ------

/// Add two small integers, returning the sum and whether overflow occurred.
#[inline(always)]
pub fn scalar_add(x: Limb, y: Limb) -> (Limb, bool) {
    x.overflowing_add(y)
}

/// Multiply two small integers with carry, returning the (low, high)
/// components of the product.
#[inline(always)]
pub fn scalar_mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
    // Cannot overflow, as long as the wide type is 2x as wide. This is
    // because the following is always true:
    // `Wide::MAX - (Narrow::MAX * Narrow::MAX) >= Narrow::MAX`
    let z: Wide = (x as Wide) * (y as Wide) + (carry as Wide);
    (z as Limb, (z >> LIMB_BITS) as Limb)
}

// SMALL
// -----

/// Add a small integer to the bigint, starting from the given offset.
#[inline]
pub fn small_add_from<const SIZE: usize>(
    x: &mut StackVec<SIZE>,
    y: Limb,
    start: usize,
) -> Option<()> {
    let mut index = start;
    let mut carry = y;
    while carry != 0 && index < x.len() {
        // SAFETY: safe, since `index < x.len()`.
        let result = scalar_add(unsafe { index_unchecked!(x[index]) }, carry);
        unsafe { index_unchecked_mut!(x[index]) = result.0 };
        carry = result.1 as Limb;
        index += 1;
    }
    // If we carried past all the elements, add to the end of the buffer.
    if carry != 0 {
        x.try_push(carry)?;
    }
    Some(())
}

/// Add a small integer to the bigint.
#[inline(always)]
pub fn small_add<const SIZE: usize>(x: &mut StackVec<SIZE>, y: Limb) -> Option<()> {
    small_add_from(x, y, 0)
}

/// Multiply the bigint by a small integer.
#[inline]
pub fn small_mul<const SIZE: usize>(x: &mut StackVec<SIZE>, y: Limb) -> Option<()> {
    let mut carry = 0;
    for xi in x.iter_mut() {
        let result = scalar_mul(*xi, y, carry);
        *xi = result.0;
        carry = result.1;
    }
    // If we carried past all the elements, add to the end of the buffer.
    if carry != 0 {
        x.try_push(carry)?;
    }
    Some(())
}
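
// A minimal usage sketch (illustrative): `value = value * radix + digit`
// via `small_mul`/`small_add` is the basic digit-accumulation step used
// by the slow-path parsers.
#[cfg(test)]
mod small_ops_example {
    use super::*;

    #[test]
    fn accumulate_digits() {
        let mut x = StackVec::<4>::new();
        for &digit in &[1, 2, 3] {
            small_mul(&mut x, 10).unwrap();
            small_add(&mut x, digit).unwrap();
        }
        // 123 is a 7-bit value.
        assert_eq!(x.hi64(), (123u64 << 57, false));
    }
}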

// LARGE
// -----

/// Add a bigint to the bigint, starting from the given offset.
pub fn large_add_from<const SIZE: usize>(
    x: &mut StackVec<SIZE>,
    y: &[Limb],
    start: usize,
) -> Option<()> {
    // The effective x buffer is from `start..x.len()`, so we need to treat
    // that as the current range. If the effective y buffer is longer, we
    // need to resize to that, plus the start index.
    if y.len() > x.len().saturating_sub(start) {
        // Ensure we return `None` if we can't extend the buffer.
        // This avoids any unsafe behavior afterwards.
        x.try_resize(y.len() + start, 0)?;
    }

    // Iteratively add elements from y to x.
    let mut carry = false;
    for index in 0..y.len() {
        // SAFETY: safe since `start + index < x.len()`:
        // we returned `None` in `try_resize` if this wasn't true.
        let xi = unsafe { &mut index_unchecked_mut!(x[start + index]) };
        // SAFETY: safe since `index < y.len()`.
        let yi = unsafe { index_unchecked!(y[index]) };

        // Only one of the two ops can overflow, since we added at most
        // `Limb::max_value() + Limb::max_value()`. Add the previous carry,
        // and store the current carry for the next.
        let result = scalar_add(*xi, yi);
        *xi = result.0;
        let mut tmp = result.1;
        if carry {
            let result = scalar_add(*xi, 1);
            *xi = result.0;
            tmp |= result.1;
        }
        carry = tmp;
    }

    // Handle overflow.
    if carry {
        small_add_from(x, 1, y.len() + start)?;
    }
    Some(())
}

/// Add a bigint to the bigint.
#[inline(always)]
pub fn large_add<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Option<()> {
    large_add_from(x, y, 0)
}
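
// A minimal usage sketch (illustrative): `large_add_from` with a non-zero
// offset adds `y * 2^(LIMB_BITS * start)`, which is how partial products
// are accumulated in `long_mul`.
#[cfg(test)]
mod large_add_example {
    use super::*;

    #[test]
    fn add_with_offset_shifts_by_limbs() {
        let mut x = StackVec::<4>::from_u32(7);
        let y = StackVec::<4>::from_u32(1);
        large_add_from(&mut x, &y, 1).unwrap();
        // 7 + 1 * 2^LIMB_BITS: two limbs, [7, 1].
        assert_eq!(x.len(), 2);
        assert_eq!((x[0], x[1]), (7, 1));
    }
}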

/// Grade-school multiplication algorithm.
///
/// Slow, naive algorithm, using limb-bit bases and just shifting left for
/// each iteration. This could be optimized with numerous other algorithms,
/// but it's extremely simple, and works in `O(n*m)` time, which is fine
/// by me. Each iteration, of which there are `m` iterations, requires
/// `n` multiplications, and `n` additions, or grade-school multiplication.
///
/// Don't use Karatsuba multiplication, since our implementation seems to
/// be slower asymptotically, which is likely just due to the small sizes
/// we deal with here. For example, running on the following data:
///
/// ```text
/// const SMALL_X: &[u32] = &[
///     766857581, 3588187092, 1583923090, 2204542082, 1564708913, 2695310100, 3676050286,
///     1022770393, 468044626, 446028186
/// ];
/// const SMALL_Y: &[u32] = &[
///     3945492125, 3250752032, 1282554898, 1708742809, 1131807209, 3171663979, 1353276095,
///     1678845844, 2373924447, 3640713171
/// ];
/// const LARGE_X: &[u32] = &[
///     3647536243, 2836434412, 2154401029, 1297917894, 137240595, 790694805, 2260404854,
///     3872698172, 690585094, 99641546, 3510774932, 1672049983, 2313458559, 2017623719,
///     638180197, 1140936565, 1787190494, 1797420655, 14113450, 2350476485, 3052941684,
///     1993594787, 2901001571, 4156930025, 1248016552, 848099908, 2660577483, 4030871206,
///     692169593, 2835966319, 1781364505, 4266390061, 1813581655, 4210899844, 2137005290,
///     2346701569, 3715571980, 3386325356, 1251725092, 2267270902, 474686922, 2712200426,
///     197581715, 3087636290, 1379224439, 1258285015, 3230794403, 2759309199, 1494932094,
///     326310242
/// ];
/// const LARGE_Y: &[u32] = &[
///     1574249566, 868970575, 76716509, 3198027972, 1541766986, 1095120699, 3891610505,
///     2322545818, 1677345138, 865101357, 2650232883, 2831881215, 3985005565, 2294283760,
///     3468161605, 393539559, 3665153349, 1494067812, 106699483, 2596454134, 797235106,
///     705031740, 1209732933, 2732145769, 4122429072, 141002534, 790195010, 4014829800,
///     1303930792, 3649568494, 308065964, 1233648836, 2807326116, 79326486, 1262500691,
///     621809229, 2258109428, 3819258501, 171115668, 1139491184, 2979680603, 1333372297,
///     1657496603, 2790845317, 4090236532, 4220374789, 601876604, 1828177209, 2372228171,
///     2247372529
/// ];
/// ```
///
/// We get the following results:
///
/// ```text
/// mul/small:long          time:   [220.23 ns 221.47 ns 222.81 ns]
/// Found 4 outliers among 100 measurements (4.00%)
///     2 (2.00%) high mild
///     2 (2.00%) high severe
/// mul/small:karatsuba     time:   [233.88 ns 234.63 ns 235.44 ns]
/// Found 11 outliers among 100 measurements (11.00%)
///     8 (8.00%) high mild
///     3 (3.00%) high severe
/// mul/large:long          time:   [1.9365 us 1.9455 us 1.9558 us]
/// Found 12 outliers among 100 measurements (12.00%)
///     7 (7.00%) high mild
///     5 (5.00%) high severe
/// mul/large:karatsuba     time:   [4.4250 us 4.4515 us 4.4812 us]
/// ```
///
/// In short, Karatsuba multiplication is never worthwhile for our use-case.
#[allow(clippy::needless_range_loop)]
pub fn long_mul<const SIZE: usize>(x: &[Limb], y: &[Limb]) -> Option<StackVec<SIZE>> {
    // Using the immutable value, multiply by all the scalars in y, using
    // the algorithm defined above. Use a single buffer to avoid
    // frequent reallocations. Handle the first case to avoid a redundant
    // addition, since we know y.len() >= 1.
    let mut z = StackVec::<SIZE>::try_from(x)?;
    if !y.is_empty() {
        // SAFETY: safe, since `y.len() > 0`.
        let y0 = unsafe { index_unchecked!(y[0]) };
        small_mul(&mut z, y0)?;

        for index in 1..y.len() {
            // SAFETY: safe, since `index < y.len()`.
            let yi = unsafe { index_unchecked!(y[index]) };
            if yi != 0 {
                let mut zi = StackVec::<SIZE>::try_from(x)?;
                small_mul(&mut zi, yi)?;
                large_add_from(&mut z, &zi, index)?;
            }
        }
    }

    z.normalize();
    Some(z)
}

/// Multiply bigint by bigint using the grade-school multiplication algorithm.
#[inline(always)]
pub fn large_mul<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Option<()> {
    // Karatsuba multiplication never makes sense, so just use grade-school
    // multiplication.
    if y.len() == 1 {
        // SAFETY: safe since `y.len() == 1`.
        small_mul(x, unsafe { index_unchecked!(y[0]) })?;
    } else {
        *x = long_mul(y, x)?;
    }
    Some(())
}
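
// A minimal usage sketch (illustrative): squaring a one-u64 value
// exercises the `small_mul` path on 64-bit limbs and the `long_mul` path
// on 32-bit limbs, with the same result either way.
#[cfg(test)]
mod large_mul_example {
    use super::*;

    #[test]
    fn square_max_u64() {
        let mut x = StackVec::<8>::from_u64(u64::MAX);
        let y = StackVec::<8>::from_u64(u64::MAX);
        large_mul(&mut x, &y).unwrap();
        // (2^64 - 1)^2 == 2^128 - 2^65 + 1: the high 64 bits are 2^64 - 2,
        // and the truncated low bits are non-zero.
        assert_eq!(x.hi64(), (u64::MAX - 1, true));
    }
}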

/// Emit a single digit for the quotient and store the remainder in-place.
///
/// An extremely efficient division algorithm for small quotients, requiring
/// you to know the full range of the quotient prior to use. For example,
/// with a quotient that can range from [0, 10), you must have 4 leading
/// zeros in the divisor, so we can use a single-limb division to get
/// an accurate estimate of the quotient. Since we always underestimate
/// the quotient, we can add 1 and then emit the digit.
///
/// Requires a non-normalized denominator, with at least [1-6] leading
/// zeros, depending on the base (for example, 1 for base2, 6 for base36).
///
/// Adapted from David M. Gay's dtoa, and therefore under an MIT license:
/// www.netlib.org/fp/dtoa.c
#[cfg(feature = "radix")]
#[allow(clippy::many_single_char_names)]
pub fn large_quorem<const SIZE: usize>(x: &mut StackVec<SIZE>, y: &[Limb]) -> Limb {
    // If we have an empty divisor, error out early.
    assert!(!y.is_empty(), "large_quorem:: division by zero error.");
    assert!(x.len() <= y.len(), "large_quorem:: oversized numerator.");
    let mask = Limb::max_value() as Wide;

    // Numerator is smaller than the denominator, so the quotient is always 0.
    let m = x.len();
    let n = y.len();
    if m < n {
        return 0;
    }

    // Calculate our initial estimate for q.
    // SAFETY: safe since `m > 0 && m == x.len()`, since `m >= n && n > 0`.
    let xm_1 = unsafe { index_unchecked!(x[m - 1]) };
    // SAFETY: safe since `n > 0 && n == y.len()`.
    let yn_1 = unsafe { index_unchecked!(y[n - 1]) };
    let mut q = xm_1 / (yn_1 + 1);

    // Need to calculate the remainder if we don't have a 0 quotient.
    if q != 0 {
        let mut borrow: Wide = 0;
        let mut carry: Wide = 0;
        for j in 0..m {
            // SAFETY: safe, since `j < n && n == y.len()`.
            let yj = unsafe { index_unchecked!(y[j]) } as Wide;
            let p = yj * q as Wide + carry;
            carry = p >> LIMB_BITS;
            // SAFETY: safe, since `j < m && m == x.len()`.
            let xj = unsafe { index_unchecked!(x[j]) } as Wide;
            let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
            borrow = (t >> LIMB_BITS) & 1;
            // SAFETY: safe, since `j < m && m == x.len()`.
            unsafe { index_unchecked_mut!(x[j]) = t as Limb };
        }
        x.normalize();
    }

    // Check if we underestimated the quotient: the remainder is still >= y.
    if compare(x, y) != cmp::Ordering::Less {
        q += 1;
        let mut borrow: Wide = 0;
        let mut carry: Wide = 0;
        for j in 0..m {
            // SAFETY: safe, since `j < n && n == y.len()`.
            let yj = unsafe { index_unchecked!(y[j]) } as Wide;
            let p = yj + carry;
            carry = p >> LIMB_BITS;
            // SAFETY: safe, since `j < m && m == x.len()`.
            let xj = unsafe { index_unchecked!(x[j]) } as Wide;
            let t = xj.wrapping_sub(p & mask).wrapping_sub(borrow);
            borrow = (t >> LIMB_BITS) & 1;
            // SAFETY: safe, since `j < m && m == x.len()`.
            unsafe { index_unchecked_mut!(x[j]) = t as Limb };
        }
        x.normalize();
    }

    q
}

// COMPARE
// -------

/// Compare `x` to `y`, in little-endian order.
#[inline]
pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering {
    match x.len().cmp(&y.len()) {
        cmp::Ordering::Equal => {
            let iter = x.iter().rev().zip(y.iter().rev());
            for (&xi, yi) in iter {
                match xi.cmp(yi) {
                    cmp::Ordering::Equal => (),
                    ord => return ord,
                }
            }
            // Equal case.
            cmp::Ordering::Equal
        },
        ord => ord,
    }
}
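
// A minimal usage sketch (illustrative): since values are normalized, a
// longer limb buffer always means a larger value, and ties fall back to a
// most-significant-limb-first comparison.
#[cfg(test)]
mod compare_example {
    use super::*;

    #[test]
    fn orders_by_magnitude() {
        let a = StackVec::<4>::from_u64(10);
        let b = StackVec::<4>::from_u64(11);
        assert_eq!(compare(&a, &b), cmp::Ordering::Less);
        assert_eq!(compare(&b, &a), cmp::Ordering::Greater);
        assert_eq!(compare(&a, &a), cmp::Ordering::Equal);
    }
}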

// SHIFT
// -----

/// Shift-left `n` bits inside a buffer.
#[inline]
pub fn shl_bits<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
    debug_assert!(n != 0);

    // Internally, for each item, we shift left by n, and add the previous
    // right-shifted limb-bits.
    // For example, we transform (for u8) shifted left 2, to:
    //      b10100100 b01000010
    //      b10 b10010001 b00001000
    debug_assert!(n < LIMB_BITS);
    let rshift = LIMB_BITS - n;
    let lshift = n;
    let mut prev: Limb = 0;
    for xi in x.iter_mut() {
        let tmp = *xi;
        *xi <<= lshift;
        *xi |= prev >> rshift;
        prev = tmp;
    }

    // Always push the carry, even if it creates a non-normal result.
    let carry = prev >> rshift;
    if carry != 0 {
        x.try_push(carry)?;
    }

    Some(())
}

/// Shift-left `n` limbs inside a buffer.
#[inline]
pub fn shl_limbs<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
    debug_assert!(n != 0);
    if n + x.len() > x.capacity() {
        None
    } else if !x.is_empty() {
        let len = n + x.len();
        // SAFETY: safe since x is not empty, and `x.len() + n <= x.capacity()`.
        unsafe {
            // Move the elements.
            let x_len = x.len();
            let ptr = x.as_mut_ptr();
            let src = ptr;
            let dst = ptr.add(n);
            ptr::copy(src, dst, x_len);
            // Write our 0s.
            ptr::write_bytes(ptr, 0, n);
            x.set_len(len);
        }
        Some(())
    } else {
        Some(())
    }
}

/// Shift-left buffer by n bits.
#[inline]
pub fn shl<const SIZE: usize>(x: &mut StackVec<SIZE>, n: usize) -> Option<()> {
    let rem = n % LIMB_BITS;
    let div = n / LIMB_BITS;
    if rem != 0 {
        shl_bits(x, rem)?;
    }
    if div != 0 {
        shl_limbs(x, div)?;
    }
    Some(())
}
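
// A minimal usage sketch (illustrative): `shl` splits the shift into a
// sub-limb bit shift and a whole-limb shift, so the result is `x * 2^n`.
#[cfg(test)]
mod shl_example {
    use super::*;

    #[test]
    fn shift_left_multiplies_by_two_to_the_n() {
        let mut x = StackVec::<8>::from_u64(1);
        shl(&mut x, LIMB_BITS + 1).unwrap();
        // 2^(LIMB_BITS + 1) occupies LIMB_BITS + 2 bits.
        assert_eq!(bit_length(&x), LIMB_BITS as u32 + 2);
        assert_eq!(x.hi64(), (1u64 << 63, false));
    }
}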

/// Get number of leading zero bits in the storage.
#[inline]
pub fn leading_zeros(x: &[Limb]) -> u32 {
    let length = x.len();
    // wrapping_sub is fine, since it'll just return None.
    if let Some(&value) = x.get(length.wrapping_sub(1)) {
        value.leading_zeros()
    } else {
        0
    }
}

/// Calculate the bit-length of the big-integer.
#[inline]
pub fn bit_length(x: &[Limb]) -> u32 {
    let nlz = leading_zeros(x);
    LIMB_BITS as u32 * x.len() as u32 - nlz
}

// RADIX
// -----

/// Split the radix into an odd factor and a power-of-two shift,
/// so `radix == odd * 2^shift`.
#[inline(always)]
pub const fn split_radix(radix: u32) -> (u32, u32) {
    match radix {
        // Is also needed for decimal floats, due to `negative_digit_comp`.
        2 => (0, 1),
        3 if cfg!(feature = "radix") => (3, 0),
        4 if cfg!(feature = "power-of-two") => (0, 2),
        // Is also needed for decimal floats, due to `negative_digit_comp`.
        5 => (5, 0),
        6 if cfg!(feature = "radix") => (3, 1),
        7 if cfg!(feature = "radix") => (7, 0),
        8 if cfg!(feature = "power-of-two") => (0, 3),
        9 if cfg!(feature = "radix") => (9, 0),
        10 => (5, 1),
        11 if cfg!(feature = "radix") => (11, 0),
        12 if cfg!(feature = "radix") => (6, 1),
        13 if cfg!(feature = "radix") => (13, 0),
        14 if cfg!(feature = "radix") => (7, 1),
        15 if cfg!(feature = "radix") => (15, 0),
        16 if cfg!(feature = "power-of-two") => (0, 4),
        17 if cfg!(feature = "radix") => (17, 0),
        18 if cfg!(feature = "radix") => (9, 1),
        19 if cfg!(feature = "radix") => (19, 0),
        20 if cfg!(feature = "radix") => (5, 2),
        21 if cfg!(feature = "radix") => (21, 0),
        22 if cfg!(feature = "radix") => (11, 1),
        23 if cfg!(feature = "radix") => (23, 0),
        24 if cfg!(feature = "radix") => (3, 3),
        25 if cfg!(feature = "radix") => (25, 0),
        26 if cfg!(feature = "radix") => (13, 1),
        27 if cfg!(feature = "radix") => (27, 0),
        28 if cfg!(feature = "radix") => (7, 2),
        29 if cfg!(feature = "radix") => (29, 0),
        30 if cfg!(feature = "radix") => (15, 1),
        31 if cfg!(feature = "radix") => (31, 0),
        32 if cfg!(feature = "power-of-two") => (0, 5),
        33 if cfg!(feature = "radix") => (33, 0),
        34 if cfg!(feature = "radix") => (17, 1),
        35 if cfg!(feature = "radix") => (35, 0),
        36 if cfg!(feature = "radix") => (9, 2),
        // Any other radix should be unreachable.
        _ => (0, 0),
    }
}
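
// A minimal usage sketch (illustrative): every supported base factors as
// `odd * 2^shift`, which is what lets `pow` handle the power-of-two part
// as a cheap bit or exponent shift.
#[cfg(test)]
mod split_radix_example {
    use super::*;

    #[test]
    fn decimal_factors_into_five_and_two() {
        assert_eq!(split_radix(10), (5, 1));
        assert_eq!(split_radix(2), (0, 1));
        assert_eq!(split_radix(5), (5, 0));
    }
}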

// LIMB
// ----

// Type for a single limb of the big integer.
//
// A limb is analogous to a digit in base10, except, it stores 32-bit
// or 64-bit numbers instead. We want types where 64-bit multiplication
// is well-supported by the architecture, rather than emulated in 3
// instructions. The quickest way to check this support is using a
// cross-compiler for numerous architectures, along with the following
// source file and command:
//
// Compile with `gcc main.c -c -S -O3 -masm=intel`
//
// And the source code is:
// ```text
// #include <stdint.h>
//
// struct i128 {
//     uint64_t hi;
//     uint64_t lo;
// };
//
// // Type your code here, or load an example.
// struct i128 square(uint64_t x, uint64_t y) {
//     __int128 prod = (__int128)x * (__int128)y;
//     struct i128 z;
//     z.hi = (uint64_t)(prod >> 64);
//     z.lo = (uint64_t)prod;
//     return z;
// }
// ```
//
// If the result contains `call __multi3`, then the multiplication
// is emulated by the compiler. Otherwise, it's natively supported.
//
// This should cover all known 64-bit platforms supported by Rust.
// https://forge.rust-lang.org/platform-support.html
//
// # Supported
//
// Platforms where native 128-bit multiplication is explicitly supported:
// - x86_64 (Supported via `MUL`).
// - mips64 (Supported via `DMULTU`, whose `HI` and `LO` registers can be read from).
// - s390x (Supported via `MLGR`).
//
// # Efficient
//
// Platforms where native 64-bit multiplication is supported and
// you can extract hi-lo for 64-bit multiplications:
// - aarch64 (Requires `UMULH` and `MUL` to capture high and low bits).
// - powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits).
// - riscv64 (Requires `MUL` and `MULH` to capture high and low bits).
//
// # Unsupported
//
// Platforms where native 128-bit multiplication is not supported,
// requiring software emulation:
// - sparc64 (`UMUL` only supports double-word arguments).
// - sparcv9 (Same as sparc64).
//
// These tests are run via `xcross`, my own library for C cross-compiling,
// which supports numerous targets (far in excess of Rust's tier 1 support,
// or rust-embedded/cross's list). xcross may be found here:
// https://github.com/Alexhuszagh/xcross
//
// To compile for the given target, run:
// `xcross gcc main.c -c -S -O3 --target $target`
//
// All 32-bit architectures inherently do not have support. That means
// we can essentially look for 64-bit architectures that are not SPARC.

#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type Limb = u64;
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type Wide = u128;
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub type SignedWide = i128;
#[cfg(all(target_pointer_width = "64", not(target_arch = "sparc")))]
pub const LIMB_BITS: usize = 64;

#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Limb = u32;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type Wide = u64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub type SignedWide = i64;
#[cfg(not(all(target_pointer_width = "64", not(target_arch = "sparc"))))]
pub const LIMB_BITS: usize = 32;