[no description]
Diffstat (limited to 'src/ap.rs')
-rw-r--r--src/ap.rs555
1 files changed, 555 insertions, 0 deletions
diff --git a/src/ap.rs b/src/ap.rs
new file mode 100644
index 0000000..8379fb3
--- /dev/null
+++ b/src/ap.rs
@@ -0,0 +1,555 @@
+use std::borrow::Cow;
+use std::cmp::Ordering;
+use std::fmt::{Debug, Display, Write};
+use std::intrinsics::{const_deallocate, const_eval_select};
+use std::marker::Destruct;
+use std::mem::{ManuallyDrop as MD, forget, replace, transmute};
+use std::ops::{
+ Add, AddAssign, Deref, DerefMut, Div, DivAssign, Mul, MulAssign, Neg,
+ Rem, Shl, Shr, Sub, SubAssign,
+};
+#[cfg(test)]
+mod test;
+
+use crate::xp::{self, XP};
+#[repr(C)]
+/// Arbitrary precision integer, usable in const
+///
+/// Sign-magnitude representation: `digits` holds the magnitude as
+/// little-endian base-256 limbs (`u8`s, managed by `crate::xp` —
+/// limb order inferred from `is_even`/carry handling; confirm in `xp`),
+/// `sign` holds the sign.
+pub struct AP {
+    // -1 | +1
+    sign: i8,
+    // number of significant limbs currently in use (see `normalize`)
+    ndigits: u32,
+    // backing limb storage; ManuallyDrop so the `const Drop` impl
+    // below can release it explicitly
+    digits: MD<Storage>,
+}
+/// Sound: `PartialEq` below is a total equality relation.
+impl const Eq for AP {}
+impl const PartialEq for AP {
+    /// Equal when sign, significant-limb count and the significant limbs
+    /// all match; limbs beyond `ndigits` (allocation slack) are ignored.
+    fn eq(&self, other: &Self) -> bool {
+        self.sign == other.sign
+            && self.ndigits == other.ndigits
+            && self.dgr()[..self.ndigits as usize]
+                == other.dgr()[..other.ndigits as usize]
+    }
+}
+
+impl const Drop for Storage {
+    fn drop(&mut self) {
+        // Compile-time path: the buffer came from the const allocator and
+        // must be released with `const_deallocate`.
+        // NOTE(review): `const_deallocate`'s signature is (ptr, size, align);
+        // passing `x.capacity()` as size with align 0 looks suspicious —
+        // confirm against the intrinsics docs and the matching allocation.
+        const fn drop_c(x: &mut MD<Vec<u8>>) {
+            unsafe { const_deallocate(x.as_mut_ptr(), x.capacity(), 0) };
+        }
+        // Runtime path: an ordinary Vec drop.
+        fn drop_r(x: &mut MD<Vec<u8>>) {
+            unsafe { MD::drop(x) };
+        }
+
+        match self {
+            Self::V(x) => {
+                const_eval_select((x,), drop_c, drop_r);
+            }
+            // Leaked slices are 'static and intentionally never freed.
+            _ => {}
+        }
+    }
+}
+
+impl const DerefMut for Storage {
+    /// Mutable access to the limb bytes.
+    ///
+    /// A `Leaked` slice is immutable, so it is first promoted
+    /// (copy-on-write) into an owned `V` vector.
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        match self {
+            Self::V(x) => x.as_mut_slice(),
+            Self::Leaked(items) => {
+                // Copy the static bytes into an owned Vec (manual loop:
+                // slice-copy helpers are not const-callable here).
+                let mut v = Vec::with_capacity(items.len());
+                let mut i = 0;
+                while i < items.len() {
+                    v.push(items[i]);
+                    // BUG FIX: the original never incremented `i`, so this
+                    // looped forever on any non-empty Leaked slice.
+                    i += 1;
+                }
+                *self = Storage::V(MD::new(v));
+                // Re-borrow through the freshly written V variant.
+                &mut *self
+            }
+        }
+    }
+}
+
+/// Read-only view of the limb bytes, regardless of storage kind.
+impl const Deref for Storage {
+    type Target = [u8];
+    fn deref(&self) -> &Self::Target {
+        match self {
+            Self::Leaked(bytes) => *bytes,
+            Self::V(owned) => owned.as_slice(),
+        }
+    }
+}
+
+impl const Clone for AP {
+    /// Deep copy: duplicates the limb storage. `sign` and `ndigits`
+    /// are plain `Copy` scalars — the original's `.clone()` on them
+    /// was redundant (clippy::clone_on_copy).
+    fn clone(&self) -> Self {
+        Self {
+            sign: self.sign,
+            ndigits: self.ndigits,
+            digits: MD::new((&*self.digits).clone()),
+        }
+    }
+}
+
+/// Backing storage for the limbs of an `AP`.
+#[derive(Debug)]
+pub enum Storage {
+    // Heap-owned limbs; ManuallyDrop so the `const Drop` impl can
+    // free them explicitly on either const or runtime path.
+    V(MD<Vec<u8>>),
+    // Limbs leaked to 'static (see `AP::globalize`); never freed.
+    Leaked(&'static [u8]),
+}
+impl const Clone for Storage {
+    fn clone(&self) -> Self {
+        match self {
+            Self::V(x) => {
+                // Manual byte-copy loop: helpers such as
+                // `extend_from_slice` are not const-callable here.
+                let mut i = 0;
+                let mut v = Vec::with_capacity(x.len());
+                while i < x.len() {
+                    v.push(x.as_slice()[i]);
+                    i += 1;
+                }
+                Self::V(MD::new(v))
+            }
+            // A leaked slice is 'static — the clone shares it.
+            Self::Leaked(x) => Self::Leaked(x),
+        }
+    }
+}
+impl const Default for AP {
+    /// Zero: positive sign, a single all-zero limb in static storage
+    /// (no heap allocation needed for the default value).
+    fn default() -> Self {
+        const ZERO_LIMB: &[u8] = &[0];
+        Self {
+            digits: MD::new(Storage::Leaked(ZERO_LIMB)),
+            ndigits: 1,
+            sign: 1,
+        }
+    }
+}
+
+impl Debug for AP {
+    /// `Debug` output is identical to `Display` (optional '-' plus
+    /// base-10 digits); delegate instead of duplicating the whole
+    /// formatting body, so the two can never drift apart.
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        Display::fmt(self, f)
+    }
+}
+impl Display for AP {
+    /// Formats as an optional '-' followed by base-10 digits.
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.sign == -1 {
+            f.write_char('-')?;
+        }
+        // Scratch buffer for xp::to_str. The `self.clone()` is needed
+        // because `dg()` takes &mut self — presumably to_str divides the
+        // limbs in place; confirm in `crate::xp`.
+        // NOTE(review): 1024 is a guess at the decimal length — confirm
+        // to_str grows `x` rather than overflowing for huge values.
+        let mut x = Vec::with_capacity(1024);
+        f.write_str(unsafe {
+            // SAFETY: assumes xp::to_str emits ASCII digits only — confirm.
+            std::str::from_utf8_unchecked(xp::to_str(
+                &mut x,
+                10,
+                self.ndigits,
+                self.clone().dg(),
+            ))
+        })
+    }
+}
+
+impl AP {
+    /// Allocated limb capacity in bytes (not the significant count —
+    /// that is `ndigits`).
+    const fn sz(&self) -> u32 {
+        self.digits.len() as _
+    }
+    /// Mutable view of the limbs.
+    const fn dg(&mut self) -> &mut XP {
+        &mut self.digits
+    }
+    /// Builds an `AP` holding the value `n`.
+    pub const fn new(n: i128) -> Self {
+        // BUG FIX: 16 limbs, not 8. An i128 magnitude can be as large as
+        // 2^127, which needs 16 base-256 (u8) limbs; alloc(8) could only
+        // hold 64-bit magnitudes and silently truncated larger inputs.
+        let mut x = Self::alloc(16);
+        x.set(n);
+        x
+    }
+    /// Shared view of the limbs.
+    const fn dgr(&self) -> &XP {
+        &self.digits
+    }
+    /// Truncates this AP into an `i128`.
+    ///
+    /// Positive values are reduced mod 2^127 (preserving the original,
+    /// C-style truncating behavior); negative values wrap through two's
+    /// complement, so a magnitude of exactly 2^127 round-trips to
+    /// `i128::MIN`.
+    pub const fn to_int(&self) -> i128 {
+        let u = xp::to_int(self.ndigits, self.dgr());
+        if self.sign == -1 {
+            // BUG FIX: the old `-((u % 2^127) as i128)` mapped i128::MIN's
+            // magnitude (2^127) to 0. Wrapping negation handles it exactly.
+            (u as i128).wrapping_neg()
+        } else {
+            (u % (i128::MAX as u128 + 1)) as i128
+        }
+    }
+    /// Stores `n` into this AP, replacing the previous value.
+    pub const fn set(&mut self, n: i128) {
+        // `unsigned_abs` is well-defined for every i128, including
+        // i128::MIN (magnitude 2^127, which plain `-n` would overflow on).
+        // This replaces the original's three-way branch, which also
+        // contained a dead, duplicated `self.sign` assignment.
+        xp::from_int(self.sz(), self.dg(), n.unsigned_abs());
+        self.sign = if n < 0 { -1 } else { 1 };
+        self.normalize(self.sz());
+    }
+    /// Recomputes `ndigits` as the number of significant limbs within
+    /// the first `n` (presumably dropping leading zero limbs while
+    /// keeping at least one — confirm `xp::len`).
+    const fn normalize(&mut self, n: u32) {
+        self.ndigits = xp::len(n, self.dgr());
+    }
+    /// True when the value is zero (a single zero limb).
+    const fn iszero(&self) -> bool {
+        self.ndigits == 1 && self.dgr()[0] == 0
+    }
+    /// Allocates a zero-valued AP with room for `size` limbs.
+    ///
+    /// # Panics
+    /// Panics when `size == 0` (every AP needs at least one limb).
+    pub const fn alloc(size: u32) -> Self {
+        assert!(size > 0);
+        // Manual zero-fill: `vec![0; n]` is not const-callable.
+        let mut v = Vec::with_capacity(size as _);
+        let mut i = 0;
+        while i < size {
+            v.push(0);
+            i += 1;
+        }
+
+        Self {
+            sign: 1,
+            ndigits: 1,
+            digits: MD::new(Storage::V(MD::new(v))),
+        }
+    }
+    /// z = |x| + |y| — magnitudes only; signs are handled by the callers.
+    /// `z` must have room for max(x, y) limbs plus one carry limb.
+    const fn add(z: &mut Self, x: &Self, y: &Self) {
+        let mut n = y.ndigits;
+        if x.ndigits < n {
+            // Swap so x is the longer operand.
+            Self::add(z, y, x)
+        } else if (x.ndigits > n) {
+            // Add the overlapping low limbs, then propagate the carry
+            // through x's remaining high limbs; the final carry lands in
+            // z's topmost limb.
+            let carry = xp::add(n, z.dg(), x.dgr(), y.dgr(), false);
+            let sz = z.sz() as usize;
+            z.dg()[sz - 1] = unsafe {
+                xp::sum(
+                    x.ndigits - n,
+                    &mut z.dg()[n as usize..],
+                    &x.dgr()[n as usize..],
+                    carry as _,
+                )
+            };
+        } else {
+            // Equal lengths: the carry goes directly into limb n.
+            z.dg()[n as usize] =
+                xp::add(n, z.dg(), x.dgr(), y.dgr(), false) as _;
+        }
+        z.normalize(z.sz());
+    }
+    /// z = |x| - |y| — magnitudes only; requires |x| >= |y|, which the
+    /// callers guarantee via `cmp` before dispatching here.
+    /// (Cleaned up: removed leftover commented-out C source and the
+    /// needless `mut` on `n`.)
+    const fn sub(z: &mut Self, x: &Self, y: &Self) {
+        let n = y.ndigits;
+        // Subtract the overlapping low limbs, then propagate the borrow
+        // through x's remaining high limbs.
+        let mut borrow = xp::sub(n, z.dg(), x.dgr(), y.dgr(), false) as u8;
+        if x.ndigits > n {
+            borrow = xp::diff(
+                x.ndigits - n,
+                &mut z.dg()[n as usize..],
+                &x.dgr()[n as usize..],
+                borrow,
+            );
+        }
+        // |x| >= |y| means the subtraction cannot underflow.
+        assert!(borrow == 0);
+        z.normalize(z.sz());
+    }
+    /// Magnitude comparison (signs ignored). The original body was an
+    /// exact duplicate of `cmp_` below; delegate so the two cannot drift.
+    const fn cmp(x: &Self, y: &Self) -> Ordering {
+        Self::cmp_(x, y)
+    }
+    /// "Same sign": true when x and y have equal signs.
+    /// `sign` is always -1 or +1, so XOR is 0 iff the signs match.
+    const fn smsn(x: &Self, y: &Self) -> bool {
+        (x.sign ^ y.sign) == 0
+    }
+    /// Division with remainder: returns `(q, r)`.
+    ///
+    /// `xp::div` works on magnitudes; when the operand signs differ and
+    /// the remainder is non-zero, q is bumped by one in magnitude and
+    /// r becomes |y| - r — i.e. floored-division semantics, with the
+    /// remainder taking the divisor's magnitude range.
+    /// NOTE(review): `r.sign` is never assigned (stays +1 from `alloc`);
+    /// confirm callers never rely on a signed remainder.
+    ///
+    /// # Panics
+    /// Panics when `y` is zero.
+    pub const fn divmod(x: &Self, y: &Self) -> (AP, AP) {
+        assert!(!y.iszero());
+        let mut q = AP::alloc(x.ndigits);
+        let mut r = AP::alloc(y.ndigits);
+        // The last argument is scratch space required by xp::div.
+        xp::div(
+            x.ndigits,
+            q.dg(),
+            x.dgr(),
+            y.ndigits,
+            y.dgr(),
+            r.dg(),
+            AP::alloc(x.ndigits + y.ndigits + 2).dg(),
+        );
+        q.normalize(q.sz());
+        r.normalize(r.sz());
+        q.sign = if q.iszero() || AP::smsn(x, y) { 1 } else { -1 };
+        if !AP::smsn(x, y) && !r.iszero() {
+            // Different signs with a non-zero remainder: adjust
+            // truncated results to floored results: q += 1 (magnitude),
+            // r = |y| - r.
+            let carry = unsafe {
+                xp::sum_p(q.sz(), q.dg().as_mut_ptr(), q.dgr().as_ptr(), 1)
+            };
+            assert!(carry == 0);
+            q.normalize(q.sz());
+            // r < |y| always holds, so this subtraction cannot borrow.
+            assert!(unsafe {
+                !xp::subp(
+                    r.sz(),
+                    r.dg().as_mut_ptr(),
+                    y.dgr().as_ptr(),
+                    r.dgr().as_ptr(),
+                    false,
+                )
+            });
+            r.normalize(r.sz());
+        };
+        (q, r)
+    }
+    /// True when the magnitude is exactly one (sign ignored).
+    const fn isone(&self) -> bool {
+        self.ndigits == 1 && self.dgr()[0] == 1
+    }
+    /// True when the value is even — parity lives entirely in the
+    /// lowest limb's lowest bit; the sign is irrelevant.
+    pub const fn is_even(&self) -> bool {
+        (self.dgr()[0] & 1) == 0
+    }
+    /// (x * y) mod p — modular-multiplication helper for `pow_mod`.
+    const fn mulmod(x: &Self, y: &Self, p: &Self) -> AP {
+        let product = x * y;
+        &product % p
+    }
+    /// self raised to the power `y` (y must be non-negative), computed
+    /// by recursive square-and-multiply.
+    ///
+    /// NOTE(review): the zero-base check precedes the zero-exponent
+    /// check, so `0.pow(0)` returns 0 rather than the conventional 1 —
+    /// confirm this is intended.
+    pub const fn pow(&self, y: &Self) -> AP {
+        assert!(y.sign == 1);
+        if self.iszero() {
+            return AP::new(0);
+        }
+        if y.iszero() {
+            return AP::new(1);
+        }
+        if self.isone() {
+            // |self| == 1: result is ±1 by exponent parity.
+            return AP::new(if y.is_even() { 1 } else { self.sign as _ });
+        }
+        if y.isone() {
+            // `self + 0` clones via the mixed-type Add overloads.
+            return self + 0;
+        }
+        // self^y = (self^(y/2))^2 * self^(y & 1)
+        let y2 = y >> 1;
+        let t = AP::pow(self, &y2);
+        let mut z = &t * &t;
+        if !y.is_even() {
+            z = self * &z;
+        }
+        z
+    }
+    /// self^y mod `modulo` (y non-negative, modulo > 1), computed by
+    /// recursive square-and-multiply with reduction at each step.
+    pub const fn pow_mod(&self, y: &Self, modulo: &Self) -> Self {
+        assert!(y.sign == 1);
+        assert!(modulo.sign == 1 && !modulo.iszero() && !modulo.isone());
+        if self.iszero() {
+            return AP::new(0);
+        }
+        if y.iszero() {
+            return AP::new(1);
+        }
+        if self.isone() {
+            // |self| == 1: result is ±1 by exponent parity.
+            return AP::new(if y.is_even() { 1 } else { self.sign as _ });
+        }
+        if y.isone() {
+            // NOTE(review): returns self *unreduced* (`self + 0` clones);
+            // callers may expect `self % modulo` here — confirm.
+            return self + 0;
+        }
+        let y2 = y >> 1;
+        let t = AP::pow_mod(self, &y2, modulo);
+        let mut z = AP::mulmod(&t, &t, modulo);
+        if !y.is_even() {
+            // Reduce the base first so mulmod's operands stay small.
+            z = AP::mulmod(&(self % modulo), &z, modulo);
+        }
+        z
+    }
+    /// Magnitude comparison: limb counts order the magnitudes first
+    /// (valid only because values are kept normalized), with a limb-wise
+    /// comparison as the tie-break.
+    const fn cmp_(x: &Self, y: &Self) -> Ordering {
+        if x.ndigits != y.ndigits {
+            x.ndigits.cmp(&y.ndigits)
+        } else {
+            xp::cmp(x.ndigits, x.dgr(), y.dgr())
+        }
+    }
+    /// call before storing.
+    ///
+    /// Converts heap-owned limbs into a leaked 'static slice via
+    /// `const_make_global`, so the value can be embedded in a `const`.
+    /// Idempotent: already-leaked storage is left untouched.
+    pub const fn globalize(mut self) -> Self {
+        match &mut *self.digits {
+            Storage::V(x) =>
+            // SAFETY note: MD::take moves the Vec out of `x`; the old
+            // storage is immediately overwritten below, so the moved-out
+            // Vec is never dropped through the old slot.
+                self.digits = MD::new(Storage::Leaked(
+                    unsafe { MD::take(x) }.const_make_global(),
+                )),
+            _ => {}
+        };
+        self
+    }
+}
+
+impl const Drop for AP {
+    fn drop(&mut self) {
+        // `digits` is ManuallyDrop; dropping it here forwards to
+        // Storage's const Drop, which frees owned limbs and leaves
+        // leaked 'static limbs alone.
+        unsafe { MD::drop(&mut self.digits) };
+    }
+}
+
+impl const Neg for AP {
+    type Output = AP;
+
+    /// Flips the sign in place; zero stays normalized as +0.
+    fn neg(mut self) -> Self::Output {
+        self.sign = match self.iszero() {
+            true => 1,
+            false => -self.sign,
+        };
+        self
+    }
+}
+
+impl const Mul for &AP {
+    type Output = AP;
+
+    /// Sign-magnitude multiply: |z| = |self| * |rhs|; the sign follows
+    /// the usual rule, with zero forced positive.
+    fn mul(self, rhs: Self) -> Self::Output {
+        let mut product = AP::alloc(self.ndigits + rhs.ndigits);
+        xp::mul(product.dg(), self.ndigits, self.dgr(), rhs.ndigits, rhs.dgr());
+        product.normalize(product.sz());
+        product.sign = match product.iszero() || AP::smsn(self, rhs) {
+            true => 1,
+            false => -1,
+        };
+        product
+    }
+}
+impl const MulAssign<&AP> for AP {
+    fn mul_assign(&mut self, rhs: &Self) {
+        // Reborrow self immutably for the `Mul for &AP` impl, then replace.
+        *self = &*self * rhs;
+    }
+}
+impl const Add for &AP {
+    type Output = AP;
+
+    /// Signed addition on sign-magnitude form: same signs add the
+    /// magnitudes; different signs subtract the smaller magnitude from
+    /// the larger, and the result takes the larger operand's sign.
+    /// (Cleaned up: `self`/`rhs` are already `&AP`; the original passed
+    /// `&self`/`&rhs` (`&&AP`) and leaned on deref coercion.)
+    fn add(self, rhs: Self) -> Self::Output {
+        let mut z;
+        if AP::smsn(self, rhs) {
+            // One extra limb for a possible carry.
+            z = AP::alloc(self.ndigits.max(rhs.ndigits) + 1);
+            AP::add(&mut z, self, rhs);
+            z.sign = if z.iszero() { 1 } else { self.sign };
+        } else if AP::cmp(self, rhs).is_gt() {
+            z = AP::alloc(self.ndigits);
+            AP::sub(&mut z, self, rhs);
+            z.sign = if z.iszero() { 1 } else { self.sign };
+        } else {
+            // |rhs| >= |self|: magnitude rhs - self, result takes
+            // rhs's sign (== -self.sign here).
+            z = AP::alloc(rhs.ndigits);
+            AP::sub(&mut z, rhs, self);
+            z.sign = if z.iszero() { 1 } else { -self.sign };
+        }
+        z
+    }
+}
+impl const Sub for &AP {
+    type Output = AP;
+
+    /// Signed subtraction: self - rhs == self + (-rhs), implemented
+    /// directly on sign-magnitude form (mirror image of `Add`).
+    fn sub(self, rhs: Self) -> Self::Output {
+        let mut z;
+        if !AP::smsn(self, rhs) {
+            // Different signs: magnitudes add; result keeps self's sign.
+            z = AP::alloc(self.ndigits.max(rhs.ndigits) + 1);
+
+            AP::add(&mut z, self, rhs);
+            z.sign = if z.iszero() { 1 } else { self.sign };
+        } else if AP::cmp(self, rhs).is_gt() {
+            // Same sign, |self| > |rhs|: magnitude self - rhs.
+            z = AP::alloc(self.ndigits);
+            AP::sub(&mut z, self, rhs);
+            z.sign = if z.iszero() { 1 } else { self.sign };
+        } else {
+            // Same sign, |rhs| >= |self|: magnitude rhs - self, sign flips.
+            z = AP::alloc(rhs.ndigits);
+            AP::sub(&mut z, rhs, self);
+            z.sign = if z.iszero() { 1 } else { -self.sign };
+        }
+        z
+    }
+}
+/// `-=` in terms of `Sub for &AP`.
+impl const SubAssign<&AP> for AP {
+    fn sub_assign(&mut self, rhs: &AP) {
+        *self = &*self - rhs;
+    }
+}
+/// `+=` in terms of `Add for &AP`.
+impl const AddAssign<&AP> for AP {
+    fn add_assign(&mut self, rhs: &AP) {
+        *self = &*self + rhs;
+    }
+}
+/// Quotient half of `AP::divmod` (see its docs for sign semantics).
+impl const Div for &AP {
+    type Output = AP;
+
+    fn div(self, rhs: Self) -> Self::Output {
+        AP::divmod(self, rhs).0
+    }
+}
+impl const DivAssign<&AP> for AP {
+    fn div_assign(&mut self, rhs: &AP) {
+        *self = &*self / rhs;
+    }
+}
+/// Remainder half of `AP::divmod` (see its docs for sign semantics).
+impl const Rem for &AP {
+    type Output = AP;
+
+    fn rem(self, rhs: Self) -> Self::Output {
+        AP::divmod(self, rhs).1
+    }
+}
+
+impl const Shl<u32> for &AP {
+    type Output = AP;
+
+    /// Left shift by `s` bits; the sign is preserved.
+    fn shl(self, s: u32) -> Self::Output {
+        // (s + 7) & !7 rounds s up to a multiple of 8, so the division
+        // computes ceil(s / 8) extra limbs for the shifted-in bits.
+        let mut z = AP::alloc(self.ndigits + ((s + 7) & !7) / 8);
+        xp::shl(z.sz(), z.dg(), self.ndigits, self.dgr(), s, 0);
+        z.sign = self.sign;
+        z.normalize(z.sz());
+        z
+    }
+}
+impl const Shr<u32> for &AP {
+    type Output = AP;
+
+    /// this is a truncating, not flooring, shr.
+    fn shr(self, s: u32) -> Self::Output {
+        // Shifting out every bit yields zero (limbs are 8 bits wide).
+        if s >= 8 * self.ndigits {
+            return AP::new(0);
+        }
+
+        let mut z = AP::alloc(self.ndigits - (s / 8));
+        xp::shr(z.sz(), z.dg(), self.ndigits, self.dgr(), s, 0);
+
+        // BUG FIX: normalize *before* the zero test. The original called
+        // iszero() on the stale post-alloc state (ndigits == 1), so any
+        // non-zero result whose lowest limb was 0 was mis-classified as
+        // zero and forced positive. Also dropped the unused `c` binding.
+        z.normalize(z.sz());
+        z.sign = if z.iszero() { 1 } else { self.sign };
+        z
+    }
+}
+/// Generates the by-value and mixed-type operator impls from the
+/// canonical `&AP op &AP` implementation:
+///   * `AP op &AP`, `AP op AP`, `&AP op AP` — forwarding by reference;
+///   * `&AP op $int` and `AP op $int` for each listed integer type,
+///     converting the integer through `AP::new` first.
+macro_rules! fni {
+    ($t:ident, $fname:ident => $($for:ident)+) => {
+        impl const $t<&AP> for AP {
+            type Output = AP;
+            fn $fname(self, other: &AP) -> AP {
+                <&AP as $t<&AP>>::$fname(&self, other)
+            }
+        }
+        impl const $t<AP> for AP {
+            type Output = AP;
+            fn $fname(self, other: AP) -> AP {
+                <&AP as $t<&AP>>::$fname(&self, &other)
+            }
+        }
+
+        impl const $t<AP> for &AP {
+            type Output = AP;
+            fn $fname(self, other: AP) -> AP {
+                <&AP as $t<&AP>>::$fname(self, &other)
+            }
+        }
+
+        $(impl const $t<$for> for &AP {
+            type Output = AP;
+            fn $fname(self, other: $for) -> AP {
+                let other = AP::new(other as _);
+                <&AP as $t<&AP>>::$fname(self, &other)
+            }
+        })+
+        $(impl const $t<$for> for AP {
+            type Output = AP;
+            fn $fname(self, other: $for) -> AP {
+                let other = AP::new(other as _);
+                <&AP as $t<&AP>>::$fname(&self, &other)
+            }
+        })+
+    };
+}
+// Instantiate the forwarding and mixed-integer impls for every
+// binary operator.
+fni!(Add, add => u64 u32 u16 u8 i128 i64 i32 i16 i8);
+fni!(Mul, mul => u64 u32 u16 u8 i128 i64 i32 i16 i8);
+fni!(Div, div => u64 u32 u16 u8 i128 i64 i32 i16 i8);
+fni!(Sub, sub => u64 u32 u16 u8 i128 i64 i32 i16 i8);
+fni!(Rem, rem => u64 u32 u16 u8 i128 i64 i32 i16 i8);
+
+impl const PartialOrd for AP {
+    /// Always `Some`: the `Ord` impl below defines a total order.
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+impl const Ord for AP {
+    /// Signed comparison: different signs order by sign alone; equal
+    /// signs compare magnitudes, with the order reversed when both
+    /// operands are negative.
+    fn cmp(&self, other: &Self) -> Ordering {
+        if !AP::smsn(self, other) {
+            // Replaces the original `transmute(self.sign)` (which relied
+            // on Ordering's #[repr(i8)] layout) with an explicit, safe
+            // mapping; sign is only ever -1 or +1.
+            if self.sign == 1 { Ordering::Greater } else { Ordering::Less }
+        } else if self.sign == 1 {
+            AP::cmp_(self, other)
+        } else {
+            AP::cmp_(other, self)
+        }
+    }
+}