aboutsummaryrefslogtreecommitdiff
path: root/libcore/num
diff options
context:
space:
mode:
authorpravic <[email protected]>2016-04-12 17:47:49 +0300
committerpravic <[email protected]>2016-04-12 17:47:49 +0300
commit91d227b219446d3a8b13f5bf7eb87bfc78a8b339 (patch)
tree0e438aefd2b3cf07354a68595d5aa4ed73f81f15 /libcore/num
parentadd native import libraries (diff)
downloadkmd-env-rs-91d227b219446d3a8b13f5bf7eb87bfc78a8b339.tar.xz
kmd-env-rs-91d227b219446d3a8b13f5bf7eb87bfc78a8b339.zip
add libcore from 2016-04-11 nightly
Diffstat (limited to 'libcore/num')
-rw-r--r--libcore/num/bignum.rs499
-rw-r--r--libcore/num/dec2flt/algorithm.rs357
-rw-r--r--libcore/num/dec2flt/mod.rs332
-rw-r--r--libcore/num/dec2flt/num.rs94
-rw-r--r--libcore/num/dec2flt/parse.rs134
-rw-r--r--libcore/num/dec2flt/rawfp.rs368
-rw-r--r--libcore/num/dec2flt/table.rs1281
-rw-r--r--libcore/num/diy_float.rs71
-rw-r--r--libcore/num/f32.rs272
-rw-r--r--libcore/num/f64.rs272
-rw-r--r--libcore/num/float_macros.rs20
-rw-r--r--libcore/num/flt2dec/decoder.rs100
-rw-r--r--libcore/num/flt2dec/estimator.rs25
-rw-r--r--libcore/num/flt2dec/mod.rs662
-rw-r--r--libcore/num/flt2dec/strategy/dragon.rs329
-rw-r--r--libcore/num/flt2dec/strategy/grisu.rs696
-rw-r--r--libcore/num/i16.rs17
-rw-r--r--libcore/num/i32.rs17
-rw-r--r--libcore/num/i64.rs17
-rw-r--r--libcore/num/i8.rs17
-rw-r--r--libcore/num/int_macros.rs27
-rw-r--r--libcore/num/isize.rs20
-rw-r--r--libcore/num/mod.rs2516
-rw-r--r--libcore/num/u16.rs17
-rw-r--r--libcore/num/u32.rs17
-rw-r--r--libcore/num/u64.rs17
-rw-r--r--libcore/num/u8.rs17
-rw-r--r--libcore/num/uint_macros.rs22
-rw-r--r--libcore/num/usize.rs20
-rw-r--r--libcore/num/wrapping.rs309
30 files changed, 8562 insertions, 0 deletions
diff --git a/libcore/num/bignum.rs b/libcore/num/bignum.rs
new file mode 100644
index 0000000..66c6deb
--- /dev/null
+++ b/libcore/num/bignum.rs
@@ -0,0 +1,499 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Custom arbitrary-precision number (bignum) implementation.
+//!
+//! This is designed to avoid the heap allocation at expense of stack memory.
+//! The most used bignum type, `Big32x40`, is limited by 32 × 40 = 1,280 bits
+//! and will take at most 160 bytes of stack memory. This is more than enough
+//! for round-tripping all possible finite `f64` values.
+//!
+//! In principle it is possible to have multiple bignum types for different
+//! inputs, but we don't do so to avoid the code bloat. Each bignum is still
+//! tracked for the actual usages, so it normally doesn't matter.
+
+// This module is only for dec2flt and flt2dec, and only public because of libcoretest.
+// It is not intended to ever be stabilized.
+#![doc(hidden)]
+#![unstable(feature = "core_private_bignum",
+ reason = "internal routines only exposed for testing",
+ issue = "0")]
+#![macro_use]
+
+use prelude::v1::*;
+
+use mem;
+use intrinsics;
+
+/// Arithmetic operations required by bignums.
+pub trait FullOps {
+ /// Returns `(carry', v')` such that `carry' * 2^W + v' = self + other + carry`,
+ /// where `W` is the number of bits in `Self`.
+ fn full_add(self, other: Self, carry: bool) -> (bool /*carry*/, Self);
+
+ /// Returns `(carry', v')` such that `carry' * 2^W + v' = self * other + carry`,
+ /// where `W` is the number of bits in `Self`.
+ fn full_mul(self, other: Self, carry: Self) -> (Self /*carry*/, Self);
+
+ /// Returns `(carry', v')` such that `carry' * 2^W + v' = self * other + other2 + carry`,
+ /// where `W` is the number of bits in `Self`.
+ fn full_mul_add(self, other: Self, other2: Self, carry: Self) -> (Self /*carry*/, Self);
+
+ /// Returns `(quo, rem)` such that `borrow * 2^W + self = quo * other + rem`
+ /// and `0 <= rem < other`, where `W` is the number of bits in `Self`.
+ fn full_div_rem(self, other: Self, borrow: Self) -> (Self /*quotient*/, Self /*remainder*/);
+}
+
+macro_rules! impl_full_ops {
+ ($($ty:ty: add($addfn:path), mul/div($bigty:ident);)*) => (
+ $(
+ impl FullOps for $ty {
+ fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
+ // this cannot overflow, the output is between 0 and 2*2^nbits - 1
+ // FIXME will LLVM optimize this into ADC or similar???
+ let (v, carry1) = unsafe { intrinsics::add_with_overflow(self, other) };
+ let (v, carry2) = unsafe {
+ intrinsics::add_with_overflow(v, if carry {1} else {0})
+ };
+ (carry1 || carry2, v)
+ }
+
+ fn full_mul(self, other: $ty, carry: $ty) -> ($ty, $ty) {
+ // this cannot overflow, the output is between 0 and 2^nbits * (2^nbits - 1)
+ let nbits = mem::size_of::<$ty>() * 8;
+ let v = (self as $bigty) * (other as $bigty) + (carry as $bigty);
+ ((v >> nbits) as $ty, v as $ty)
+ }
+
+ fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
+ // this cannot overflow, the output is between 0 and 2^(2*nbits) - 1
+ let nbits = mem::size_of::<$ty>() * 8;
+ let v = (self as $bigty) * (other as $bigty) + (other2 as $bigty) +
+ (carry as $bigty);
+ ((v >> nbits) as $ty, v as $ty)
+ }
+
+ fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
+ debug_assert!(borrow < other);
+ // this cannot overflow, the dividend is between 0 and other * 2^nbits - 1
+ let nbits = mem::size_of::<$ty>() * 8;
+ let lhs = ((borrow as $bigty) << nbits) | (self as $bigty);
+ let rhs = other as $bigty;
+ ((lhs / rhs) as $ty, (lhs % rhs) as $ty)
+ }
+ }
+ )*
+ )
+}
+
+impl_full_ops! {
+ u8: add(intrinsics::u8_add_with_overflow), mul/div(u16);
+ u16: add(intrinsics::u16_add_with_overflow), mul/div(u32);
+ u32: add(intrinsics::u32_add_with_overflow), mul/div(u64);
+// u64: add(intrinsics::u64_add_with_overflow), mul/div(u128); // see RFC #521 for enabling this.
+}
+
+/// Table of powers of 5 representable in digits. Specifically, the largest {u8, u16, u32} value
+/// that's a power of five, plus the corresponding exponent. Used in `mul_pow5`.
+const SMALL_POW5: [(u64, usize); 3] = [
+ (125, 3),
+ (15625, 6),
+ (1_220_703_125, 13),
+];
+
+macro_rules! define_bignum {
+ ($name:ident: type=$ty:ty, n=$n:expr) => (
+ /// Stack-allocated arbitrary-precision (up to certain limit) integer.
+ ///
+ /// This is backed by a fixed-size array of given type ("digit").
+ /// While the array is not very large (normally some hundred bytes),
+ /// copying it recklessly may result in the performance hit.
+ /// Thus this is intentionally not `Copy`.
+ ///
+ /// All operations available to bignums panic in the case of over/underflows.
+ /// The caller is responsible to use large enough bignum types.
+ pub struct $name {
+ /// One plus the offset to the maximum "digit" in use.
+ /// This does not decrease, so be aware of the computation order.
+ /// `base[size..]` should be zero.
+ size: usize,
+ /// Digits. `[a, b, c, ...]` represents `a + b*2^W + c*2^(2W) + ...`
+ /// where `W` is the number of bits in the digit type.
+ base: [$ty; $n]
+ }
+
+ impl $name {
+ /// Makes a bignum from one digit.
+ pub fn from_small(v: $ty) -> $name {
+ let mut base = [0; $n];
+ base[0] = v;
+ $name { size: 1, base: base }
+ }
+
+ /// Makes a bignum from `u64` value.
+ pub fn from_u64(mut v: u64) -> $name {
+ use mem;
+
+ let mut base = [0; $n];
+ let mut sz = 0;
+ while v > 0 {
+ base[sz] = v as $ty;
+ v >>= mem::size_of::<$ty>() * 8;
+ sz += 1;
+ }
+ $name { size: sz, base: base }
+ }
+
+ /// Return the internal digits as a slice `[a, b, c, ...]` such that the numeric
+ /// value is `a + b * 2^W + c * 2^(2W) + ...` where `W` is the number of bits in
+ /// the digit type.
+ pub fn digits(&self) -> &[$ty] {
+ &self.base[..self.size]
+ }
+
+ /// Return the `i`-th bit where bit 0 is the least significant one.
+ /// In other words, the bit with weight `2^i`.
+ pub fn get_bit(&self, i: usize) -> u8 {
+ use mem;
+
+ let digitbits = mem::size_of::<$ty>() * 8;
+ let d = i / digitbits;
+ let b = i % digitbits;
+ ((self.base[d] >> b) & 1) as u8
+ }
+
+ /// Returns true if the bignum is zero.
+ pub fn is_zero(&self) -> bool {
+ self.digits().iter().all(|&v| v == 0)
+ }
+
+ /// Returns the number of bits necessary to represent this value. Note that zero
+ /// is considered to need 0 bits.
+ pub fn bit_length(&self) -> usize {
+ use mem;
+
+ // Skip over the most significant digits which are zero.
+ let digits = self.digits();
+ let zeros = digits.iter().rev().take_while(|&&x| x == 0).count();
+ let end = digits.len() - zeros;
+ let nonzero = &digits[..end];
+
+ if nonzero.is_empty() {
+ // There are no non-zero digits, i.e. the number is zero.
+ return 0;
+ }
+ // This could be optimized with leading_zeros() and bit shifts, but that's
+ // probably not worth the hassle.
+ let digitbits = mem::size_of::<$ty>()* 8;
+ let mut i = nonzero.len() * digitbits - 1;
+ while self.get_bit(i) == 0 {
+ i -= 1;
+ }
+ i + 1
+ }
+
+ /// Adds `other` to itself and returns its own mutable reference.
+ pub fn add<'a>(&'a mut self, other: &$name) -> &'a mut $name {
+ use cmp;
+ use num::bignum::FullOps;
+
+ let mut sz = cmp::max(self.size, other.size);
+ let mut carry = false;
+ for (a, b) in self.base[..sz].iter_mut().zip(&other.base[..sz]) {
+ let (c, v) = (*a).full_add(*b, carry);
+ *a = v;
+ carry = c;
+ }
+ if carry {
+ self.base[sz] = 1;
+ sz += 1;
+ }
+ self.size = sz;
+ self
+ }
+
+ pub fn add_small(&mut self, other: $ty) -> &mut $name {
+ use num::bignum::FullOps;
+
+ let (mut carry, v) = self.base[0].full_add(other, false);
+ self.base[0] = v;
+ let mut i = 1;
+ while carry {
+ let (c, v) = self.base[i].full_add(0, carry);
+ self.base[i] = v;
+ carry = c;
+ i += 1;
+ }
+ if i > self.size {
+ self.size = i;
+ }
+ self
+ }
+
+ /// Subtracts `other` from itself and returns its own mutable reference.
+ pub fn sub<'a>(&'a mut self, other: &$name) -> &'a mut $name {
+ use cmp;
+ use num::bignum::FullOps;
+
+ let sz = cmp::max(self.size, other.size);
+ let mut noborrow = true;
+ for (a, b) in self.base[..sz].iter_mut().zip(&other.base[..sz]) {
+ let (c, v) = (*a).full_add(!*b, noborrow);
+ *a = v;
+ noborrow = c;
+ }
+ assert!(noborrow);
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by a digit-sized `other` and returns its own
+ /// mutable reference.
+ pub fn mul_small(&mut self, other: $ty) -> &mut $name {
+ use num::bignum::FullOps;
+
+ let mut sz = self.size;
+ let mut carry = 0;
+ for a in &mut self.base[..sz] {
+ let (c, v) = (*a).full_mul(other, carry);
+ *a = v;
+ carry = c;
+ }
+ if carry > 0 {
+ self.base[sz] = carry;
+ sz += 1;
+ }
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by `2^bits` and returns its own mutable reference.
+ pub fn mul_pow2(&mut self, bits: usize) -> &mut $name {
+ use mem;
+
+ let digitbits = mem::size_of::<$ty>() * 8;
+ let digits = bits / digitbits;
+ let bits = bits % digitbits;
+
+ assert!(digits < $n);
+ debug_assert!(self.base[$n-digits..].iter().all(|&v| v == 0));
+ debug_assert!(bits == 0 || (self.base[$n-digits-1] >> (digitbits - bits)) == 0);
+
+ // shift by `digits * digitbits` bits
+ for i in (0..self.size).rev() {
+ self.base[i+digits] = self.base[i];
+ }
+ for i in 0..digits {
+ self.base[i] = 0;
+ }
+
+ // shift by `bits` bits
+ let mut sz = self.size + digits;
+ if bits > 0 {
+ let last = sz;
+ let overflow = self.base[last-1] >> (digitbits - bits);
+ if overflow > 0 {
+ self.base[last] = overflow;
+ sz += 1;
+ }
+ for i in (digits+1..last).rev() {
+ self.base[i] = (self.base[i] << bits) |
+ (self.base[i-1] >> (digitbits - bits));
+ }
+ self.base[digits] <<= bits;
+ // self.base[..digits] is zero, no need to shift
+ }
+
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by `5^e` and returns its own mutable reference.
+ pub fn mul_pow5(&mut self, mut e: usize) -> &mut $name {
+ use mem;
+ use num::bignum::SMALL_POW5;
+
+ // There are exactly n trailing zeros on 2^n, and the only relevant digit sizes
+            // are consecutive powers of two, so this is a well-suited index for the table.
+ let table_index = mem::size_of::<$ty>().trailing_zeros() as usize;
+ let (small_power, small_e) = SMALL_POW5[table_index];
+ let small_power = small_power as $ty;
+
+ // Multiply with the largest single-digit power as long as possible ...
+ while e >= small_e {
+ self.mul_small(small_power);
+ e -= small_e;
+ }
+
+ // ... then finish off the remainder.
+ let mut rest_power = 1;
+ for _ in 0..e {
+ rest_power *= 5;
+ }
+ self.mul_small(rest_power);
+
+ self
+ }
+
+
+ /// Multiplies itself by a number described by `other[0] + other[1] * 2^W +
+ /// other[2] * 2^(2W) + ...` (where `W` is the number of bits in the digit type)
+ /// and returns its own mutable reference.
+ pub fn mul_digits<'a>(&'a mut self, other: &[$ty]) -> &'a mut $name {
+ // the internal routine. works best when aa.len() <= bb.len().
+ fn mul_inner(ret: &mut [$ty; $n], aa: &[$ty], bb: &[$ty]) -> usize {
+ use num::bignum::FullOps;
+
+ let mut retsz = 0;
+ for (i, &a) in aa.iter().enumerate() {
+ if a == 0 { continue; }
+ let mut sz = bb.len();
+ let mut carry = 0;
+ for (j, &b) in bb.iter().enumerate() {
+ let (c, v) = a.full_mul_add(b, ret[i + j], carry);
+ ret[i + j] = v;
+ carry = c;
+ }
+ if carry > 0 {
+ ret[i + sz] = carry;
+ sz += 1;
+ }
+ if retsz < i + sz {
+ retsz = i + sz;
+ }
+ }
+ retsz
+ }
+
+ let mut ret = [0; $n];
+ let retsz = if self.size < other.len() {
+ mul_inner(&mut ret, &self.digits(), other)
+ } else {
+ mul_inner(&mut ret, other, &self.digits())
+ };
+ self.base = ret;
+ self.size = retsz;
+ self
+ }
+
+ /// Divides itself by a digit-sized `other` and returns its own
+ /// mutable reference *and* the remainder.
+ pub fn div_rem_small(&mut self, other: $ty) -> (&mut $name, $ty) {
+ use num::bignum::FullOps;
+
+ assert!(other > 0);
+
+ let sz = self.size;
+ let mut borrow = 0;
+ for a in self.base[..sz].iter_mut().rev() {
+ let (q, r) = (*a).full_div_rem(other, borrow);
+ *a = q;
+ borrow = r;
+ }
+ (self, borrow)
+ }
+
+ /// Divide self by another bignum, overwriting `q` with the quotient and `r` with the
+ /// remainder.
+ pub fn div_rem(&self, d: &$name, q: &mut $name, r: &mut $name) {
+ use mem;
+
+ // Stupid slow base-2 long division taken from
+ // https://en.wikipedia.org/wiki/Division_algorithm
+ // FIXME use a greater base ($ty) for the long division.
+ assert!(!d.is_zero());
+ let digitbits = mem::size_of::<$ty>() * 8;
+ for digit in &mut q.base[..] {
+ *digit = 0;
+ }
+ for digit in &mut r.base[..] {
+ *digit = 0;
+ }
+ r.size = d.size;
+ q.size = 1;
+ let mut q_is_zero = true;
+ let end = self.bit_length();
+ for i in (0..end).rev() {
+ r.mul_pow2(1);
+ r.base[0] |= self.get_bit(i) as $ty;
+ if &*r >= d {
+ r.sub(d);
+ // Set bit `i` of q to 1.
+ let digit_idx = i / digitbits;
+ let bit_idx = i % digitbits;
+ if q_is_zero {
+ q.size = digit_idx + 1;
+ q_is_zero = false;
+ }
+ q.base[digit_idx] |= 1 << bit_idx;
+ }
+ }
+ debug_assert!(q.base[q.size..].iter().all(|&d| d == 0));
+ debug_assert!(r.base[r.size..].iter().all(|&d| d == 0));
+ }
+ }
+
+ impl ::cmp::PartialEq for $name {
+ fn eq(&self, other: &$name) -> bool { self.base[..] == other.base[..] }
+ }
+
+ impl ::cmp::Eq for $name {
+ }
+
+ impl ::cmp::PartialOrd for $name {
+ fn partial_cmp(&self, other: &$name) -> ::option::Option<::cmp::Ordering> {
+ ::option::Option::Some(self.cmp(other))
+ }
+ }
+
+ impl ::cmp::Ord for $name {
+ fn cmp(&self, other: &$name) -> ::cmp::Ordering {
+ use cmp::max;
+ let sz = max(self.size, other.size);
+ let lhs = self.base[..sz].iter().cloned().rev();
+ let rhs = other.base[..sz].iter().cloned().rev();
+ lhs.cmp(rhs)
+ }
+ }
+
+ impl ::clone::Clone for $name {
+ fn clone(&self) -> $name {
+ $name { size: self.size, base: self.base }
+ }
+ }
+
+ impl ::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
+ use mem;
+
+ let sz = if self.size < 1 {1} else {self.size};
+ let digitlen = mem::size_of::<$ty>() * 2;
+
+ try!(write!(f, "{:#x}", self.base[sz-1]));
+ for &v in self.base[..sz-1].iter().rev() {
+ try!(write!(f, "_{:01$x}", v, digitlen));
+ }
+ ::result::Result::Ok(())
+ }
+ }
+ )
+}
+
+/// The digit type for `Big32x40`.
+pub type Digit32 = u32;
+
+define_bignum!(Big32x40: type=Digit32, n=40);
+
+// this one is used for testing only.
+#[doc(hidden)]
+pub mod tests {
+ use prelude::v1::*;
+ define_bignum!(Big8x3: type=u8, n=3);
+}
diff --git a/libcore/num/dec2flt/algorithm.rs b/libcore/num/dec2flt/algorithm.rs
new file mode 100644
index 0000000..e33c281
--- /dev/null
+++ b/libcore/num/dec2flt/algorithm.rs
@@ -0,0 +1,357 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The various algorithms from the paper.
+
+use prelude::v1::*;
+use cmp::min;
+use cmp::Ordering::{Less, Equal, Greater};
+use num::diy_float::Fp;
+use num::dec2flt::table;
+use num::dec2flt::rawfp::{self, Unpacked, RawFloat, fp_to_float, next_float, prev_float};
+use num::dec2flt::num::{self, Big};
+
+/// Number of significand bits in Fp
+const P: u32 = 64;
+
+// We simply store the best approximation for *all* exponents, so the variable "h" and the
+// associated conditions can be omitted. This trades performance for a couple kilobytes of space.
+
+fn power_of_ten(e: i16) -> Fp {
+ assert!(e >= table::MIN_E);
+ let i = e - table::MIN_E;
+ let sig = table::POWERS.0[i as usize];
+ let exp = table::POWERS.1[i as usize];
+ Fp { f: sig, e: exp }
+}
+
+/// The fast path of Bellerophon using machine-sized integers and floats.
+///
+/// This is extracted into a separate function so that it can be attempted before constructing
+/// a bignum.
+///
+/// The fast path crucially depends on arithmetic being correctly rounded, so on x86
+/// without SSE or SSE2 it will be **wrong** (as in, off by one ULP occasionally), because the x87
+/// FPU stack will round to 80 bit first before rounding to 64/32 bit. However, as such hardware
+/// is extremely rare nowadays and in fact all in-tree target triples assume an SSE2-capable
+/// microarchitecture, there is little incentive to deal with that. There's a test that will fail
+/// when SSE or SSE2 is disabled, so people building their own non-SSE copy will get a heads up.
+///
+/// FIXME: It would nevertheless be nice if we had a good way to detect and deal with x87.
+pub fn fast_path<T: RawFloat>(integral: &[u8], fractional: &[u8], e: i64) -> Option<T> {
+ let num_digits = integral.len() + fractional.len();
+ // log_10(f64::max_sig) ~ 15.95. We compare the exact value to max_sig near the end,
+ // this is just a quick, cheap rejection (and also frees the rest of the code from
+ // worrying about underflow).
+ if num_digits > 16 {
+ return None;
+ }
+ if e.abs() >= T::ceil_log5_of_max_sig() as i64 {
+ return None;
+ }
+ let f = num::from_str_unchecked(integral.iter().chain(fractional.iter()));
+ if f > T::max_sig() {
+ return None;
+ }
+ // The case e < 0 cannot be folded into the other branch. Negative powers result in
+ // a repeating fractional part in binary, which are rounded, which causes real
+    // (and occasionally quite significant!) errors in the final result.
+ if e >= 0 {
+ Some(T::from_int(f) * T::short_fast_pow10(e as usize))
+ } else {
+ Some(T::from_int(f) / T::short_fast_pow10(e.abs() as usize))
+ }
+}
+
+/// Algorithm Bellerophon is trivial code justified by non-trivial numeric analysis.
+///
+/// It rounds ``f`` to a float with 64 bit significand and multiplies it by the best approximation
+/// of `10^e` (in the same floating point format). This is often enough to get the correct result.
+/// However, when the result is close to halfway between two adjacent (ordinary) floats, the
+/// compound rounding error from multiplying two approximation means the result may be off by a
+/// few bits. When this happens, the iterative Algorithm R fixes things up.
+///
+/// The hand-wavy "close to halfway" is made precise by the numeric analysis in the paper.
+/// In the words of Clinger:
+///
+/// > Slop, expressed in units of the least significant bit, is an inclusive bound for the error
+/// > accumulated during the floating point calculation of the approximation to f * 10^e. (Slop is
+/// > not a bound for the true error, but bounds the difference between the approximation z and
+/// > the best possible approximation that uses p bits of significand.)
+pub fn bellerophon<T: RawFloat>(f: &Big, e: i16) -> T {
+ let slop;
+ if f <= &Big::from_u64(T::max_sig()) {
+ // The cases abs(e) < log5(2^N) are in fast_path()
+ slop = if e >= 0 { 0 } else { 3 };
+ } else {
+ slop = if e >= 0 { 1 } else { 4 };
+ }
+ let z = rawfp::big_to_fp(f).mul(&power_of_ten(e)).normalize();
+ let exp_p_n = 1 << (P - T::sig_bits() as u32);
+ let lowbits: i64 = (z.f % exp_p_n) as i64;
+ // Is the slop large enough to make a difference when
+ // rounding to n bits?
+ if (lowbits - exp_p_n as i64 / 2).abs() <= slop {
+ algorithm_r(f, e, fp_to_float(z))
+ } else {
+ fp_to_float(z)
+ }
+}
+
+/// An iterative algorithm that improves a floating point approximation of `f * 10^e`.
+///
+/// Each iteration gets one unit in the last place closer, which of course takes terribly long to
+/// converge if `z0` is even mildly off. Luckily, when used as fallback for Bellerophon, the
+/// starting approximation is off by at most one ULP.
+fn algorithm_r<T: RawFloat>(f: &Big, e: i16, z0: T) -> T {
+ let mut z = z0;
+ loop {
+ let raw = z.unpack();
+ let (m, k) = (raw.sig, raw.k);
+ let mut x = f.clone();
+ let mut y = Big::from_u64(m);
+
+ // Find positive integers `x`, `y` such that `x / y` is exactly `(f * 10^e) / (m * 2^k)`.
+ // This not only avoids dealing with the signs of `e` and `k`, we also eliminate the
+ // power of two common to `10^e` and `2^k` to make the numbers smaller.
+ make_ratio(&mut x, &mut y, e, k);
+
+ let m_digits = [(m & 0xFF_FF_FF_FF) as u32, (m >> 32) as u32];
+ // This is written a bit awkwardly because our bignums don't support
+ // negative numbers, so we use the absolute value + sign information.
+ // The multiplication with m_digits can't overflow. If `x` or `y` are large enough that
+ // we need to worry about overflow, then they are also large enough that `make_ratio` has
+ // reduced the fraction by a factor of 2^64 or more.
+ let (d2, d_negative) = if x >= y {
+ // Don't need x any more, save a clone().
+ x.sub(&y).mul_pow2(1).mul_digits(&m_digits);
+ (x, false)
+ } else {
+ // Still need y - make a copy.
+ let mut y = y.clone();
+ y.sub(&x).mul_pow2(1).mul_digits(&m_digits);
+ (y, true)
+ };
+
+ if d2 < y {
+ let mut d2_double = d2;
+ d2_double.mul_pow2(1);
+ if m == T::min_sig() && d_negative && d2_double > y {
+ z = prev_float(z);
+ } else {
+ return z;
+ }
+ } else if d2 == y {
+ if m % 2 == 0 {
+ if m == T::min_sig() && d_negative {
+ z = prev_float(z);
+ } else {
+ return z;
+ }
+ } else if d_negative {
+ z = prev_float(z);
+ } else {
+ z = next_float(z);
+ }
+ } else if d_negative {
+ z = prev_float(z);
+ } else {
+ z = next_float(z);
+ }
+ }
+}
+
+/// Given `x = f` and `y = m` where `f` represent input decimal digits as usual and `m` is the
+/// significand of a floating point approximation, make the ratio `x / y` equal to
+/// `(f * 10^e) / (m * 2^k)`, possibly reduced by a power of two both have in common.
+fn make_ratio(x: &mut Big, y: &mut Big, e: i16, k: i16) {
+ let (e_abs, k_abs) = (e.abs() as usize, k.abs() as usize);
+ if e >= 0 {
+ if k >= 0 {
+ // x = f * 10^e, y = m * 2^k, except that we reduce the fraction by some power of two.
+ let common = min(e_abs, k_abs);
+ x.mul_pow5(e_abs).mul_pow2(e_abs - common);
+ y.mul_pow2(k_abs - common);
+ } else {
+ // x = f * 10^e * 2^abs(k), y = m
+ // This can't overflow because it requires positive `e` and negative `k`, which can
+ // only happen for values extremely close to 1, which means that `e` and `k` will be
+ // comparatively tiny.
+ x.mul_pow5(e_abs).mul_pow2(e_abs + k_abs);
+ }
+ } else {
+ if k >= 0 {
+ // x = f, y = m * 10^abs(e) * 2^k
+ // This can't overflow either, see above.
+ y.mul_pow5(e_abs).mul_pow2(k_abs + e_abs);
+ } else {
+ // x = f * 2^abs(k), y = m * 10^abs(e), again reducing by a common power of two.
+ let common = min(e_abs, k_abs);
+ x.mul_pow2(k_abs - common);
+ y.mul_pow5(e_abs).mul_pow2(e_abs - common);
+ }
+ }
+}
+
+/// Conceptually, Algorithm M is the simplest way to convert a decimal to a float.
+///
+/// We form a ratio that is equal to `f * 10^e`, then throwing in powers of two until it gives
+/// a valid float significand. The binary exponent `k` is the number of times we multiplied
+/// numerator or denominator by two, i.e., at all times `f * 10^e` equals `(u / v) * 2^k`.
+/// When we have found our significand, we only need to round by inspecting the remainder of the
+/// division, which is done in helper functions further below.
+///
+/// This algorithm is super slow, even with the optimization described in `quick_start()`.
+/// However, it's the simplest of the algorithms to adapt for overflow, underflow, and subnormal
+/// results. This implementation takes over when Bellerophon and Algorithm R are overwhelmed.
+/// Detecting underflow and overflow is easy: The ratio still isn't an in-range significand,
+/// yet the minimum/maximum exponent has been reached. In the case of overflow, we simply return
+/// infinity.
+///
+/// Handling underflow and subnormals is trickier. One big problem is that, with the minimum
+/// exponent, the ratio might still be too large for a significand. See underflow() for details.
+pub fn algorithm_m<T: RawFloat>(f: &Big, e: i16) -> T {
+ let mut u;
+ let mut v;
+ let e_abs = e.abs() as usize;
+ let mut k = 0;
+ if e < 0 {
+ u = f.clone();
+ v = Big::from_small(1);
+ v.mul_pow5(e_abs).mul_pow2(e_abs);
+ } else {
+ // FIXME possible optimization: generalize big_to_fp so that we can do the equivalent of
+ // fp_to_float(big_to_fp(u)) here, only without the double rounding.
+ u = f.clone();
+ u.mul_pow5(e_abs).mul_pow2(e_abs);
+ v = Big::from_small(1);
+ }
+ quick_start::<T>(&mut u, &mut v, &mut k);
+ let mut rem = Big::from_small(0);
+ let mut x = Big::from_small(0);
+ let min_sig = Big::from_u64(T::min_sig());
+ let max_sig = Big::from_u64(T::max_sig());
+ loop {
+ u.div_rem(&v, &mut x, &mut rem);
+ if k == T::min_exp_int() {
+ // We have to stop at the minimum exponent, if we wait until `k < T::min_exp_int()`,
+ // then we'd be off by a factor of two. Unfortunately this means we have to special-
+ // case normal numbers with the minimum exponent.
+ // FIXME find a more elegant formulation, but run the `tiny-pow10` test to make sure
+ // that it's actually correct!
+ if x >= min_sig && x <= max_sig {
+ break;
+ }
+ return underflow(x, v, rem);
+ }
+ if k > T::max_exp_int() {
+ return T::infinity();
+ }
+ if x < min_sig {
+ u.mul_pow2(1);
+ k -= 1;
+ } else if x > max_sig {
+ v.mul_pow2(1);
+ k += 1;
+ } else {
+ break;
+ }
+ }
+ let q = num::to_u64(&x);
+ let z = rawfp::encode_normal(Unpacked::new(q, k));
+ round_by_remainder(v, rem, q, z)
+}
+
+/// Skip over most AlgorithmM iterations by checking the bit length.
+fn quick_start<T: RawFloat>(u: &mut Big, v: &mut Big, k: &mut i16) {
+ // The bit length is an estimate of the base two logarithm, and log(u / v) = log(u) - log(v).
+ // The estimate is off by at most 1, but always an under-estimate, so the error on log(u)
+ // and log(v) are of the same sign and cancel out (if both are large). Therefore the error
+ // for log(u / v) is at most one as well.
+ // The target ratio is one where u/v is in an in-range significand. Thus our termination
+ // condition is log2(u / v) being the significand bits, plus/minus one.
+ // FIXME Looking at the second bit could improve the estimate and avoid some more divisions.
+ let target_ratio = T::sig_bits() as i16;
+ let log2_u = u.bit_length() as i16;
+ let log2_v = v.bit_length() as i16;
+ let mut u_shift: i16 = 0;
+ let mut v_shift: i16 = 0;
+ assert!(*k == 0);
+ loop {
+ if *k == T::min_exp_int() {
+ // Underflow or subnormal. Leave it to the main function.
+ break;
+ }
+ if *k == T::max_exp_int() {
+ // Overflow. Leave it to the main function.
+ break;
+ }
+ let log2_ratio = (log2_u + u_shift) - (log2_v + v_shift);
+ if log2_ratio < target_ratio - 1 {
+ u_shift += 1;
+ *k -= 1;
+ } else if log2_ratio > target_ratio + 1 {
+ v_shift += 1;
+ *k += 1;
+ } else {
+ break;
+ }
+ }
+ u.mul_pow2(u_shift as usize);
+ v.mul_pow2(v_shift as usize);
+}
+
+fn underflow<T: RawFloat>(x: Big, v: Big, rem: Big) -> T {
+ if x < Big::from_u64(T::min_sig()) {
+ let q = num::to_u64(&x);
+ let z = rawfp::encode_subnormal(q);
+ return round_by_remainder(v, rem, q, z);
+ }
+ // Ratio isn't an in-range significand with the minimum exponent, so we need to round off
+ // excess bits and adjust the exponent accordingly. The real value now looks like this:
+ //
+ // x lsb
+ // /--------------\/
+ // 1010101010101010.10101010101010 * 2^k
+ // \-----/\-------/ \------------/
+ // q trunc. (represented by rem)
+ //
+ // Therefore, when the rounded-off bits are != 0.5 ULP, they decide the rounding
+ // on their own. When they are equal and the remainder is non-zero, the value still
+    // needs to be rounded up. Only when the rounded off bits are 1/2 and the remainder
+ // is zero, we have a half-to-even situation.
+ let bits = x.bit_length();
+ let lsb = bits - T::sig_bits() as usize;
+ let q = num::get_bits(&x, lsb, bits);
+ let k = T::min_exp_int() + lsb as i16;
+ let z = rawfp::encode_normal(Unpacked::new(q, k));
+ let q_even = q % 2 == 0;
+ match num::compare_with_half_ulp(&x, lsb) {
+ Greater => next_float(z),
+ Less => z,
+ Equal if rem.is_zero() && q_even => z,
+ Equal => next_float(z),
+ }
+}
+
+/// Ordinary round-to-even, obfuscated by having to round based on the remainder of a division.
+fn round_by_remainder<T: RawFloat>(v: Big, r: Big, q: u64, z: T) -> T {
+ let mut v_minus_r = v;
+ v_minus_r.sub(&r);
+ if r < v_minus_r {
+ z
+ } else if r > v_minus_r {
+ next_float(z)
+ } else if q % 2 == 0 {
+ z
+ } else {
+ next_float(z)
+ }
+}
diff --git a/libcore/num/dec2flt/mod.rs b/libcore/num/dec2flt/mod.rs
new file mode 100644
index 0000000..022bd84
--- /dev/null
+++ b/libcore/num/dec2flt/mod.rs
@@ -0,0 +1,332 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Converting decimal strings into IEEE 754 binary floating point numbers.
+//!
+//! # Problem statement
+//!
+//! We are given a decimal string such as `12.34e56`. This string consists of integral (`12`),
+//! fractional (`34`), and exponent (`56`) parts. All parts are optional and interpreted as zero
+//! when missing.
+//!
+//! We seek the IEEE 754 floating point number that is closest to the exact value of the decimal
+//! string. It is well-known that many decimal strings do not have terminating representations in
+//! base two, so we round to 0.5 units in the last place (in other words, as well as possible).
+//! Ties, decimal values exactly half-way between two consecutive floats, are resolved with the
+//! half-to-even strategy, also known as banker's rounding.
+//!
+//! Needless to say, this is quite hard, both in terms of implementation complexity and in terms
+//! of CPU cycles taken.
+//!
+//! # Implementation
+//!
+//! First, we ignore signs. Or rather, we remove it at the very beginning of the conversion
+//! process and re-apply it at the very end. This is correct in all edge cases since IEEE
+//! floats are symmetric around zero, negating one simply flips the first bit.
+//!
+//! Then we remove the decimal point by adjusting the exponent: Conceptually, `12.34e56` turns
+//! into `1234e54`, which we describe with a positive integer `f = 1234` and an integer `e = 54`.
+//! The `(f, e)` representation is used by almost all code past the parsing stage.
+//!
+//! We then try a long chain of progressively more general and expensive special cases using
+//! machine-sized integers and small, fixed-sized floating point numbers (first `f32`/`f64`, then
+//! a type with 64 bit significand, `Fp`). When all these fail, we bite the bullet and resort to a
+//! simple but very slow algorithm that involves computing `f * 10^e` fully and doing an iterative
+//! search for the best approximation.
+//!
+//! Primarily, this module and its children implement the algorithms described in:
+//! "How to Read Floating Point Numbers Accurately" by William D. Clinger,
+//! available online: http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.4152
+//!
+//! In addition, there are numerous helper functions that are used in the paper but not available
+//! in Rust (or at least in core). Our version is additionally complicated by the need to handle
+//! overflow and underflow and the desire to handle subnormal numbers. Bellerophon and
+//! Algorithm R have trouble with overflow, subnormals, and underflow. We conservatively switch to
+//! Algorithm M (with the modifications described in section 8 of the paper) well before the
+//! inputs get into the critical region.
+//!
+//! Another aspect that needs attention is the ``RawFloat`` trait by which almost all functions
+//! are parametrized. One might think that it's enough to parse to `f64` and cast the result to
+//! `f32`. Unfortunately this is not the world we live in, and this has nothing to do with using
+//! base two or half-to-even rounding.
+//!
+//! Consider for example two types `d2` and `d4` representing a decimal type with two decimal
+//! digits and four decimal digits each and take "0.01499" as input. Let's use half-up rounding.
+//! Going directly to two decimal digits gives `0.01`, but if we round to four digits first,
+//! we get `0.0150`, which is then rounded up to `0.02`. The same principle applies to other
+//! operations as well, if you want 0.5 ULP accuracy you need to do *everything* in full precision
+//! and round *exactly once, at the end*, by considering all truncated bits at once.
+//!
+//! FIXME Although some code duplication is necessary, perhaps parts of the code could be shuffled
+//! around such that less code is duplicated. Large parts of the algorithms are independent of the
+//! float type to output, or only needs access to a few constants, which could be passed in as
+//! parameters.
+//!
+//! # Other
+//!
+//! The conversion should *never* panic. There are assertions and explicit panics in the code,
+//! but they should never be triggered and only serve as internal sanity checks. Any panics should
+//! be considered a bug.
+//!
+//! There are unit tests but they are woefully inadequate at ensuring correctness, they only cover
+//! a small percentage of possible errors. Far more extensive tests are located in the directory
+//! `src/etc/test-float-parse` as a Python script.
+//!
+//! A note on integer overflow: Many parts of this file perform arithmetic with the decimal
+//! exponent `e`. Primarily, we shift the decimal point around: Before the first decimal digit,
+//! after the last decimal digit, and so on. This could overflow if done carelessly. We rely on
+//! the parsing submodule to only hand out sufficiently small exponents, where "sufficient" means
+//! "such that the exponent +/- the number of decimal digits fits into a 64 bit integer".
+//! Larger exponents are accepted, but we don't do arithmetic with them, they are immediately
+//! turned into {positive,negative} {zero,infinity}.
+
+#![doc(hidden)]
+#![unstable(feature = "dec2flt",
+ reason = "internal routines only exposed for testing",
+ issue = "0")]
+
+use prelude::v1::*;
+use fmt;
+use str::FromStr;
+
+use self::parse::{parse_decimal, Decimal, Sign, ParseResult};
+use self::num::digits_to_big;
+use self::rawfp::RawFloat;
+
+mod algorithm;
+mod table;
+mod num;
+// These two have their own tests.
+pub mod rawfp;
+pub mod parse;
+
+// Generates the `FromStr` impl for a float type. A macro keeps the `f32` and `f64`
+// impls (and their doc comments) guaranteed identical.
+macro_rules! from_str_float_impl {
+    ($t:ty) => {
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl FromStr for $t {
+            type Err = ParseFloatError;
+
+            /// Converts a string in base 10 to a float.
+            /// Accepts an optional decimal exponent.
+            ///
+            /// This function accepts strings such as
+            ///
+            /// * '3.14'
+            /// * '-3.14'
+            /// * '2.5E10', or equivalently, '2.5e10'
+            /// * '2.5E-10'
+            /// * '.' (understood as 0)
+            /// * '5.'
+            /// * '.5', or, equivalently, '0.5'
+            /// * 'inf', '-inf', 'NaN'
+            ///
+            /// Leading and trailing whitespace represent an error.
+            ///
+            /// # Arguments
+            ///
+            /// * src - A string
+            ///
+            /// # Return value
+            ///
+            /// `Err(ParseFloatError)` if the string did not represent a valid
+            /// number. Otherwise, `Ok(n)` where `n` is the floating-point
+            /// number represented by `src`.
+            #[inline]
+            fn from_str(src: &str) -> Result<Self, ParseFloatError> {
+                dec2flt(src)
+            }
+        }
+    }
+}
+from_str_float_impl!(f32);
+from_str_float_impl!(f64);
+
+/// An error which can be returned when parsing a float.
+///
+/// This error is used as the error type for the [`FromStr`] implementation
+/// for [`f32`] and [`f64`].
+///
+/// [`FromStr`]: ../str/trait.FromStr.html
+/// [`f32`]: ../../std/primitive.f32.html
+/// [`f64`]: ../../std/primitive.f64.html
+#[derive(Debug, Clone, PartialEq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct ParseFloatError {
+    // Private discriminant; exposed only through `__description`/`Display`.
+    kind: FloatErrorKind
+}
+
+/// The two ways a float literal can fail to parse.
+#[derive(Debug, Clone, PartialEq)]
+enum FloatErrorKind {
+    // The input string was empty.
+    Empty,
+    // The input string was non-empty but not a valid float literal.
+    Invalid,
+}
+
+impl ParseFloatError {
+    /// Returns a static description of the error, used by the `Error` trait impl
+    /// (in libstd) and by the `Display` impl below.
+    #[unstable(feature = "int_error_internals",
+               reason = "available through Error trait and this method should \
+                         not be exposed publicly",
+               issue = "0")]
+    #[doc(hidden)]
+    pub fn __description(&self) -> &str {
+        match self.kind {
+            FloatErrorKind::Empty => "cannot parse float from empty string",
+            FloatErrorKind::Invalid => "invalid float literal",
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for ParseFloatError {
+    // Delegates to the canonical description string above.
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.__description().fmt(f)
+    }
+}
+
+/// Shorthand constructor for the "empty string" parse error.
+fn pfe_empty() -> ParseFloatError {
+    ParseFloatError { kind: FloatErrorKind::Empty }
+}
+
+/// Shorthand constructor for the "invalid literal" parse error.
+fn pfe_invalid() -> ParseFloatError {
+    ParseFloatError { kind: FloatErrorKind::Invalid }
+}
+
+/// Split decimal string into sign and the rest, without inspecting or validating the rest.
+///
+/// Precondition: `s` is non-empty (indexing `[0]` would panic otherwise); the only caller,
+/// `dec2flt`, checks this first.
+fn extract_sign(s: &str) -> (Sign, &str) {
+    match s.as_bytes()[0] {
+        b'+' => (Sign::Positive, &s[1..]),
+        b'-' => (Sign::Negative, &s[1..]),
+        // If the string is invalid, we never use the sign, so we don't need to validate here.
+        _ => (Sign::Positive, s),
+    }
+}
+
+/// Convert a decimal string into a floating point number.
+///
+/// Top-level driver: strips the sign, parses, dispatches to `convert`, and re-applies the
+/// sign at the very end (valid because IEEE floats are symmetric around zero).
+fn dec2flt<T: RawFloat>(s: &str) -> Result<T, ParseFloatError> {
+    if s.is_empty() {
+        return Err(pfe_empty())
+    }
+    let (sign, s) = extract_sign(s);
+    let flt = match parse_decimal(s) {
+        ParseResult::Valid(decimal) => convert(decimal)?,
+        ParseResult::ShortcutToInf => T::infinity(),
+        ParseResult::ShortcutToZero => T::zero(),
+        // Only the exact spellings "inf" and "NaN" are accepted here; everything else
+        // that failed to parse is an error.
+        ParseResult::Invalid => match s {
+            "inf" => T::infinity(),
+            "NaN" => T::nan(),
+            _ => { return Err(pfe_invalid()); }
+        }
+    };
+
+    match sign {
+        Sign::Positive => Ok(flt),
+        Sign::Negative => Ok(-flt),
+    }
+}
+
+/// The main workhorse for the decimal-to-float conversion: Orchestrate all the preprocessing
+/// and figure out which algorithm should do the actual conversion.
+fn convert<T: RawFloat>(mut decimal: Decimal) -> Result<T, ParseFloatError> {
+    simplify(&mut decimal);
+    if let Some(x) = trivial_cases(&decimal) {
+        return Ok(x);
+    }
+    // Remove/shift out the decimal point.
+    let e = decimal.exp - decimal.fractional.len() as i64;
+    if let Some(x) = algorithm::fast_path(decimal.integral, decimal.fractional, e) {
+        return Ok(x);
+    }
+    // Big32x40 is limited to 1280 bits, which translates to about 385 decimal digits.
+    // If we exceed this, we'll crash, so we error out before getting too close (within 10^10).
+    let upper_bound = bound_intermediate_digits(&decimal, e);
+    if upper_bound > 375 {
+        return Err(pfe_invalid());
+    }
+    let f = digits_to_big(decimal.integral, decimal.fractional);
+
+    // Now the exponent certainly fits in 16 bit, which is used throughout the main algorithms.
+    let e = e as i16;
+    // FIXME These bounds are rather conservative. A more careful analysis of the failure modes
+    // of Bellerophon could allow using it in more cases for a massive speed up.
+    let exponent_in_range = table::MIN_E <= e && e <= table::MAX_E;
+    let value_in_range = upper_bound <= T::max_normal_digits() as u64;
+    if exponent_in_range && value_in_range {
+        // Fast-ish approximate algorithm; correct within the ranges checked above.
+        Ok(algorithm::bellerophon(&f, e))
+    } else {
+        // Slow but universally correct fallback (Algorithm M).
+        Ok(algorithm::algorithm_m(&f, e))
+    }
+}
+
+// As written, this optimizes badly (see #27130, though it refers to an old version of the code).
+// `inline(always)` is a workaround for that. There are only two call sites overall and it doesn't
+// make code size worse.
+
+/// Strip zeros where possible, even when this requires changing the exponent
+#[inline(always)]
+fn simplify(decimal: &mut Decimal) {
+    let is_zero = &|&&d: &&u8| -> bool { d == b'0' };
+    // Trimming these zeros does not change anything but may enable the fast path (< 15 digits).
+    let leading_zeros = decimal.integral.iter().take_while(is_zero).count();
+    decimal.integral = &decimal.integral[leading_zeros..];
+    let trailing_zeros = decimal.fractional.iter().rev().take_while(is_zero).count();
+    let end = decimal.fractional.len() - trailing_zeros;
+    decimal.fractional = &decimal.fractional[..end];
+    // Simplify numbers of the form 0.0...x and x...0.0, adjusting the exponent accordingly.
+    // This may not always be a win (possibly pushes some numbers out of the fast path), but it
+    // simplifies other parts significantly (notably, approximating the magnitude of the value).
+    if decimal.integral.is_empty() {
+        // 0.00...x: drop the fractional leading zeros and compensate via the exponent.
+        let leading_zeros = decimal.fractional.iter().take_while(is_zero).count();
+        decimal.fractional = &decimal.fractional[leading_zeros..];
+        decimal.exp -= leading_zeros as i64;
+    } else if decimal.fractional.is_empty() {
+        // x...00.0: drop the integral trailing zeros and compensate via the exponent.
+        let trailing_zeros = decimal.integral.iter().rev().take_while(is_zero).count();
+        let end = decimal.integral.len() - trailing_zeros;
+        decimal.integral = &decimal.integral[..end];
+        decimal.exp += trailing_zeros as i64;
+    }
+}
+
+/// Quick and dirty upper bound on the size (log10) of the largest value that Algorithm R and
+/// Algorithm M will compute while working on the given decimal.
+fn bound_intermediate_digits(decimal: &Decimal, e: i64) -> u64 {
+    // We don't need to worry too much about overflow here thanks to trivial_cases() and the
+    // parser, which filter out the most extreme inputs for us.
+    let f_len: u64 = decimal.integral.len() as u64 + decimal.fractional.len() as u64;
+    if e >= 0 {
+        // In the case e >= 0, both algorithms compute about `f * 10^e`. Algorithm R proceeds to
+        // do some complicated calculations with this but we can ignore that for the upper bound
+        // because it also reduces the fraction beforehand, so we have plenty of buffer there.
+        f_len + (e as u64)
+    } else {
+        // If e < 0, Algorithm R does roughly the same thing, but Algorithm M differs:
+        // It tries to find a positive number k such that `f << k / 10^e` is an in-range
+        // significand. This will result in about `2^53 * f * 10^e` < `10^17 * f * 10^e`.
+        // One input that triggers this is 0.33...33 (375 x 3).
+        f_len + (e.abs() as u64) + 17
+    }
+}
+
+/// Detect obvious overflows and underflows without even looking at the decimal digits.
+///
+/// Returns `Some(zero)` / `Some(infinity)` for values certainly out of range, `None` otherwise.
+fn trivial_cases<T: RawFloat>(decimal: &Decimal) -> Option<T> {
+    // There were zeros but they were stripped by simplify()
+    if decimal.integral.is_empty() && decimal.fractional.is_empty() {
+        return Some(T::zero());
+    }
+    // This is a crude approximation of ceil(log10(the real value)). We don't need to worry too
+    // much about overflow here because the input length is tiny (at least compared to 2^64) and
+    // the parser already handles exponents whose absolute value is greater than 10^18
+    // (which is still 10^19 short of 2^64).
+    let max_place = decimal.exp + decimal.integral.len() as i64;
+    if max_place > T::inf_cutoff() {
+        return Some(T::infinity());
+    } else if max_place < T::zero_cutoff() {
+        return Some(T::zero());
+    }
+    None
+}
diff --git a/libcore/num/dec2flt/num.rs b/libcore/num/dec2flt/num.rs
new file mode 100644
index 0000000..81e7856
--- /dev/null
+++ b/libcore/num/dec2flt/num.rs
@@ -0,0 +1,94 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Utility functions for bignums that don't make too much sense to turn into methods.
+
+// FIXME This module's name is a bit unfortunate, since other modules also import `core::num`.
+
+use prelude::v1::*;
+use cmp::Ordering::{self, Less, Equal, Greater};
+
+pub use num::bignum::Big32x40 as Big;
+
+/// Test whether truncating all bits less significant than `ones_place` introduces
+/// a relative error less, equal, or greater than 0.5 ULP.
+pub fn compare_with_half_ulp(f: &Big, ones_place: usize) -> Ordering {
+    // Nothing is truncated at all, so the error is certainly below 0.5 ULP.
+    if ones_place == 0 {
+        return Less;
+    }
+    let half_bit = ones_place - 1;
+    if f.get_bit(half_bit) == 0 {
+        // < 0.5 ULP
+        return Less;
+    }
+    // If all remaining bits are zero, it's = 0.5 ULP, otherwise > 0.5
+    // If there are no more bits (half_bit == 0), the below also correctly returns Equal.
+    for i in 0..half_bit {
+        if f.get_bit(i) == 1 {
+            return Greater;
+        }
+    }
+    Equal
+}
+
+/// Convert an ASCII string containing only decimal digits to a `u64`.
+///
+/// Does not perform checks for overflow or invalid characters, so if the caller is not careful,
+/// the result is bogus and can panic (though it won't be `unsafe`). Additionally, empty strings
+/// are treated as zero. This function exists because
+///
+/// 1. using `FromStr` on `&[u8]` requires `from_utf8_unchecked`, which is bad, and
+/// 2. piecing together the results of `integral.parse()` and `fractional.parse()` is
+///    more complicated than this entire function.
+pub fn from_str_unchecked<'a, T>(bytes: T) -> u64 where T : IntoIterator<Item=&'a u8> {
+    let mut result = 0;
+    // Plain Horner evaluation in base 10; wrapping/overflow is the caller's problem.
+    for &c in bytes {
+        result = result * 10 + (c - b'0') as u64;
+    }
+    result
+}
+
+/// Convert a string of ASCII digits into a bignum.
+///
+/// Like `from_str_unchecked`, this function relies on the parser to weed out non-digits.
+/// Integral and fractional digits are concatenated, i.e., the result is the digit string
+/// read as one integer (the decimal point is accounted for by the caller's exponent).
+pub fn digits_to_big(integral: &[u8], fractional: &[u8]) -> Big {
+    let mut f = Big::from_small(0);
+    for &c in integral.iter().chain(fractional) {
+        let n = (c - b'0') as u32;
+        f.mul_small(10);
+        f.add_small(n);
+    }
+    f
+}
+
+/// Unwraps a bignum into a 64 bit integer. Panics if the number is too large.
+pub fn to_u64(x: &Big) -> u64 {
+    assert!(x.bit_length() < 64);
+    // The bignum stores 32-bit digits least-significant first, so at most two digits
+    // are needed here (guaranteed by the assert above).
+    let d = x.digits();
+    if d.len() < 2 {
+        d[0] as u64
+    } else {
+        (d[1] as u64) << 32 | d[0] as u64
+    }
+}
+
+
+/// Extract a range of bits.
+///
+/// Index 0 is the least significant bit and the range is half-open as usual.
+/// Panics if asked to extract more bits than fit into the return type.
+pub fn get_bits(x: &Big, start: usize, end: usize) -> u64 {
+    assert!(end - start <= 64);
+    let mut result: u64 = 0;
+    // Assemble from the most significant requested bit down to the least significant.
+    for i in (start..end).rev() {
+        result = result << 1 | x.get_bit(i) as u64;
+    }
+    result
+}
diff --git a/libcore/num/dec2flt/parse.rs b/libcore/num/dec2flt/parse.rs
new file mode 100644
index 0000000..fce1c25
--- /dev/null
+++ b/libcore/num/dec2flt/parse.rs
@@ -0,0 +1,134 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Validating and decomposing a decimal string of the form:
+//!
+//! `(digits | digits? '.'? digits?) (('e' | 'E') ('+' | '-')? digits)?`
+//!
+//! In other words, standard floating-point syntax, with two exceptions: No sign, and no
+//! handling of "inf" and "NaN". These are handled by the driver function (super::dec2flt).
+//!
+//! Although recognizing valid inputs is relatively easy, this module also has to reject the
+//! countless invalid variations, never panic, and perform numerous checks that the other
+//! modules rely on to not panic (or overflow) in turn.
+//! To make matters worse, all that happens in a single pass over the input.
+//! So, be careful when modifying anything, and double-check with the other modules.
+use prelude::v1::*;
+use super::num;
+use self::ParseResult::{Valid, ShortcutToInf, ShortcutToZero, Invalid};
+
+/// The sign of a parsed number (stripped before parsing, re-applied at the end).
+#[derive(Debug)]
+pub enum Sign {
+    Positive,
+    Negative,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+/// The interesting parts of a decimal string.
+pub struct Decimal<'a> {
+    // Digits before the decimal point (may be empty).
+    pub integral: &'a [u8],
+    // Digits after the decimal point (may be empty).
+    pub fractional: &'a [u8],
+    /// The decimal exponent, guaranteed to have fewer than 18 decimal digits.
+    pub exp: i64,
+}
+
+impl<'a> Decimal<'a> {
+    pub fn new(integral: &'a [u8], fractional: &'a [u8], exp: i64) -> Decimal<'a> {
+        Decimal { integral: integral, fractional: fractional, exp: exp }
+    }
+}
+
+/// Outcome of parsing: a valid decomposition, a shortcut for extreme exponents
+/// (see `parse_exp`), or a rejection.
+#[derive(Debug, PartialEq, Eq)]
+pub enum ParseResult<'a> {
+    Valid(Decimal<'a>),
+    ShortcutToInf,
+    ShortcutToZero,
+    Invalid,
+}
+
+/// Check if the input string is a valid floating point number and if so, locate the integral
+/// part, the fractional part, and the exponent in it. Does not handle signs.
+pub fn parse_decimal(s: &str) -> ParseResult {
+    if s.is_empty() {
+        return Invalid;
+    }
+
+    let s = s.as_bytes();
+    let (integral, s) = eat_digits(s);
+
+    match s.first() {
+        // Pure integer, e.g., "123".
+        None => Valid(Decimal::new(integral, b"", 0)),
+        Some(&b'e') | Some(&b'E') => {
+            if integral.is_empty() {
+                return Invalid; // No digits before 'e'
+            }
+
+            parse_exp(integral, b"", &s[1..])
+        }
+        Some(&b'.') => {
+            let (fractional, s) = eat_digits(&s[1..]);
+            // A lone "." (no digits on either side, nothing after) is rejected.
+            if integral.is_empty() && fractional.is_empty() && s.is_empty() {
+                return Invalid;
+            }
+
+            match s.first() {
+                None => Valid(Decimal::new(integral, fractional, 0)),
+                Some(&b'e') | Some(&b'E') => parse_exp(integral, fractional, &s[1..]),
+                _ => Invalid, // Trailing junk after fractional part
+            }
+        }
+        _ => Invalid, // Trailing junk after first digit string
+    }
+}
+
+/// Carve off decimal digits up to the first non-digit character.
+///
+/// Returns the digit prefix and the remaining suffix; either may be empty.
+fn eat_digits(s: &[u8]) -> (&[u8], &[u8]) {
+    let mut i = 0;
+    while i < s.len() && b'0' <= s[i] && s[i] <= b'9' {
+        i += 1;
+    }
+    (&s[..i], &s[i..])
+}
+
+/// Exponent extraction and error checking.
+///
+/// `rest` is the slice immediately after the 'e'/'E'. Returns `ShortcutToInf`/`ShortcutToZero`
+/// for exponents too large to do arithmetic with, per the overflow policy in the module docs.
+fn parse_exp<'a>(integral: &'a [u8], fractional: &'a [u8], rest: &'a [u8]) -> ParseResult<'a> {
+    let (sign, rest) = match rest.first() {
+        Some(&b'-') => (Sign::Negative, &rest[1..]),
+        Some(&b'+') => (Sign::Positive, &rest[1..]),
+        _ => (Sign::Positive, rest),
+    };
+    let (mut number, trailing) = eat_digits(rest);
+    if !trailing.is_empty() {
+        return Invalid; // Trailing junk after exponent
+    }
+    if number.is_empty() {
+        return Invalid; // Empty exponent
+    }
+    // At this point, we certainly have a valid string of digits. It may be too long to put into
+    // an `i64`, but if it's that huge, the input is certainly zero or infinity. Since each zero
+    // in the decimal digits only adjusts the exponent by +/- 1, at exp = 10^18 the input would
+    // have to be 17 exabyte (!) of zeros to get even remotely close to being finite.
+    // This is not exactly a use case we need to cater to.
+    while number.first() == Some(&b'0') {
+        number = &number[1..];
+    }
+    if number.len() >= 18 {
+        return match sign {
+            Sign::Positive => ShortcutToInf,
+            Sign::Negative => ShortcutToZero,
+        };
+    }
+    // <= 17 significant digits always fits in an i64 (10^18 < 2^63), so this cannot overflow.
+    let abs_exp = num::from_str_unchecked(number);
+    let e = match sign {
+        Sign::Positive => abs_exp as i64,
+        Sign::Negative => -(abs_exp as i64),
+    };
+    Valid(Decimal::new(integral, fractional, e))
+}
diff --git a/libcore/num/dec2flt/rawfp.rs b/libcore/num/dec2flt/rawfp.rs
new file mode 100644
index 0000000..2099c6a
--- /dev/null
+++ b/libcore/num/dec2flt/rawfp.rs
@@ -0,0 +1,368 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Bit fiddling on positive IEEE 754 floats. Negative numbers aren't and needn't be handled.
+//! Normal floating point numbers have a canonical representation as (frac, exp) such that the
+//! value is 2^exp * (1 + sum(frac[N-i] / 2^i)) where N is the number of bits. Subnormals are
+//! slightly different and weird, but the same principle applies.
+//!
+//! Here, however, we represent them as (sig, k) with f positive, such that the value is f * 2^e.
+//! Besides making the "hidden bit" explicit, this changes the exponent by the so-called
+//! mantissa shift.
+//!
+//! Put another way, normally floats are written as (1) but here they are written as (2):
+//!
+//! 1. `1.101100...11 * 2^m`
+//! 2. `1101100...11 * 2^n`
+//!
+//! We call (1) the **fractional representation** and (2) the **integral representation**.
+//!
+//! Many functions in this module only handle normal numbers. The dec2flt routines conservatively
+//! take the universally-correct slow path (Algorithm M) for very small and very large numbers.
+//! That algorithm needs only next_float() which does handle subnormals and zeros.
+use prelude::v1::*;
+use u32;
+use cmp::Ordering::{Less, Equal, Greater};
+use ops::{Mul, Div, Neg};
+use fmt::{Debug, LowerExp};
+use mem::transmute;
+use num::diy_float::Fp;
+use num::FpCategory::{Infinite, Zero, Subnormal, Normal, Nan};
+use num::Float;
+use num::dec2flt::num::{self, Big};
+use num::dec2flt::table;
+
+/// A decoded float in integral representation: the value is `sig * 2^k`.
+#[derive(Copy, Clone, Debug)]
+pub struct Unpacked {
+    // Significand including the (explicit) hidden bit.
+    pub sig: u64,
+    // Exponent with the mantissa shift already applied.
+    pub k: i16,
+}
+
+impl Unpacked {
+    pub fn new(sig: u64, k: i16) -> Self {
+        Unpacked { sig: sig, k: k }
+    }
+}
+
+/// A helper trait to avoid duplicating basically all the conversion code for `f32` and `f64`.
+///
+/// See the parent module's doc comment for why this is necessary.
+///
+/// Should **never ever** be implemented for other types or be used outside the dec2flt module.
+/// Inherits from `Float` because there is some overlap, but all the reused methods are trivial.
+/// The "methods" (pseudo-constants) with default implementation should not be overridden.
+pub trait RawFloat : Float + Copy + Debug + LowerExp
+                    + Mul<Output=Self> + Div<Output=Self> + Neg<Output=Self>
+{
+    /// Get the raw binary representation of the float.
+    fn transmute(self) -> u64;
+
+    /// Transmute the raw binary representation into a float.
+    fn from_bits(bits: u64) -> Self;
+
+    /// Decode the float.
+    fn unpack(self) -> Unpacked;
+
+    /// Cast from a small integer that can be represented exactly. Panic if the integer can't be
+    /// represented, the other code in this module makes sure to never let that happen.
+    fn from_int(x: u64) -> Self;
+
+    /// Get the value 10^e from a pre-computed table. Panics for e >= ceil_log5_of_max_sig().
+    fn short_fast_pow10(e: usize) -> Self;
+
+    // FIXME Everything that follows should be associated constants, but taking the value of an
+    // associated constant from a type parameter does not work (yet?)
+    // A possible workaround is having a `FloatInfo` struct for all the constants, but so far
+    // the methods aren't painful enough to rewrite.
+
+    /// What the name says. It's easier to hard code than juggling intrinsics and
+    /// hoping LLVM constant folds it.
+    fn ceil_log5_of_max_sig() -> i16;
+
+    /// A conservative bound on the decimal digits of inputs that can't produce overflow or zero or
+    /// subnormals. Probably the decimal exponent of the maximum normal value, hence the name.
+    fn max_normal_digits() -> usize;
+
+    /// When the most significant decimal digit has a place value greater than this, the number
+    /// is certainly rounded to infinity.
+    fn inf_cutoff() -> i64;
+
+    /// When the most significant decimal digit has a place value less than this, the number
+    /// is certainly rounded to zero.
+    fn zero_cutoff() -> i64;
+
+    /// The number of bits in the exponent.
+    fn exp_bits() -> u8;
+
+    /// The number of bits in the significand, *including* the hidden bit.
+    fn sig_bits() -> u8;
+
+    /// The number of bits in the significand, *excluding* the hidden bit.
+    fn explicit_sig_bits() -> u8 {
+        Self::sig_bits() - 1
+    }
+
+    /// The maximum legal exponent in fractional representation.
+    fn max_exp() -> i16 {
+        (1 << (Self::exp_bits() - 1)) - 1
+    }
+
+    /// The minimum legal exponent in fractional representation, excluding subnormals.
+    fn min_exp() -> i16 {
+        -Self::max_exp() + 1
+    }
+
+    /// `MAX_EXP` for integral representation, i.e., with the shift applied.
+    fn max_exp_int() -> i16 {
+        Self::max_exp() - (Self::sig_bits() as i16 - 1)
+    }
+
+    /// `MAX_EXP` encoded (i.e., with offset bias)
+    fn max_encoded_exp() -> i16 {
+        (1 << Self::exp_bits()) - 1
+    }
+
+    /// `MIN_EXP` for integral representation, i.e., with the shift applied.
+    fn min_exp_int() -> i16 {
+        Self::min_exp() - (Self::sig_bits() as i16 - 1)
+    }
+
+    /// The maximum normalized significand in integral representation.
+    fn max_sig() -> u64 {
+        (1 << Self::sig_bits()) - 1
+    }
+
+    /// The minimal normalized significand in integral representation.
+    fn min_sig() -> u64 {
+        1 << (Self::sig_bits() - 1)
+    }
+}
+
+impl RawFloat for f32 {
+    // 23 explicit bits + 1 hidden bit.
+    fn sig_bits() -> u8 {
+        24
+    }
+
+    fn exp_bits() -> u8 {
+        8
+    }
+
+    fn ceil_log5_of_max_sig() -> i16 {
+        11
+    }
+
+    fn transmute(self) -> u64 {
+        let bits: u32 = unsafe { transmute(self) };
+        bits as u64
+    }
+
+    fn from_bits(bits: u64) -> f32 {
+        // NOTE(review): `<` excludes bits == u32::MAX (0xFFFF_FFFF, a NaN pattern) — presumably
+        // intentional as a strict upper-bound check, but `<= u32::MAX` would also be valid. TODO confirm.
+        assert!(bits < u32::MAX as u64, "f32::from_bits: too many bits");
+        unsafe { transmute(bits as u32) }
+    }
+
+    fn unpack(self) -> Unpacked {
+        let (sig, exp, _sig) = self.integer_decode();
+        Unpacked::new(sig, exp)
+    }
+
+    fn from_int(x: u64) -> f32 {
+        // rkruppe is uncertain whether `as` rounds correctly on all platforms.
+        debug_assert!(x as f32 == fp_to_float(Fp { f: x, e: 0 }));
+        x as f32
+    }
+
+    fn short_fast_pow10(e: usize) -> Self {
+        table::F32_SHORT_POWERS[e]
+    }
+
+    fn max_normal_digits() -> usize {
+        35
+    }
+
+    fn inf_cutoff() -> i64 {
+        40
+    }
+
+    fn zero_cutoff() -> i64 {
+        -48
+    }
+}
+
+
+impl RawFloat for f64 {
+    // 52 explicit bits + 1 hidden bit.
+    fn sig_bits() -> u8 {
+        53
+    }
+
+    fn exp_bits() -> u8 {
+        11
+    }
+
+    fn ceil_log5_of_max_sig() -> i16 {
+        23
+    }
+
+    fn transmute(self) -> u64 {
+        let bits: u64 = unsafe { transmute(self) };
+        bits
+    }
+
+    fn from_bits(bits: u64) -> f64 {
+        // All 64-bit patterns are valid f64 bit representations; no range check needed.
+        unsafe { transmute(bits) }
+    }
+
+    fn unpack(self) -> Unpacked {
+        let (sig, exp, _sig) = self.integer_decode();
+        Unpacked::new(sig, exp)
+    }
+
+    fn from_int(x: u64) -> f64 {
+        // rkruppe is uncertain whether `as` rounds correctly on all platforms.
+        debug_assert!(x as f64 == fp_to_float(Fp { f: x, e: 0 }));
+        x as f64
+    }
+
+    fn short_fast_pow10(e: usize) -> Self {
+        table::F64_SHORT_POWERS[e]
+    }
+
+    fn max_normal_digits() -> usize {
+        305
+    }
+
+    fn inf_cutoff() -> i64 {
+        310
+    }
+
+    fn zero_cutoff() -> i64 {
+        -326
+    }
+
+}
+
+/// Convert an Fp to the closest f64. Only handles number that fit into a normalized f64.
+///
+/// Panics when the exponent falls outside the normal range (note the strict comparisons:
+/// the boundaries themselves are rejected).
+pub fn fp_to_float<T: RawFloat>(x: Fp) -> T {
+    let x = x.normalize();
+    // x.f is 64 bit, so x.e has a mantissa shift of 63
+    let e = x.e + 63;
+    if e > T::max_exp() {
+        panic!("fp_to_float: exponent {} too large", e)
+    } else if e > T::min_exp() {
+        encode_normal(round_normal::<T>(x))
+    } else {
+        panic!("fp_to_float: exponent {} too small", e)
+    }
+}
+
+/// Round the 64-bit significand to 53 bit with half-to-even. Does not handle exponent overflow.
+pub fn round_normal<T: RawFloat>(x: Fp) -> Unpacked {
+    let excess = 64 - T::sig_bits() as i16;
+    let half: u64 = 1 << (excess - 1);
+    // Split into kept quotient bits and truncated remainder bits.
+    let (q, rem) = (x.f >> excess, x.f & ((1 << excess) - 1));
+    assert_eq!(q << excess | rem, x.f);
+    // Adjust mantissa shift
+    let k = x.e + excess;
+    if rem < half {
+        // Truncated less than half an ULP: round down.
+        Unpacked::new(q, k)
+    } else if rem == half && (q % 2) == 0 {
+        // Exact tie with even significand: round to even (down).
+        Unpacked::new(q, k)
+    } else if q == T::max_sig() {
+        // Rounding up overflows the significand; carry into the exponent.
+        Unpacked::new(T::min_sig(), k + 1)
+    } else {
+        Unpacked::new(q + 1, k)
+    }
+}
+
+/// Inverse of `RawFloat::unpack()` for normalized numbers.
+/// Panics if the significand or exponent are not valid for normalized numbers.
+pub fn encode_normal<T: RawFloat>(x: Unpacked) -> T {
+    debug_assert!(T::min_sig() <= x.sig && x.sig <= T::max_sig(),
+        "encode_normal: significand not normalized");
+    // Remove the hidden bit
+    let sig_enc = x.sig & !(1 << T::explicit_sig_bits());
+    // Adjust the exponent for exponent bias and mantissa shift
+    let k_enc = x.k + T::max_exp() + T::explicit_sig_bits() as i16;
+    debug_assert!(k_enc != 0 && k_enc < T::max_encoded_exp(),
+        "encode_normal: exponent out of range");
+    // Leave sign bit at 0 ("+"), our numbers are all positive
+    let bits = (k_enc as u64) << T::explicit_sig_bits() | sig_enc;
+    T::from_bits(bits)
+}
+
+/// Construct the subnormal. A mantissa of 0 is allowed and constructs zero.
+pub fn encode_subnormal<T: RawFloat>(significand: u64) -> T {
+    assert!(significand < T::min_sig(), "encode_subnormal: not actually subnormal");
+    // Encoded exponent is 0, the sign bit is 0, so we just have to reinterpret the bits.
+    T::from_bits(significand)
+}
+
+/// Approximate a bignum with an Fp. Rounds within 0.5 ULP with half-to-even.
+pub fn big_to_fp(f: &Big) -> Fp {
+    let end = f.bit_length();
+    assert!(end != 0, "big_to_fp: unexpectedly, input is zero");
+    // Keep at most the 64 most significant bits.
+    let start = end.saturating_sub(64);
+    let leading = num::get_bits(f, start, end);
+    // We cut off all bits prior to the index `start`, i.e., we effectively right-shift by
+    // an amount of `start`, so this is also the exponent we need.
+    let e = start as i16;
+    let rounded_down = Fp { f: leading, e: e }.normalize();
+    // Round (half-to-even) depending on the truncated bits.
+    match num::compare_with_half_ulp(f, start) {
+        Less => rounded_down,
+        Equal if leading % 2 == 0 => rounded_down,
+        Equal | Greater => match leading.checked_add(1) {
+            Some(f) => Fp { f: f, e: e }.normalize(),
+            // Rounding up overflowed the 64-bit significand; carry into the exponent.
+            None => Fp { f: 1 << 63, e: e + 1 },
+        }
+    }
+}
+
+/// Find the largest floating point number strictly smaller than the argument.
+/// Does not handle subnormals, zero, or exponent underflow.
+pub fn prev_float<T: RawFloat>(x: T) -> T {
+    match x.classify() {
+        Infinite => panic!("prev_float: argument is infinite"),
+        Nan => panic!("prev_float: argument is NaN"),
+        Subnormal => panic!("prev_float: argument is subnormal"),
+        Zero => panic!("prev_float: argument is zero"),
+        Normal => {
+            let Unpacked { sig, k } = x.unpack();
+            if sig == T::min_sig() {
+                // Stepping below the smallest significand: borrow from the exponent.
+                encode_normal(Unpacked::new(T::max_sig(), k - 1))
+            } else {
+                encode_normal(Unpacked::new(sig - 1, k))
+            }
+        }
+    }
+}
+
+/// Find the smallest floating point number strictly larger than the argument.
+/// This operation is saturating, i.e. next_float(inf) == inf.
+/// Unlike most code in this module, this function does handle zero, subnormals, and infinities.
+/// However, like all other code here, it does not deal with NaN and negative numbers.
+pub fn next_float<T: RawFloat>(x: T) -> T {
+    match x.classify() {
+        Nan => panic!("next_float: argument is NaN"),
+        Infinite => T::infinity(),
+        // This seems too good to be true, but it works.
+        // 0.0 is encoded as the all-zero word. Subnormals are 0x000m...m where m is the mantissa.
+        // In particular, the smallest subnormal is 0x0...01 and the largest is 0x000F...F.
+        // The smallest normal number is 0x0010...0, so this corner case works as well.
+        // If the increment overflows the mantissa, the carry bit increments the exponent as we
+        // want, and the mantissa bits become zero. Because of the hidden bit convention, this
+        // too is exactly what we want!
+        // Finally, f64::MAX + 1 = 7eff...f + 1 = 7ff0...0 = f64::INFINITY.
+        Zero | Subnormal | Normal => {
+            let bits: u64 = x.transmute();
+            T::from_bits(bits + 1)
+        }
+    }
+}
diff --git a/libcore/num/dec2flt/table.rs b/libcore/num/dec2flt/table.rs
new file mode 100644
index 0000000..cb8c943
--- /dev/null
+++ b/libcore/num/dec2flt/table.rs
@@ -0,0 +1,1281 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Tables of approximations of powers of ten.
+//! DO NOT MODIFY: Generated by `src/etc/dec2flt_table.py`
+
+pub const MIN_E: i16 = -305;
+pub const MAX_E: i16 = 305;
+
+pub const POWERS: ([u64; 611], [i16; 611]) = ([
+ 0xe0b62e2929aba83c,
+ 0x8c71dcd9ba0b4926,
+ 0xaf8e5410288e1b6f,
+ 0xdb71e91432b1a24b,
+ 0x892731ac9faf056f,
+ 0xab70fe17c79ac6ca,
+ 0xd64d3d9db981787d,
+ 0x85f0468293f0eb4e,
+ 0xa76c582338ed2622,
+ 0xd1476e2c07286faa,
+ 0x82cca4db847945ca,
+ 0xa37fce126597973d,
+ 0xcc5fc196fefd7d0c,
+ 0xff77b1fcbebcdc4f,
+ 0x9faacf3df73609b1,
+ 0xc795830d75038c1e,
+ 0xf97ae3d0d2446f25,
+ 0x9becce62836ac577,
+ 0xc2e801fb244576d5,
+ 0xf3a20279ed56d48a,
+ 0x9845418c345644d7,
+ 0xbe5691ef416bd60c,
+ 0xedec366b11c6cb8f,
+ 0x94b3a202eb1c3f39,
+ 0xb9e08a83a5e34f08,
+ 0xe858ad248f5c22ca,
+ 0x91376c36d99995be,
+ 0xb58547448ffffb2e,
+ 0xe2e69915b3fff9f9,
+ 0x8dd01fad907ffc3c,
+ 0xb1442798f49ffb4b,
+ 0xdd95317f31c7fa1d,
+ 0x8a7d3eef7f1cfc52,
+ 0xad1c8eab5ee43b67,
+ 0xd863b256369d4a41,
+ 0x873e4f75e2224e68,
+ 0xa90de3535aaae202,
+ 0xd3515c2831559a83,
+ 0x8412d9991ed58092,
+ 0xa5178fff668ae0b6,
+ 0xce5d73ff402d98e4,
+ 0x80fa687f881c7f8e,
+ 0xa139029f6a239f72,
+ 0xc987434744ac874f,
+ 0xfbe9141915d7a922,
+ 0x9d71ac8fada6c9b5,
+ 0xc4ce17b399107c23,
+ 0xf6019da07f549b2b,
+ 0x99c102844f94e0fb,
+ 0xc0314325637a193a,
+ 0xf03d93eebc589f88,
+ 0x96267c7535b763b5,
+ 0xbbb01b9283253ca3,
+ 0xea9c227723ee8bcb,
+ 0x92a1958a7675175f,
+ 0xb749faed14125d37,
+ 0xe51c79a85916f485,
+ 0x8f31cc0937ae58d3,
+ 0xb2fe3f0b8599ef08,
+ 0xdfbdcece67006ac9,
+ 0x8bd6a141006042be,
+ 0xaecc49914078536d,
+ 0xda7f5bf590966849,
+ 0x888f99797a5e012d,
+ 0xaab37fd7d8f58179,
+ 0xd5605fcdcf32e1d7,
+ 0x855c3be0a17fcd26,
+ 0xa6b34ad8c9dfc070,
+ 0xd0601d8efc57b08c,
+ 0x823c12795db6ce57,
+ 0xa2cb1717b52481ed,
+ 0xcb7ddcdda26da269,
+ 0xfe5d54150b090b03,
+ 0x9efa548d26e5a6e2,
+ 0xc6b8e9b0709f109a,
+ 0xf867241c8cc6d4c1,
+ 0x9b407691d7fc44f8,
+ 0xc21094364dfb5637,
+ 0xf294b943e17a2bc4,
+ 0x979cf3ca6cec5b5b,
+ 0xbd8430bd08277231,
+ 0xece53cec4a314ebe,
+ 0x940f4613ae5ed137,
+ 0xb913179899f68584,
+ 0xe757dd7ec07426e5,
+ 0x9096ea6f3848984f,
+ 0xb4bca50b065abe63,
+ 0xe1ebce4dc7f16dfc,
+ 0x8d3360f09cf6e4bd,
+ 0xb080392cc4349ded,
+ 0xdca04777f541c568,
+ 0x89e42caaf9491b61,
+ 0xac5d37d5b79b6239,
+ 0xd77485cb25823ac7,
+ 0x86a8d39ef77164bd,
+ 0xa8530886b54dbdec,
+ 0xd267caa862a12d67,
+ 0x8380dea93da4bc60,
+ 0xa46116538d0deb78,
+ 0xcd795be870516656,
+ 0x806bd9714632dff6,
+ 0xa086cfcd97bf97f4,
+ 0xc8a883c0fdaf7df0,
+ 0xfad2a4b13d1b5d6c,
+ 0x9cc3a6eec6311a64,
+ 0xc3f490aa77bd60fd,
+ 0xf4f1b4d515acb93c,
+ 0x991711052d8bf3c5,
+ 0xbf5cd54678eef0b7,
+ 0xef340a98172aace5,
+ 0x9580869f0e7aac0f,
+ 0xbae0a846d2195713,
+ 0xe998d258869facd7,
+ 0x91ff83775423cc06,
+ 0xb67f6455292cbf08,
+ 0xe41f3d6a7377eeca,
+ 0x8e938662882af53e,
+ 0xb23867fb2a35b28e,
+ 0xdec681f9f4c31f31,
+ 0x8b3c113c38f9f37f,
+ 0xae0b158b4738705f,
+ 0xd98ddaee19068c76,
+ 0x87f8a8d4cfa417ca,
+ 0xa9f6d30a038d1dbc,
+ 0xd47487cc8470652b,
+ 0x84c8d4dfd2c63f3b,
+ 0xa5fb0a17c777cf0a,
+ 0xcf79cc9db955c2cc,
+ 0x81ac1fe293d599c0,
+ 0xa21727db38cb0030,
+ 0xca9cf1d206fdc03c,
+ 0xfd442e4688bd304b,
+ 0x9e4a9cec15763e2f,
+ 0xc5dd44271ad3cdba,
+ 0xf7549530e188c129,
+ 0x9a94dd3e8cf578ba,
+ 0xc13a148e3032d6e8,
+ 0xf18899b1bc3f8ca2,
+ 0x96f5600f15a7b7e5,
+ 0xbcb2b812db11a5de,
+ 0xebdf661791d60f56,
+ 0x936b9fcebb25c996,
+ 0xb84687c269ef3bfb,
+ 0xe65829b3046b0afa,
+ 0x8ff71a0fe2c2e6dc,
+ 0xb3f4e093db73a093,
+ 0xe0f218b8d25088b8,
+ 0x8c974f7383725573,
+ 0xafbd2350644eead0,
+ 0xdbac6c247d62a584,
+ 0x894bc396ce5da772,
+ 0xab9eb47c81f5114f,
+ 0xd686619ba27255a3,
+ 0x8613fd0145877586,
+ 0xa798fc4196e952e7,
+ 0xd17f3b51fca3a7a1,
+ 0x82ef85133de648c5,
+ 0xa3ab66580d5fdaf6,
+ 0xcc963fee10b7d1b3,
+ 0xffbbcfe994e5c620,
+ 0x9fd561f1fd0f9bd4,
+ 0xc7caba6e7c5382c9,
+ 0xf9bd690a1b68637b,
+ 0x9c1661a651213e2d,
+ 0xc31bfa0fe5698db8,
+ 0xf3e2f893dec3f126,
+ 0x986ddb5c6b3a76b8,
+ 0xbe89523386091466,
+ 0xee2ba6c0678b597f,
+ 0x94db483840b717f0,
+ 0xba121a4650e4ddec,
+ 0xe896a0d7e51e1566,
+ 0x915e2486ef32cd60,
+ 0xb5b5ada8aaff80b8,
+ 0xe3231912d5bf60e6,
+ 0x8df5efabc5979c90,
+ 0xb1736b96b6fd83b4,
+ 0xddd0467c64bce4a1,
+ 0x8aa22c0dbef60ee4,
+ 0xad4ab7112eb3929e,
+ 0xd89d64d57a607745,
+ 0x87625f056c7c4a8b,
+ 0xa93af6c6c79b5d2e,
+ 0xd389b47879823479,
+ 0x843610cb4bf160cc,
+ 0xa54394fe1eedb8ff,
+ 0xce947a3da6a9273e,
+ 0x811ccc668829b887,
+ 0xa163ff802a3426a9,
+ 0xc9bcff6034c13053,
+ 0xfc2c3f3841f17c68,
+ 0x9d9ba7832936edc1,
+ 0xc5029163f384a931,
+ 0xf64335bcf065d37d,
+ 0x99ea0196163fa42e,
+ 0xc06481fb9bcf8d3a,
+ 0xf07da27a82c37088,
+ 0x964e858c91ba2655,
+ 0xbbe226efb628afeb,
+ 0xeadab0aba3b2dbe5,
+ 0x92c8ae6b464fc96f,
+ 0xb77ada0617e3bbcb,
+ 0xe55990879ddcaabe,
+ 0x8f57fa54c2a9eab7,
+ 0xb32df8e9f3546564,
+ 0xdff9772470297ebd,
+ 0x8bfbea76c619ef36,
+ 0xaefae51477a06b04,
+ 0xdab99e59958885c5,
+ 0x88b402f7fd75539b,
+ 0xaae103b5fcd2a882,
+ 0xd59944a37c0752a2,
+ 0x857fcae62d8493a5,
+ 0xa6dfbd9fb8e5b88f,
+ 0xd097ad07a71f26b2,
+ 0x825ecc24c8737830,
+ 0xa2f67f2dfa90563b,
+ 0xcbb41ef979346bca,
+ 0xfea126b7d78186bd,
+ 0x9f24b832e6b0f436,
+ 0xc6ede63fa05d3144,
+ 0xf8a95fcf88747d94,
+ 0x9b69dbe1b548ce7d,
+ 0xc24452da229b021c,
+ 0xf2d56790ab41c2a3,
+ 0x97c560ba6b0919a6,
+ 0xbdb6b8e905cb600f,
+ 0xed246723473e3813,
+ 0x9436c0760c86e30c,
+ 0xb94470938fa89bcf,
+ 0xe7958cb87392c2c3,
+ 0x90bd77f3483bb9ba,
+ 0xb4ecd5f01a4aa828,
+ 0xe2280b6c20dd5232,
+ 0x8d590723948a535f,
+ 0xb0af48ec79ace837,
+ 0xdcdb1b2798182245,
+ 0x8a08f0f8bf0f156b,
+ 0xac8b2d36eed2dac6,
+ 0xd7adf884aa879177,
+ 0x86ccbb52ea94baeb,
+ 0xa87fea27a539e9a5,
+ 0xd29fe4b18e88640f,
+ 0x83a3eeeef9153e89,
+ 0xa48ceaaab75a8e2b,
+ 0xcdb02555653131b6,
+ 0x808e17555f3ebf12,
+ 0xa0b19d2ab70e6ed6,
+ 0xc8de047564d20a8c,
+ 0xfb158592be068d2f,
+ 0x9ced737bb6c4183d,
+ 0xc428d05aa4751e4d,
+ 0xf53304714d9265e0,
+ 0x993fe2c6d07b7fac,
+ 0xbf8fdb78849a5f97,
+ 0xef73d256a5c0f77d,
+ 0x95a8637627989aae,
+ 0xbb127c53b17ec159,
+ 0xe9d71b689dde71b0,
+ 0x9226712162ab070e,
+ 0xb6b00d69bb55c8d1,
+ 0xe45c10c42a2b3b06,
+ 0x8eb98a7a9a5b04e3,
+ 0xb267ed1940f1c61c,
+ 0xdf01e85f912e37a3,
+ 0x8b61313bbabce2c6,
+ 0xae397d8aa96c1b78,
+ 0xd9c7dced53c72256,
+ 0x881cea14545c7575,
+ 0xaa242499697392d3,
+ 0xd4ad2dbfc3d07788,
+ 0x84ec3c97da624ab5,
+ 0xa6274bbdd0fadd62,
+ 0xcfb11ead453994ba,
+ 0x81ceb32c4b43fcf5,
+ 0xa2425ff75e14fc32,
+ 0xcad2f7f5359a3b3e,
+ 0xfd87b5f28300ca0e,
+ 0x9e74d1b791e07e48,
+ 0xc612062576589ddb,
+ 0xf79687aed3eec551,
+ 0x9abe14cd44753b53,
+ 0xc16d9a0095928a27,
+ 0xf1c90080baf72cb1,
+ 0x971da05074da7bef,
+ 0xbce5086492111aeb,
+ 0xec1e4a7db69561a5,
+ 0x9392ee8e921d5d07,
+ 0xb877aa3236a4b449,
+ 0xe69594bec44de15b,
+ 0x901d7cf73ab0acd9,
+ 0xb424dc35095cd80f,
+ 0xe12e13424bb40e13,
+ 0x8cbccc096f5088cc,
+ 0xafebff0bcb24aaff,
+ 0xdbe6fecebdedd5bf,
+ 0x89705f4136b4a597,
+ 0xabcc77118461cefd,
+ 0xd6bf94d5e57a42bc,
+ 0x8637bd05af6c69b6,
+ 0xa7c5ac471b478423,
+ 0xd1b71758e219652c,
+ 0x83126e978d4fdf3b,
+ 0xa3d70a3d70a3d70a,
+ 0xcccccccccccccccd,
+ 0x8000000000000000,
+ 0xa000000000000000,
+ 0xc800000000000000,
+ 0xfa00000000000000,
+ 0x9c40000000000000,
+ 0xc350000000000000,
+ 0xf424000000000000,
+ 0x9896800000000000,
+ 0xbebc200000000000,
+ 0xee6b280000000000,
+ 0x9502f90000000000,
+ 0xba43b74000000000,
+ 0xe8d4a51000000000,
+ 0x9184e72a00000000,
+ 0xb5e620f480000000,
+ 0xe35fa931a0000000,
+ 0x8e1bc9bf04000000,
+ 0xb1a2bc2ec5000000,
+ 0xde0b6b3a76400000,
+ 0x8ac7230489e80000,
+ 0xad78ebc5ac620000,
+ 0xd8d726b7177a8000,
+ 0x878678326eac9000,
+ 0xa968163f0a57b400,
+ 0xd3c21bcecceda100,
+ 0x84595161401484a0,
+ 0xa56fa5b99019a5c8,
+ 0xcecb8f27f4200f3a,
+ 0x813f3978f8940984,
+ 0xa18f07d736b90be5,
+ 0xc9f2c9cd04674edf,
+ 0xfc6f7c4045812296,
+ 0x9dc5ada82b70b59e,
+ 0xc5371912364ce305,
+ 0xf684df56c3e01bc7,
+ 0x9a130b963a6c115c,
+ 0xc097ce7bc90715b3,
+ 0xf0bdc21abb48db20,
+ 0x96769950b50d88f4,
+ 0xbc143fa4e250eb31,
+ 0xeb194f8e1ae525fd,
+ 0x92efd1b8d0cf37be,
+ 0xb7abc627050305ae,
+ 0xe596b7b0c643c719,
+ 0x8f7e32ce7bea5c70,
+ 0xb35dbf821ae4f38c,
+ 0xe0352f62a19e306f,
+ 0x8c213d9da502de45,
+ 0xaf298d050e4395d7,
+ 0xdaf3f04651d47b4c,
+ 0x88d8762bf324cd10,
+ 0xab0e93b6efee0054,
+ 0xd5d238a4abe98068,
+ 0x85a36366eb71f041,
+ 0xa70c3c40a64e6c52,
+ 0xd0cf4b50cfe20766,
+ 0x82818f1281ed44a0,
+ 0xa321f2d7226895c8,
+ 0xcbea6f8ceb02bb3a,
+ 0xfee50b7025c36a08,
+ 0x9f4f2726179a2245,
+ 0xc722f0ef9d80aad6,
+ 0xf8ebad2b84e0d58c,
+ 0x9b934c3b330c8577,
+ 0xc2781f49ffcfa6d5,
+ 0xf316271c7fc3908b,
+ 0x97edd871cfda3a57,
+ 0xbde94e8e43d0c8ec,
+ 0xed63a231d4c4fb27,
+ 0x945e455f24fb1cf9,
+ 0xb975d6b6ee39e437,
+ 0xe7d34c64a9c85d44,
+ 0x90e40fbeea1d3a4b,
+ 0xb51d13aea4a488dd,
+ 0xe264589a4dcdab15,
+ 0x8d7eb76070a08aed,
+ 0xb0de65388cc8ada8,
+ 0xdd15fe86affad912,
+ 0x8a2dbf142dfcc7ab,
+ 0xacb92ed9397bf996,
+ 0xd7e77a8f87daf7fc,
+ 0x86f0ac99b4e8dafd,
+ 0xa8acd7c0222311bd,
+ 0xd2d80db02aabd62c,
+ 0x83c7088e1aab65db,
+ 0xa4b8cab1a1563f52,
+ 0xcde6fd5e09abcf27,
+ 0x80b05e5ac60b6178,
+ 0xa0dc75f1778e39d6,
+ 0xc913936dd571c84c,
+ 0xfb5878494ace3a5f,
+ 0x9d174b2dcec0e47b,
+ 0xc45d1df942711d9a,
+ 0xf5746577930d6501,
+ 0x9968bf6abbe85f20,
+ 0xbfc2ef456ae276e9,
+ 0xefb3ab16c59b14a3,
+ 0x95d04aee3b80ece6,
+ 0xbb445da9ca61281f,
+ 0xea1575143cf97227,
+ 0x924d692ca61be758,
+ 0xb6e0c377cfa2e12e,
+ 0xe498f455c38b997a,
+ 0x8edf98b59a373fec,
+ 0xb2977ee300c50fe7,
+ 0xdf3d5e9bc0f653e1,
+ 0x8b865b215899f46d,
+ 0xae67f1e9aec07188,
+ 0xda01ee641a708dea,
+ 0x884134fe908658b2,
+ 0xaa51823e34a7eedf,
+ 0xd4e5e2cdc1d1ea96,
+ 0x850fadc09923329e,
+ 0xa6539930bf6bff46,
+ 0xcfe87f7cef46ff17,
+ 0x81f14fae158c5f6e,
+ 0xa26da3999aef774a,
+ 0xcb090c8001ab551c,
+ 0xfdcb4fa002162a63,
+ 0x9e9f11c4014dda7e,
+ 0xc646d63501a1511e,
+ 0xf7d88bc24209a565,
+ 0x9ae757596946075f,
+ 0xc1a12d2fc3978937,
+ 0xf209787bb47d6b85,
+ 0x9745eb4d50ce6333,
+ 0xbd176620a501fc00,
+ 0xec5d3fa8ce427b00,
+ 0x93ba47c980e98ce0,
+ 0xb8a8d9bbe123f018,
+ 0xe6d3102ad96cec1e,
+ 0x9043ea1ac7e41393,
+ 0xb454e4a179dd1877,
+ 0xe16a1dc9d8545e95,
+ 0x8ce2529e2734bb1d,
+ 0xb01ae745b101e9e4,
+ 0xdc21a1171d42645d,
+ 0x899504ae72497eba,
+ 0xabfa45da0edbde69,
+ 0xd6f8d7509292d603,
+ 0x865b86925b9bc5c2,
+ 0xa7f26836f282b733,
+ 0xd1ef0244af2364ff,
+ 0x8335616aed761f1f,
+ 0xa402b9c5a8d3a6e7,
+ 0xcd036837130890a1,
+ 0x802221226be55a65,
+ 0xa02aa96b06deb0fe,
+ 0xc83553c5c8965d3d,
+ 0xfa42a8b73abbf48d,
+ 0x9c69a97284b578d8,
+ 0xc38413cf25e2d70e,
+ 0xf46518c2ef5b8cd1,
+ 0x98bf2f79d5993803,
+ 0xbeeefb584aff8604,
+ 0xeeaaba2e5dbf6785,
+ 0x952ab45cfa97a0b3,
+ 0xba756174393d88e0,
+ 0xe912b9d1478ceb17,
+ 0x91abb422ccb812ef,
+ 0xb616a12b7fe617aa,
+ 0xe39c49765fdf9d95,
+ 0x8e41ade9fbebc27d,
+ 0xb1d219647ae6b31c,
+ 0xde469fbd99a05fe3,
+ 0x8aec23d680043bee,
+ 0xada72ccc20054aea,
+ 0xd910f7ff28069da4,
+ 0x87aa9aff79042287,
+ 0xa99541bf57452b28,
+ 0xd3fa922f2d1675f2,
+ 0x847c9b5d7c2e09b7,
+ 0xa59bc234db398c25,
+ 0xcf02b2c21207ef2f,
+ 0x8161afb94b44f57d,
+ 0xa1ba1ba79e1632dc,
+ 0xca28a291859bbf93,
+ 0xfcb2cb35e702af78,
+ 0x9defbf01b061adab,
+ 0xc56baec21c7a1916,
+ 0xf6c69a72a3989f5c,
+ 0x9a3c2087a63f6399,
+ 0xc0cb28a98fcf3c80,
+ 0xf0fdf2d3f3c30b9f,
+ 0x969eb7c47859e744,
+ 0xbc4665b596706115,
+ 0xeb57ff22fc0c795a,
+ 0x9316ff75dd87cbd8,
+ 0xb7dcbf5354e9bece,
+ 0xe5d3ef282a242e82,
+ 0x8fa475791a569d11,
+ 0xb38d92d760ec4455,
+ 0xe070f78d3927556b,
+ 0x8c469ab843b89563,
+ 0xaf58416654a6babb,
+ 0xdb2e51bfe9d0696a,
+ 0x88fcf317f22241e2,
+ 0xab3c2fddeeaad25b,
+ 0xd60b3bd56a5586f2,
+ 0x85c7056562757457,
+ 0xa738c6bebb12d16d,
+ 0xd106f86e69d785c8,
+ 0x82a45b450226b39d,
+ 0xa34d721642b06084,
+ 0xcc20ce9bd35c78a5,
+ 0xff290242c83396ce,
+ 0x9f79a169bd203e41,
+ 0xc75809c42c684dd1,
+ 0xf92e0c3537826146,
+ 0x9bbcc7a142b17ccc,
+ 0xc2abf989935ddbfe,
+ 0xf356f7ebf83552fe,
+ 0x98165af37b2153df,
+ 0xbe1bf1b059e9a8d6,
+ 0xeda2ee1c7064130c,
+ 0x9485d4d1c63e8be8,
+ 0xb9a74a0637ce2ee1,
+ 0xe8111c87c5c1ba9a,
+ 0x910ab1d4db9914a0,
+ 0xb54d5e4a127f59c8,
+ 0xe2a0b5dc971f303a,
+ 0x8da471a9de737e24,
+ 0xb10d8e1456105dad,
+ 0xdd50f1996b947519,
+ 0x8a5296ffe33cc930,
+ 0xace73cbfdc0bfb7b,
+ 0xd8210befd30efa5a,
+ 0x8714a775e3e95c78,
+ 0xa8d9d1535ce3b396,
+ 0xd31045a8341ca07c,
+ 0x83ea2b892091e44e,
+ 0xa4e4b66b68b65d61,
+ 0xce1de40642e3f4b9,
+ 0x80d2ae83e9ce78f4,
+ 0xa1075a24e4421731,
+ 0xc94930ae1d529cfd,
+ 0xfb9b7cd9a4a7443c,
+ 0x9d412e0806e88aa6,
+ 0xc491798a08a2ad4f,
+ 0xf5b5d7ec8acb58a3,
+ 0x9991a6f3d6bf1766,
+ 0xbff610b0cc6edd3f,
+ 0xeff394dcff8a948f,
+ 0x95f83d0a1fb69cd9,
+ 0xbb764c4ca7a44410,
+ 0xea53df5fd18d5514,
+ 0x92746b9be2f8552c,
+ 0xb7118682dbb66a77,
+ 0xe4d5e82392a40515,
+ 0x8f05b1163ba6832d,
+ 0xb2c71d5bca9023f8,
+ 0xdf78e4b2bd342cf7,
+ 0x8bab8eefb6409c1a,
+ 0xae9672aba3d0c321,
+ 0xda3c0f568cc4f3e9,
+ 0x8865899617fb1871,
+ 0xaa7eebfb9df9de8e,
+ 0xd51ea6fa85785631,
+ 0x8533285c936b35df,
+ 0xa67ff273b8460357,
+ 0xd01fef10a657842c,
+ 0x8213f56a67f6b29c,
+ 0xa298f2c501f45f43,
+ 0xcb3f2f7642717713,
+ 0xfe0efb53d30dd4d8,
+ 0x9ec95d1463e8a507,
+ 0xc67bb4597ce2ce49,
+ 0xf81aa16fdc1b81db,
+ 0x9b10a4e5e9913129,
+ 0xc1d4ce1f63f57d73,
+ 0xf24a01a73cf2dcd0,
+ 0x976e41088617ca02,
+ 0xbd49d14aa79dbc82,
+ 0xec9c459d51852ba3,
+ 0x93e1ab8252f33b46,
+ 0xb8da1662e7b00a17,
+ 0xe7109bfba19c0c9d,
+ 0x906a617d450187e2,
+ 0xb484f9dc9641e9db,
+ 0xe1a63853bbd26451,
+ 0x8d07e33455637eb3,
+ 0xb049dc016abc5e60,
+ 0xdc5c5301c56b75f7,
+ 0x89b9b3e11b6329bb,
+ 0xac2820d9623bf429,
+ 0xd732290fbacaf134,
+ 0x867f59a9d4bed6c0,
+ 0xa81f301449ee8c70,
+ 0xd226fc195c6a2f8c,
+ 0x83585d8fd9c25db8,
+ 0xa42e74f3d032f526,
+ 0xcd3a1230c43fb26f,
+ 0x80444b5e7aa7cf85,
+ 0xa0555e361951c367,
+ 0xc86ab5c39fa63441,
+ 0xfa856334878fc151,
+ 0x9c935e00d4b9d8d2,
+ 0xc3b8358109e84f07,
+ 0xf4a642e14c6262c9,
+ 0x98e7e9cccfbd7dbe,
+ 0xbf21e44003acdd2d,
+ 0xeeea5d5004981478,
+ 0x95527a5202df0ccb,
+ 0xbaa718e68396cffe,
+ 0xe950df20247c83fd,
+ 0x91d28b7416cdd27e,
+], [
+ -1077,
+ -1073,
+ -1070,
+ -1067,
+ -1063,
+ -1060,
+ -1057,
+ -1053,
+ -1050,
+ -1047,
+ -1043,
+ -1040,
+ -1037,
+ -1034,
+ -1030,
+ -1027,
+ -1024,
+ -1020,
+ -1017,
+ -1014,
+ -1010,
+ -1007,
+ -1004,
+ -1000,
+ -997,
+ -994,
+ -990,
+ -987,
+ -984,
+ -980,
+ -977,
+ -974,
+ -970,
+ -967,
+ -964,
+ -960,
+ -957,
+ -954,
+ -950,
+ -947,
+ -944,
+ -940,
+ -937,
+ -934,
+ -931,
+ -927,
+ -924,
+ -921,
+ -917,
+ -914,
+ -911,
+ -907,
+ -904,
+ -901,
+ -897,
+ -894,
+ -891,
+ -887,
+ -884,
+ -881,
+ -877,
+ -874,
+ -871,
+ -867,
+ -864,
+ -861,
+ -857,
+ -854,
+ -851,
+ -847,
+ -844,
+ -841,
+ -838,
+ -834,
+ -831,
+ -828,
+ -824,
+ -821,
+ -818,
+ -814,
+ -811,
+ -808,
+ -804,
+ -801,
+ -798,
+ -794,
+ -791,
+ -788,
+ -784,
+ -781,
+ -778,
+ -774,
+ -771,
+ -768,
+ -764,
+ -761,
+ -758,
+ -754,
+ -751,
+ -748,
+ -744,
+ -741,
+ -738,
+ -735,
+ -731,
+ -728,
+ -725,
+ -721,
+ -718,
+ -715,
+ -711,
+ -708,
+ -705,
+ -701,
+ -698,
+ -695,
+ -691,
+ -688,
+ -685,
+ -681,
+ -678,
+ -675,
+ -671,
+ -668,
+ -665,
+ -661,
+ -658,
+ -655,
+ -651,
+ -648,
+ -645,
+ -642,
+ -638,
+ -635,
+ -632,
+ -628,
+ -625,
+ -622,
+ -618,
+ -615,
+ -612,
+ -608,
+ -605,
+ -602,
+ -598,
+ -595,
+ -592,
+ -588,
+ -585,
+ -582,
+ -578,
+ -575,
+ -572,
+ -568,
+ -565,
+ -562,
+ -558,
+ -555,
+ -552,
+ -549,
+ -545,
+ -542,
+ -539,
+ -535,
+ -532,
+ -529,
+ -525,
+ -522,
+ -519,
+ -515,
+ -512,
+ -509,
+ -505,
+ -502,
+ -499,
+ -495,
+ -492,
+ -489,
+ -485,
+ -482,
+ -479,
+ -475,
+ -472,
+ -469,
+ -465,
+ -462,
+ -459,
+ -455,
+ -452,
+ -449,
+ -446,
+ -442,
+ -439,
+ -436,
+ -432,
+ -429,
+ -426,
+ -422,
+ -419,
+ -416,
+ -412,
+ -409,
+ -406,
+ -402,
+ -399,
+ -396,
+ -392,
+ -389,
+ -386,
+ -382,
+ -379,
+ -376,
+ -372,
+ -369,
+ -366,
+ -362,
+ -359,
+ -356,
+ -353,
+ -349,
+ -346,
+ -343,
+ -339,
+ -336,
+ -333,
+ -329,
+ -326,
+ -323,
+ -319,
+ -316,
+ -313,
+ -309,
+ -306,
+ -303,
+ -299,
+ -296,
+ -293,
+ -289,
+ -286,
+ -283,
+ -279,
+ -276,
+ -273,
+ -269,
+ -266,
+ -263,
+ -259,
+ -256,
+ -253,
+ -250,
+ -246,
+ -243,
+ -240,
+ -236,
+ -233,
+ -230,
+ -226,
+ -223,
+ -220,
+ -216,
+ -213,
+ -210,
+ -206,
+ -203,
+ -200,
+ -196,
+ -193,
+ -190,
+ -186,
+ -183,
+ -180,
+ -176,
+ -173,
+ -170,
+ -166,
+ -163,
+ -160,
+ -157,
+ -153,
+ -150,
+ -147,
+ -143,
+ -140,
+ -137,
+ -133,
+ -130,
+ -127,
+ -123,
+ -120,
+ -117,
+ -113,
+ -110,
+ -107,
+ -103,
+ -100,
+ -97,
+ -93,
+ -90,
+ -87,
+ -83,
+ -80,
+ -77,
+ -73,
+ -70,
+ -67,
+ -63,
+ -60,
+ -57,
+ -54,
+ -50,
+ -47,
+ -44,
+ -40,
+ -37,
+ -34,
+ -30,
+ -27,
+ -24,
+ -20,
+ -17,
+ -14,
+ -10,
+ -7,
+ -4,
+ 0,
+ 3,
+ 6,
+ 10,
+ 13,
+ 16,
+ 20,
+ 23,
+ 26,
+ 30,
+ 33,
+ 36,
+ 39,
+ 43,
+ 46,
+ 49,
+ 53,
+ 56,
+ 59,
+ 63,
+ 66,
+ 69,
+ 73,
+ 76,
+ 79,
+ 83,
+ 86,
+ 89,
+ 93,
+ 96,
+ 99,
+ 103,
+ 106,
+ 109,
+ 113,
+ 116,
+ 119,
+ 123,
+ 126,
+ 129,
+ 132,
+ 136,
+ 139,
+ 142,
+ 146,
+ 149,
+ 152,
+ 156,
+ 159,
+ 162,
+ 166,
+ 169,
+ 172,
+ 176,
+ 179,
+ 182,
+ 186,
+ 189,
+ 192,
+ 196,
+ 199,
+ 202,
+ 206,
+ 209,
+ 212,
+ 216,
+ 219,
+ 222,
+ 226,
+ 229,
+ 232,
+ 235,
+ 239,
+ 242,
+ 245,
+ 249,
+ 252,
+ 255,
+ 259,
+ 262,
+ 265,
+ 269,
+ 272,
+ 275,
+ 279,
+ 282,
+ 285,
+ 289,
+ 292,
+ 295,
+ 299,
+ 302,
+ 305,
+ 309,
+ 312,
+ 315,
+ 319,
+ 322,
+ 325,
+ 328,
+ 332,
+ 335,
+ 338,
+ 342,
+ 345,
+ 348,
+ 352,
+ 355,
+ 358,
+ 362,
+ 365,
+ 368,
+ 372,
+ 375,
+ 378,
+ 382,
+ 385,
+ 388,
+ 392,
+ 395,
+ 398,
+ 402,
+ 405,
+ 408,
+ 412,
+ 415,
+ 418,
+ 422,
+ 425,
+ 428,
+ 431,
+ 435,
+ 438,
+ 441,
+ 445,
+ 448,
+ 451,
+ 455,
+ 458,
+ 461,
+ 465,
+ 468,
+ 471,
+ 475,
+ 478,
+ 481,
+ 485,
+ 488,
+ 491,
+ 495,
+ 498,
+ 501,
+ 505,
+ 508,
+ 511,
+ 515,
+ 518,
+ 521,
+ 524,
+ 528,
+ 531,
+ 534,
+ 538,
+ 541,
+ 544,
+ 548,
+ 551,
+ 554,
+ 558,
+ 561,
+ 564,
+ 568,
+ 571,
+ 574,
+ 578,
+ 581,
+ 584,
+ 588,
+ 591,
+ 594,
+ 598,
+ 601,
+ 604,
+ 608,
+ 611,
+ 614,
+ 617,
+ 621,
+ 624,
+ 627,
+ 631,
+ 634,
+ 637,
+ 641,
+ 644,
+ 647,
+ 651,
+ 654,
+ 657,
+ 661,
+ 664,
+ 667,
+ 671,
+ 674,
+ 677,
+ 681,
+ 684,
+ 687,
+ 691,
+ 694,
+ 697,
+ 701,
+ 704,
+ 707,
+ 711,
+ 714,
+ 717,
+ 720,
+ 724,
+ 727,
+ 730,
+ 734,
+ 737,
+ 740,
+ 744,
+ 747,
+ 750,
+ 754,
+ 757,
+ 760,
+ 764,
+ 767,
+ 770,
+ 774,
+ 777,
+ 780,
+ 784,
+ 787,
+ 790,
+ 794,
+ 797,
+ 800,
+ 804,
+ 807,
+ 810,
+ 813,
+ 817,
+ 820,
+ 823,
+ 827,
+ 830,
+ 833,
+ 837,
+ 840,
+ 843,
+ 847,
+ 850,
+ 853,
+ 857,
+ 860,
+ 863,
+ 867,
+ 870,
+ 873,
+ 877,
+ 880,
+ 883,
+ 887,
+ 890,
+ 893,
+ 897,
+ 900,
+ 903,
+ 907,
+ 910,
+ 913,
+ 916,
+ 920,
+ 923,
+ 926,
+ 930,
+ 933,
+ 936,
+ 940,
+ 943,
+ 946,
+ 950,
+]);
+
+pub const F32_SHORT_POWERS: [f32; 11] = [
+ 1e0,
+ 1e1,
+ 1e2,
+ 1e3,
+ 1e4,
+ 1e5,
+ 1e6,
+ 1e7,
+ 1e8,
+ 1e9,
+ 1e10,
+];
+
+pub const F64_SHORT_POWERS: [f64; 23] = [
+ 1e0,
+ 1e1,
+ 1e2,
+ 1e3,
+ 1e4,
+ 1e5,
+ 1e6,
+ 1e7,
+ 1e8,
+ 1e9,
+ 1e10,
+ 1e11,
+ 1e12,
+ 1e13,
+ 1e14,
+ 1e15,
+ 1e16,
+ 1e17,
+ 1e18,
+ 1e19,
+ 1e20,
+ 1e21,
+ 1e22,
+];
diff --git a/libcore/num/diy_float.rs b/libcore/num/diy_float.rs
new file mode 100644
index 0000000..7c369ee
--- /dev/null
+++ b/libcore/num/diy_float.rs
@@ -0,0 +1,71 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Extended precision "soft float", for internal use only.
+
+// This module is only for dec2flt and flt2dec, and only public because of libcoretest.
+// It is not intended to ever be stabilized.
+#![doc(hidden)]
+#![unstable(feature = "core_private_diy_float",
+ reason = "internal routines only exposed for testing",
+ issue = "0")]
+
/// A custom 64-bit floating point type, representing `f * 2^e`.
#[derive(Copy, Clone, Debug)]
#[doc(hidden)]
pub struct Fp {
    /// The integer mantissa.
    pub f: u64,
    /// The exponent in base 2.
    pub e: i16,
}

impl Fp {
    /// Returns a correctly rounded product of itself and `other`.
    pub fn mul(&self, other: &Fp) -> Fp {
        const MASK: u64 = 0xffffffff;
        // Split both mantissas into 32-bit halves so the four partial
        // products of the full 128-bit multiply fit in u64 without overflow.
        let a = self.f >> 32;
        let b = self.f & MASK;
        let c = other.f >> 32;
        let d = other.f & MASK;
        let ac = a * c;
        let bc = b * c;
        let ad = a * d;
        let bd = b * d;
        // `1 << 31` rounds the discarded low 64 bits to nearest.
        let tmp = (bd >> 32) + (ad & MASK) + (bc & MASK) + (1 << 31) /* round */;
        let f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
        // Keeping only the top 64 bits of the 128-bit product divides the
        // mantissa by 2^64, hence the `+ 64` correction on the exponent.
        let e = self.e + other.e + 64;
        Fp { f: f, e: e }
    }

    /// Normalizes itself so that the resulting mantissa is at least `2^63`.
    pub fn normalize(&self) -> Fp {
        // Binary-search-style shifts: probe progressively smaller
        // leading-zero counts, shifting the mantissa up and compensating in
        // the exponent.
        let mut f = self.f;
        let mut e = self.e;
        if f >> (64 - 32) == 0 { f <<= 32; e -= 32; }
        if f >> (64 - 16) == 0 { f <<= 16; e -= 16; }
        if f >> (64 - 8) == 0 { f <<= 8; e -= 8; }
        if f >> (64 - 4) == 0 { f <<= 4; e -= 4; }
        if f >> (64 - 2) == 0 { f <<= 2; e -= 2; }
        if f >> (64 - 1) == 0 { f <<= 1; e -= 1; }
        // BUGFIX: was `1 >> 63`, which is 0 and made this assertion vacuous.
        // The intent is to check that the top bit of the mantissa is now set.
        debug_assert!(f >= (1 << 63));
        Fp { f: f, e: e }
    }

    /// Normalizes itself to have the shared exponent.
    /// It can only decrease the exponent (and thus increase the mantissa).
    pub fn normalize_to(&self, e: i16) -> Fp {
        let edelta = self.e - e;
        assert!(edelta >= 0);
        let edelta = edelta as usize;
        // The shift up must not drop any set bits of the mantissa.
        assert_eq!(self.f << edelta >> edelta, self.f);
        Fp { f: self.f << edelta, e: e }
    }
}
diff --git a/libcore/num/f32.rs b/libcore/num/f32.rs
new file mode 100644
index 0000000..c24eaa3
--- /dev/null
+++ b/libcore/num/f32.rs
@@ -0,0 +1,272 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
//! Operations and constants for 32-bit floats (the `f32` type)
+
+// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
+#![allow(overflowing_literals)]
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use intrinsics;
+use mem;
+use num::Float;
+use num::FpCategory as Fp;
+
/// The radix or base of the internal representation of `f32`.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const RADIX: u32 = 2;

/// Number of significant digits in base 2 (includes the implicit leading bit).
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MANTISSA_DIGITS: u32 = 24;
/// Approximate number of significant decimal digits.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const DIGITS: u32 = 6;

/// Machine epsilon for `f32`: the difference between `1.0` and the next
/// larger representable number.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const EPSILON: f32 = 1.19209290e-07_f32;

/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;

/// Minimum possible normal power-of-2 exponent (one above the subnormal range).
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_EXP: i32 = -125;
/// Maximum possible power-of-2 exponent.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_EXP: i32 = 128;

/// Minimum possible normal power-of-10 exponent.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MIN_10_EXP: i32 = -37;
/// Maximum possible power-of-10 exponent.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const MAX_10_EXP: i32 = 38;

/// Not a Number (NaN), produced here by the indeterminate form `0.0/0.0`.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NAN: f32 = 0.0_f32/0.0_f32;
/// Positive infinity.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
/// Negative infinity.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
+
/// Basic mathematical constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.

    /// Archimedes' constant (π)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const PI: f32 = 3.14159265358979323846264338327950288_f32;

    /// π/2
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;

    /// π/3
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;

    /// π/4
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;

    /// π/6
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;

    /// π/8
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;

    /// 1/π
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;

    /// 2/π
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;

    /// 2/sqrt(π)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;

    /// sqrt(2)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;

    /// 1/sqrt(2)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;

    /// Euler's number (e)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const E: f32 = 2.71828182845904523536028747135266250_f32;

    /// log2(e)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;

    /// log10(e)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;

    /// ln(2)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;

    /// ln(10)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
+
#[unstable(feature = "core_float",
           reason = "stable interface is via `impl f{32,64}` in later crates",
           issue = "32110")]
impl Float for f32 {
    #[inline]
    fn nan() -> f32 { NAN }

    #[inline]
    fn infinity() -> f32 { INFINITY }

    #[inline]
    fn neg_infinity() -> f32 { NEG_INFINITY }

    #[inline]
    fn zero() -> f32 { 0.0 }

    #[inline]
    fn neg_zero() -> f32 { -0.0 }

    #[inline]
    fn one() -> f32 { 1.0 }

    /// Returns `true` if the number is NaN.
    /// Relies on NaN being the only value that compares unequal to itself.
    #[inline]
    fn is_nan(self) -> bool { self != self }

    /// Returns `true` if the number is infinite.
    #[inline]
    fn is_infinite(self) -> bool {
        self == Float::infinity() || self == Float::neg_infinity()
    }

    /// Returns `true` if the number is neither infinite nor NaN.
    #[inline]
    fn is_finite(self) -> bool {
        !(self.is_nan() || self.is_infinite())
    }

    /// Returns `true` if the number is neither zero, infinite, subnormal nor NaN.
    #[inline]
    fn is_normal(self) -> bool {
        self.classify() == Fp::Normal
    }

    /// Returns the floating point category of the number. If only one property
    /// is going to be tested, it is generally faster to use the specific
    /// predicate instead.
    fn classify(self) -> Fp {
        // IEEE 754 binary32 layout: 1 sign bit, 8 exponent bits, 23 mantissa bits.
        const EXP_MASK: u32 = 0x7f800000;
        const MAN_MASK: u32 = 0x007fffff;

        let bits: u32 = unsafe { mem::transmute(self) };
        // An all-zero exponent marks zero/subnormal, an all-one exponent marks
        // infinity/NaN; the mantissa distinguishes within each pair.
        match (bits & MAN_MASK, bits & EXP_MASK) {
            (0, 0) => Fp::Zero,
            (_, 0) => Fp::Subnormal,
            (0, EXP_MASK) => Fp::Infinite,
            (_, EXP_MASK) => Fp::Nan,
            _ => Fp::Normal,
        }
    }

    /// Returns the mantissa, exponent and sign as integers,
    /// such that `self = sign * mantissa * 2^exponent`.
    fn integer_decode(self) -> (u64, i16, i8) {
        let bits: u32 = unsafe { mem::transmute(self) };
        let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
        let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
        // Subnormals have no hidden bit; the extra shift keeps them consistent
        // with the fixed bias adjustment applied below.
        let mantissa = if exponent == 0 {
            (bits & 0x7fffff) << 1
        } else {
            (bits & 0x7fffff) | 0x800000
        };
        // Exponent bias + mantissa shift
        exponent -= 127 + 23;
        (mantissa as u64, exponent, sign)
    }

    /// Computes the absolute value of `self`. Returns `Float::nan()` if the
    /// number is `Float::nan()`.
    #[inline]
    fn abs(self) -> f32 {
        unsafe { intrinsics::fabsf32(self) }
    }

    /// Returns a number that represents the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
    /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
    /// - `Float::nan()` if the number is `Float::nan()`
    #[inline]
    fn signum(self) -> f32 {
        if self.is_nan() {
            Float::nan()
        } else {
            // copysign transfers `self`'s sign bit onto 1.0, which covers the
            // zero and infinity cases listed above as well.
            unsafe { intrinsics::copysignf32(1.0, self) }
        }
    }

    /// Returns `true` if `self` is positive, including `+0.0` and
    /// `Float::infinity()`.
    #[inline]
    fn is_sign_positive(self) -> bool {
        // `self > 0.0` misses `+0.0`; the division distinguishes the zeros
        // because `1.0 / +0.0` is positive infinity while `1.0 / -0.0` is not.
        self > 0.0 || (1.0 / self) == Float::infinity()
    }

    /// Returns `true` if `self` is negative, including `-0.0` and
    /// `Float::neg_infinity()`.
    #[inline]
    fn is_sign_negative(self) -> bool {
        // Mirror of `is_sign_positive`: `1.0 / -0.0` is negative infinity.
        self < 0.0 || (1.0 / self) == Float::neg_infinity()
    }

    /// Returns the reciprocal (multiplicative inverse) of the number.
    #[inline]
    fn recip(self) -> f32 { 1.0 / self }

    /// Raises the number to an integer power via the `powif32` intrinsic.
    #[inline]
    fn powi(self, n: i32) -> f32 {
        unsafe { intrinsics::powif32(self, n) }
    }

    /// Converts to degrees, assuming the number is in radians.
    #[inline]
    fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }

    /// Converts to radians, assuming the number is in degrees.
    #[inline]
    fn to_radians(self) -> f32 {
        let value: f32 = consts::PI;
        self * (value / 180.0f32)
    }
}
diff --git a/libcore/num/f64.rs b/libcore/num/f64.rs
new file mode 100644
index 0000000..beeee80
--- /dev/null
+++ b/libcore/num/f64.rs
@@ -0,0 +1,272 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Operations and constants for 64-bits floats (`f64` type)
+
+// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
+#![allow(overflowing_literals)]
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use intrinsics;
+use mem;
+use num::FpCategory as Fp;
+use num::Float;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const RADIX: u32 = 2;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MANTISSA_DIGITS: u32 = 53;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const DIGITS: u32 = 15;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const EPSILON: f64 = 2.2204460492503131e-16_f64;
+
+/// Smallest finite f64 value
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MIN: f64 = -1.7976931348623157e+308_f64;
+/// Smallest positive, normalized f64 value
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MIN_POSITIVE: f64 = 2.2250738585072014e-308_f64;
+/// Largest finite f64 value
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MAX: f64 = 1.7976931348623157e+308_f64;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MIN_EXP: i32 = -1021;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MAX_EXP: i32 = 1024;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MIN_10_EXP: i32 = -307;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MAX_10_EXP: i32 = 308;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const NAN: f64 = 0.0_f64/0.0_f64;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const INFINITY: f64 = 1.0_f64/0.0_f64;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const NEG_INFINITY: f64 = -1.0_f64/0.0_f64;
+
+/// Basic mathematical constants.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod consts {
+ // FIXME: replace with mathematical constants from cmath.
+
+ /// Archimedes' constant
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const PI: f64 = 3.14159265358979323846264338327950288_f64;
+
+ /// pi/2.0
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_2: f64 = 1.57079632679489661923132169163975144_f64;
+
+ /// pi/3.0
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_3: f64 = 1.04719755119659774615421446109316763_f64;
+
+ /// pi/4.0
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_4: f64 = 0.785398163397448309615660845819875721_f64;
+
+ /// pi/6.0
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_6: f64 = 0.52359877559829887307710723054658381_f64;
+
+ /// pi/8.0
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_PI_8: f64 = 0.39269908169872415480783042290993786_f64;
+
+ /// 1.0/pi
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_1_PI: f64 = 0.318309886183790671537767526745028724_f64;
+
+ /// 2.0/pi
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_2_PI: f64 = 0.636619772367581343075535053490057448_f64;
+
+ /// 2.0/sqrt(pi)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_2_SQRT_PI: f64 = 1.12837916709551257389615890312154517_f64;
+
+ /// sqrt(2.0)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const SQRT_2: f64 = 1.41421356237309504880168872420969808_f64;
+
+ /// 1.0/sqrt(2.0)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const FRAC_1_SQRT_2: f64 = 0.707106781186547524400844362104849039_f64;
+
+ /// Euler's number
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const E: f64 = 2.71828182845904523536028747135266250_f64;
+
+ /// log2(e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LOG2_E: f64 = 1.44269504088896340735992468100189214_f64;
+
+ /// log10(e)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LOG10_E: f64 = 0.434294481903251827651128918916605082_f64;
+
+ /// ln(2.0)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LN_2: f64 = 0.693147180559945309417232121458176568_f64;
+
+ /// ln(10.0)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const LN_10: f64 = 2.30258509299404568401799145468436421_f64;
+}
+
+#[unstable(feature = "core_float",
+ reason = "stable interface is via `impl f{32,64}` in later crates",
+ issue = "32110")]
+impl Float for f64 {
+ #[inline]
+ fn nan() -> f64 { NAN }
+
+ #[inline]
+ fn infinity() -> f64 { INFINITY }
+
+ #[inline]
+ fn neg_infinity() -> f64 { NEG_INFINITY }
+
+ #[inline]
+ fn zero() -> f64 { 0.0 }
+
+ #[inline]
+ fn neg_zero() -> f64 { -0.0 }
+
+ #[inline]
+ fn one() -> f64 { 1.0 }
+
+ /// Returns `true` if the number is NaN.
+ #[inline]
+ fn is_nan(self) -> bool { self != self }
+
+ /// Returns `true` if the number is infinite.
+ #[inline]
+ fn is_infinite(self) -> bool {
+ self == Float::infinity() || self == Float::neg_infinity()
+ }
+
+ /// Returns `true` if the number is neither infinite or NaN.
+ #[inline]
+ fn is_finite(self) -> bool {
+ !(self.is_nan() || self.is_infinite())
+ }
+
+ /// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
+ #[inline]
+ fn is_normal(self) -> bool {
+ self.classify() == Fp::Normal
+ }
+
+ /// Returns the floating point category of the number. If only one property
+ /// is going to be tested, it is generally faster to use the specific
+ /// predicate instead.
+ fn classify(self) -> Fp {
+ const EXP_MASK: u64 = 0x7ff0000000000000;
+ const MAN_MASK: u64 = 0x000fffffffffffff;
+
+ let bits: u64 = unsafe { mem::transmute(self) };
+ match (bits & MAN_MASK, bits & EXP_MASK) {
+ (0, 0) => Fp::Zero,
+ (_, 0) => Fp::Subnormal,
+ (0, EXP_MASK) => Fp::Infinite,
+ (_, EXP_MASK) => Fp::Nan,
+ _ => Fp::Normal,
+ }
+ }
+
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8) {
+ let bits: u64 = unsafe { mem::transmute(self) };
+ let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0xfffffffffffff) << 1
+ } else {
+ (bits & 0xfffffffffffff) | 0x10000000000000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 1023 + 52;
+ (mantissa, exponent, sign)
+ }
+
+ /// Computes the absolute value of `self`. Returns `Float::nan()` if the
+ /// number is `Float::nan()`.
+ #[inline]
+ fn abs(self) -> f64 {
+ unsafe { intrinsics::fabsf64(self) }
+ }
+
+ /// Returns a number that represents the sign of `self`.
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
+ /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
+ /// - `Float::nan()` if the number is `Float::nan()`
+ #[inline]
+ fn signum(self) -> f64 {
+ if self.is_nan() {
+ Float::nan()
+ } else {
+ unsafe { intrinsics::copysignf64(1.0, self) }
+ }
+ }
+
+ /// Returns `true` if `self` is positive, including `+0.0` and
+ /// `Float::infinity()`.
+ #[inline]
+ fn is_sign_positive(self) -> bool {
+ self > 0.0 || (1.0 / self) == Float::infinity()
+ }
+
+ /// Returns `true` if `self` is negative, including `-0.0` and
+ /// `Float::neg_infinity()`.
+ #[inline]
+ fn is_sign_negative(self) -> bool {
+ self < 0.0 || (1.0 / self) == Float::neg_infinity()
+ }
+
+ /// Returns the reciprocal (multiplicative inverse) of the number.
+ #[inline]
+ fn recip(self) -> f64 { 1.0 / self }
+
+ #[inline]
+ fn powi(self, n: i32) -> f64 {
+ unsafe { intrinsics::powif64(self, n) }
+ }
+
+ /// Converts to degrees, assuming the number is in radians.
+ #[inline]
+ fn to_degrees(self) -> f64 { self * (180.0f64 / consts::PI) }
+
+ /// Converts to radians, assuming the number is in degrees.
+ #[inline]
+ fn to_radians(self) -> f64 {
+ let value: f64 = consts::PI;
+ self * (value / 180.0)
+ }
+}
diff --git a/libcore/num/float_macros.rs b/libcore/num/float_macros.rs
new file mode 100644
index 0000000..b3adef5
--- /dev/null
+++ b/libcore/num/float_macros.rs
@@ -0,0 +1,20 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![doc(hidden)]
+
+macro_rules! assert_approx_eq {
+ ($a:expr, $b:expr) => ({
+ use num::Float;
+ let (a, b) = (&$a, &$b);
+ assert!((*a - *b).abs() < 1.0e-6,
+ "{} is not approximately equal to {}", *a, *b);
+ })
+}
diff --git a/libcore/num/flt2dec/decoder.rs b/libcore/num/flt2dec/decoder.rs
new file mode 100644
index 0000000..6265691
--- /dev/null
+++ b/libcore/num/flt2dec/decoder.rs
@@ -0,0 +1,100 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Decodes a floating-point value into individual parts and error ranges.
+
+use prelude::v1::*;
+
+use {f32, f64};
+use num::{Float, FpCategory};
+
+/// Decoded unsigned finite value, such that:
+///
+/// - The original value equals to `mant * 2^exp`.
+///
+/// - Any number from `(mant - minus) * 2^exp` to `(mant + plus) * 2^exp` will
+/// round to the original value. The range is inclusive only when
+/// `inclusive` is true.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct Decoded {
+ /// The scaled mantissa.
+ pub mant: u64,
+ /// The lower error range.
+ pub minus: u64,
+ /// The upper error range.
+ pub plus: u64,
+ /// The shared exponent in base 2.
+ pub exp: i16,
+ /// True when the error range is inclusive.
+ ///
+ /// In IEEE 754, this is true when the original mantissa was even.
+ pub inclusive: bool,
+}
+
+/// Decoded unsigned value.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum FullDecoded {
+ /// Not-a-number.
+ Nan,
+ /// Infinities, either positive or negative.
+ Infinite,
+ /// Zero, either positive or negative.
+ Zero,
+ /// Finite numbers with further decoded fields.
+ Finite(Decoded),
+}
+
+/// A floating point type which can be `decode`d.
+pub trait DecodableFloat: Float + Copy {
+ /// The minimum positive normalized value.
+ fn min_pos_norm_value() -> Self;
+}
+
+impl DecodableFloat for f32 {
+ fn min_pos_norm_value() -> Self { f32::MIN_POSITIVE }
+}
+
+impl DecodableFloat for f64 {
+ fn min_pos_norm_value() -> Self { f64::MIN_POSITIVE }
+}
+
+/// Returns a sign (true when negative) and `FullDecoded` value
+/// from given floating point number.
+pub fn decode<T: DecodableFloat>(v: T) -> (/*negative?*/ bool, FullDecoded) {
+ let (mant, exp, sign) = v.integer_decode();
+ let even = (mant & 1) == 0;
+ let decoded = match v.classify() {
+ FpCategory::Nan => FullDecoded::Nan,
+ FpCategory::Infinite => FullDecoded::Infinite,
+ FpCategory::Zero => FullDecoded::Zero,
+ FpCategory::Subnormal => {
+ // neighbors: (mant - 2, exp) -- (mant, exp) -- (mant + 2, exp)
+ // Float::integer_decode always preserves the exponent,
+ // so the mantissa is scaled for subnormals.
+ FullDecoded::Finite(Decoded { mant: mant, minus: 1, plus: 1,
+ exp: exp, inclusive: even })
+ }
+ FpCategory::Normal => {
+ let minnorm = <T as DecodableFloat>::min_pos_norm_value().integer_decode();
+ if mant == minnorm.0 {
+ // neighbors: (maxmant, exp - 1) -- (minnormmant, exp) -- (minnormmant + 1, exp)
+ // where maxmant = minnormmant * 2 - 1
+ FullDecoded::Finite(Decoded { mant: mant << 2, minus: 1, plus: 2,
+ exp: exp - 2, inclusive: even })
+ } else {
+ // neighbors: (mant - 1, exp) -- (mant, exp) -- (mant + 1, exp)
+ FullDecoded::Finite(Decoded { mant: mant << 1, minus: 1, plus: 1,
+ exp: exp - 1, inclusive: even })
+ }
+ }
+ };
+ (sign < 0, decoded)
+}
+
diff --git a/libcore/num/flt2dec/estimator.rs b/libcore/num/flt2dec/estimator.rs
new file mode 100644
index 0000000..d42e05a
--- /dev/null
+++ b/libcore/num/flt2dec/estimator.rs
@@ -0,0 +1,25 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The exponent estimator.
+
+/// Finds `k_0` such that `10^(k_0-1) < mant * 2^exp <= 10^(k_0+1)`.
+///
+/// This is used to approximate `k = ceil(log_10 (mant * 2^exp))`;
+/// the true `k` is either `k_0` or `k_0+1`.
+#[doc(hidden)]
+pub fn estimate_scaling_factor(mant: u64, exp: i16) -> i16 {
+ // 2^(nbits-1) < mant <= 2^nbits if mant > 0
+ let nbits = 64 - (mant - 1).leading_zeros() as i64;
+ // 1292913986 = floor(2^32 * log_10 2)
+ // therefore this always underestimates (or is exact), but not much.
+ (((nbits + exp as i64) * 1292913986) >> 32) as i16
+}
+
diff --git a/libcore/num/flt2dec/mod.rs b/libcore/num/flt2dec/mod.rs
new file mode 100644
index 0000000..b549f33
--- /dev/null
+++ b/libcore/num/flt2dec/mod.rs
@@ -0,0 +1,662 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+
+Floating-point number to decimal conversion routines.
+
+# Problem statement
+
+We are given the floating-point number `v = f * 2^e` with an integer `f`,
+and its bounds `minus` and `plus` such that any number between `v - minus` and
+`v + plus` will be rounded to `v`. For the simplicity we assume that
+this range is exclusive. Then we would like to get the unique decimal
+representation `V = 0.d[0..n-1] * 10^k` such that:
+
+- `d[0]` is non-zero.
+
+- It's correctly rounded when parsed back: `v - minus < V < v + plus`.
+ Furthermore it is shortest such one, i.e. there is no representation
+ with less than `n` digits that is correctly rounded.
+
+- It's closest to the original value: `abs(V - v) <= 10^(k-n) / 2`. Note that
+ there might be two representations satisfying this uniqueness requirement,
+ in which case some tie-breaking mechanism is used.
+
+We will call this mode of operation as to the *shortest* mode. This mode is used
+when there is no additional constraint, and can be thought as a "natural" mode
+as it matches the ordinary intuition (it at least prints `0.1f32` as "0.1").
+
+We have two more modes of operation closely related to each other. In these modes
+we are given either the number of significant digits `n` or the last-digit
+limitation `limit` (which determines the actual `n`), and we would like to get
+the representation `V = 0.d[0..n-1] * 10^k` such that:
+
+- `d[0]` is non-zero, unless `n` was zero in which case only `k` is returned.
+
+- It's closest to the original value: `abs(V - v) <= 10^(k-n) / 2`. Again,
+ there might be some tie-breaking mechanism.
+
+When `limit` is given but not `n`, we set `n` such that `k - n = limit`
+so that the last digit `d[n-1]` is scaled by `10^(k-n) = 10^limit`.
+If such `n` is negative, we clip it to zero so that we will only get `k`.
+We are also limited by the supplied buffer. This limitation is used to print
+the number up to given number of fractional digits without knowing
+the correct `k` beforehand.
+
+We will call the mode of operation requiring `n` as to the *exact* mode,
+and one requiring `limit` as to the *fixed* mode. The exact mode is a subset of
+the fixed mode: the sufficiently large last-digit limitation will eventually fill
+the supplied buffer and let the algorithm to return.
+
+# Implementation overview
+
+It is easy to get the floating point printing correct but slow (Russ Cox has
+[demonstrated](http://research.swtch.com/ftoa) how it's easy), or incorrect but
+fast (naïve division and modulo). But it is surprisingly hard to print
+floating point numbers correctly *and* efficiently.
+
+There are two classes of algorithms widely known to be correct.
+
+- The "Dragon" family of algorithm is first described by Guy L. Steele Jr. and
+ Jon L. White. They rely on the fixed-size big integer for their correctness.
+ A slight improvement was found later, which is posthumously described by
+ Robert G. Burger and R. Kent Dybvig. David Gay's `dtoa.c` routine is
+ a popular implementation of this strategy.
+
+- The "Grisu" family of algorithm is first described by Florian Loitsch.
+ They use very cheap integer-only procedure to determine the close-to-correct
+ representation which is at least guaranteed to be shortest. The variant,
+ Grisu3, actively detects if the resulting representation is incorrect.
+
+We implement both algorithms with necessary tweaks to suit our requirements.
+In particular, published literatures are short of the actual implementation
+difficulties like how to avoid arithmetic overflows. Each implementation,
+available in `strategy::dragon` and `strategy::grisu` respectively,
+extensively describes all necessary justifications and many proofs for them.
+(It is still difficult to follow though. You have been warned.)
+
+Both implementations expose two public functions:
+
+- `format_shortest(decoded, buf)`, which always needs at least
+ `MAX_SIG_DIGITS` digits of buffer. Implements the shortest mode.
+
+- `format_exact(decoded, buf, limit)`, which accepts as small as
+ one digit of buffer. Implements exact and fixed modes.
+
+They try to fill the `u8` buffer with digits and returns the number of digits
+written and the exponent `k`. They are total for all finite `f32` and `f64`
+inputs (Grisu internally falls back to Dragon if necessary).
+
+The rendered digits are formatted into the actual string form with
+four functions:
+
+- `to_shortest_str` prints the shortest representation, which can be padded by
+ zeroes to make *at least* given number of fractional digits.
+
+- `to_shortest_exp_str` prints the shortest representation, which can be
+ padded by zeroes when its exponent is in the specified ranges,
+ or can be printed in the exponential form such as `1.23e45`.
+
+- `to_exact_exp_str` prints the exact representation with given number of
+ digits in the exponential form.
+
+- `to_exact_fixed_str` prints the fixed representation with *exactly*
+ given number of fractional digits.
+
+They all return a slice of preallocated `Part` array, which corresponds to
+the individual part of strings: a fixed string, a part of rendered digits,
+a number of zeroes or a small (`u16`) number. The caller is expected to
+provide a large enough buffer and `Part` array, and to assemble the final
+string from resulting `Part`s itself.
+
+All algorithms and formatting functions are accompanied by extensive tests
+in `coretest::num::flt2dec` module. It also shows how to use individual
+functions.
+
+*/
+
+// while this is extensively documented, this is in principle private which is
+// only made public for testing. do not expose us.
+#![doc(hidden)]
+#![unstable(feature = "flt2dec",
+ reason = "internal routines only exposed for testing",
+ issue = "0")]
+
+use prelude::v1::*;
+use i16;
+pub use self::decoder::{decode, DecodableFloat, FullDecoded, Decoded};
+
+pub mod estimator;
+pub mod decoder;
+
+/// Digit-generation algorithms.
+pub mod strategy {
+ pub mod dragon;
+ pub mod grisu;
+}
+
+/// The minimum size of buffer necessary for the shortest mode.
+///
+/// It is a bit non-trivial to derive, but this is one plus the maximal number of
+/// significant decimal digits from formatting algorithms with the shortest result.
+/// The exact formula is `ceil(# bits in mantissa * log_10 2 + 1)`.
+pub const MAX_SIG_DIGITS: usize = 17;
+
+/// When `d[..n]` contains decimal digits, increase the last digit and propagate carry.
+/// Returns a next digit when it causes the length change.
+#[doc(hidden)]
+pub fn round_up(d: &mut [u8], n: usize) -> Option<u8> {
+ match d[..n].iter().rposition(|&c| c != b'9') {
+ Some(i) => { // d[i+1..n] is all nines
+ d[i] += 1;
+ for j in i+1..n { d[j] = b'0'; }
+ None
+ }
+ None if n > 0 => { // 999..999 rounds to 1000..000 with an increased exponent
+ d[0] = b'1';
+ for j in 1..n { d[j] = b'0'; }
+ Some(b'0')
+ }
+ None => { // an empty buffer rounds up (a bit strange but reasonable)
+ Some(b'1')
+ }
+ }
+}
+
+/// Formatted parts.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Part<'a> {
+ /// Given number of zero digits.
+ Zero(usize),
+ /// A literal number up to 5 digits.
+ Num(u16),
+ /// A verbatim copy of given bytes.
+ Copy(&'a [u8]),
+}
+
+impl<'a> Part<'a> {
+ /// Returns the exact byte length of given part.
+ pub fn len(&self) -> usize {
+ match *self {
+ Part::Zero(nzeroes) => nzeroes,
+ Part::Num(v) => if v < 1_000 { if v < 10 { 1 } else if v < 100 { 2 } else { 3 } }
+ else { if v < 10_000 { 4 } else { 5 } },
+ Part::Copy(buf) => buf.len(),
+ }
+ }
+
+ /// Writes a part into the supplied buffer.
+ /// Returns the number of written bytes, or `None` if the buffer is not enough.
+ /// (It may still leave partially written bytes in the buffer; do not rely on that.)
+ pub fn write(&self, out: &mut [u8]) -> Option<usize> {
+ let len = self.len();
+ if out.len() >= len {
+ match *self {
+ Part::Zero(nzeroes) => {
+ for c in &mut out[..nzeroes] { *c = b'0'; }
+ }
+ Part::Num(mut v) => {
+ for c in out[..len].iter_mut().rev() {
+ *c = b'0' + (v % 10) as u8;
+ v /= 10;
+ }
+ }
+ Part::Copy(buf) => {
+ out[..buf.len()].copy_from_slice(buf);
+ }
+ }
+ Some(len)
+ } else {
+ None
+ }
+ }
+}
+
+/// Formatted result containing one or more parts.
+/// This can be written to the byte buffer or converted to the allocated string.
+#[allow(missing_debug_implementations)]
+#[derive(Clone)]
+pub struct Formatted<'a> {
+ /// A byte slice representing a sign, either `""`, `"-"` or `"+"`.
+ pub sign: &'static [u8],
+ /// Formatted parts to be rendered after a sign and optional zero padding.
+ pub parts: &'a [Part<'a>],
+}
+
+impl<'a> Formatted<'a> {
+ /// Returns the exact byte length of combined formatted result.
+ pub fn len(&self) -> usize {
+ let mut len = self.sign.len();
+ for part in self.parts {
+ len += part.len();
+ }
+ len
+ }
+
+ /// Writes all formatted parts into the supplied buffer.
+ /// Returns the number of written bytes, or `None` if the buffer is not enough.
+ /// (It may still leave partially written bytes in the buffer; do not rely on that.)
+ pub fn write(&self, out: &mut [u8]) -> Option<usize> {
+ if out.len() < self.sign.len() { return None; }
+ out[..self.sign.len()].copy_from_slice(self.sign);
+
+ let mut written = self.sign.len();
+ for part in self.parts {
+ match part.write(&mut out[written..]) {
+ Some(len) => { written += len; }
+ None => { return None; }
+ }
+ }
+ Some(written)
+ }
+}
+
+/// Formats given decimal digits `0.<...buf...> * 10^exp` into the decimal form
+/// with at least given number of fractional digits. The result is stored to
+/// the supplied parts array and a slice of written parts is returned.
+///
+/// `frac_digits` can be less than the number of actual fractional digits in `buf`;
+/// it will be ignored and full digits will be printed. It is only used to print
+/// additional zeroes after rendered digits. Thus `frac_digits` of 0 means that
+/// it will only print given digits and nothing else.
+fn digits_to_dec_str<'a>(buf: &'a [u8], exp: i16, frac_digits: usize,
+ parts: &'a mut [Part<'a>]) -> &'a [Part<'a>] {
+ assert!(!buf.is_empty());
+ assert!(buf[0] > b'0');
+ assert!(parts.len() >= 4);
+
+ // if there is the restriction on the last digit position, `buf` is assumed to be
+ // left-padded with the virtual zeroes. the number of virtual zeroes, `nzeroes`,
+ // equals to `max(0, exp + frac_digits - buf.len())`, so that the position of
+ // the last digit `exp - buf.len() - nzeroes` is no more than `-frac_digits`:
+ //
+ // |<-virtual->|
+ // |<---- buf ---->| zeroes | exp
+ // 0. 1 2 3 4 5 6 7 8 9 _ _ _ _ _ _ x 10
+ // | | |
+ // 10^exp 10^(exp-buf.len()) 10^(exp-buf.len()-nzeroes)
+ //
+ // `nzeroes` is individually calculated for each case in order to avoid overflow.
+
+ if exp <= 0 {
+ // the decimal point is before rendered digits: [0.][000...000][1234][____]
+ let minus_exp = -(exp as i32) as usize;
+ parts[0] = Part::Copy(b"0.");
+ parts[1] = Part::Zero(minus_exp);
+ parts[2] = Part::Copy(buf);
+ if frac_digits > buf.len() && frac_digits - buf.len() > minus_exp {
+ parts[3] = Part::Zero((frac_digits - buf.len()) - minus_exp);
+ &parts[..4]
+ } else {
+ &parts[..3]
+ }
+ } else {
+ let exp = exp as usize;
+ if exp < buf.len() {
+ // the decimal point is inside rendered digits: [12][.][34][____]
+ parts[0] = Part::Copy(&buf[..exp]);
+ parts[1] = Part::Copy(b".");
+ parts[2] = Part::Copy(&buf[exp..]);
+ if frac_digits > buf.len() - exp {
+ parts[3] = Part::Zero(frac_digits - (buf.len() - exp));
+ &parts[..4]
+ } else {
+ &parts[..3]
+ }
+ } else {
+ // the decimal point is after rendered digits: [1234][____0000] or [1234][__][.][__].
+ parts[0] = Part::Copy(buf);
+ parts[1] = Part::Zero(exp - buf.len());
+ if frac_digits > 0 {
+ parts[2] = Part::Copy(b".");
+ parts[3] = Part::Zero(frac_digits);
+ &parts[..4]
+ } else {
+ &parts[..2]
+ }
+ }
+ }
+}
+
+/// Formats given decimal digits `0.<...buf...> * 10^exp` into the exponential form
+/// with at least given number of significant digits. When `upper` is true,
+/// the exponent will be prefixed by `E`; otherwise that's `e`. The result is
+/// stored to the supplied parts array and a slice of written parts is returned.
+///
+/// `min_digits` can be less than the number of actual significant digits in `buf`;
+/// it will be ignored and full digits will be printed. It is only used to print
+/// additional zeroes after rendered digits. Thus `min_digits` of 0 means that
+/// it will only print given digits and nothing else.
+fn digits_to_exp_str<'a>(buf: &'a [u8], exp: i16, min_ndigits: usize, upper: bool,
+ parts: &'a mut [Part<'a>]) -> &'a [Part<'a>] {
+ assert!(!buf.is_empty());
+ assert!(buf[0] > b'0');
+ assert!(parts.len() >= 6);
+
+ let mut n = 0;
+
+ parts[n] = Part::Copy(&buf[..1]);
+ n += 1;
+
+ if buf.len() > 1 || min_ndigits > 1 {
+ parts[n] = Part::Copy(b".");
+ parts[n + 1] = Part::Copy(&buf[1..]);
+ n += 2;
+ if min_ndigits > buf.len() {
+ parts[n] = Part::Zero(min_ndigits - buf.len());
+ n += 1;
+ }
+ }
+
+ // 0.1234 x 10^exp = 1.234 x 10^(exp-1)
+ let exp = exp as i32 - 1; // avoid underflow when exp is i16::MIN
+ if exp < 0 {
+ parts[n] = Part::Copy(if upper { b"E-" } else { b"e-" });
+ parts[n + 1] = Part::Num(-exp as u16);
+ } else {
+ parts[n] = Part::Copy(if upper { b"E" } else { b"e" });
+ parts[n + 1] = Part::Num(exp as u16);
+ }
+ &parts[..n + 2]
+}
+
+/// Sign formatting options.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Sign {
+ /// Prints `-` only for the negative non-zero values.
+ Minus, // -inf -1 0 0 1 inf nan
+ /// Prints `-` only for any negative values (including the negative zero).
+ MinusRaw, // -inf -1 -0 0 1 inf nan
+ /// Prints `-` for the negative non-zero values, or `+` otherwise.
+ MinusPlus, // -inf -1 +0 +0 +1 +inf nan
+ /// Prints `-` for any negative values (including the negative zero), or `+` otherwise.
+ MinusPlusRaw, // -inf -1 -0 +0 +1 +inf nan
+}
+
+/// Returns the static byte string corresponding to the sign to be formatted.
+/// It can be either `b""`, `b"+"` or `b"-"`.
+fn determine_sign(sign: Sign, decoded: &FullDecoded, negative: bool) -> &'static [u8] {
+ match (*decoded, sign) {
+ (FullDecoded::Nan, _) => b"",
+ (FullDecoded::Zero, Sign::Minus) => b"",
+ (FullDecoded::Zero, Sign::MinusRaw) => if negative { b"-" } else { b"" },
+ (FullDecoded::Zero, Sign::MinusPlus) => b"+",
+ (FullDecoded::Zero, Sign::MinusPlusRaw) => if negative { b"-" } else { b"+" },
+ (_, Sign::Minus) | (_, Sign::MinusRaw) => if negative { b"-" } else { b"" },
+ (_, Sign::MinusPlus) | (_, Sign::MinusPlusRaw) => if negative { b"-" } else { b"+" },
+ }
+}
+
+/// Formats given floating point number into the decimal form with at least
+/// given number of fractional digits. The result is stored to the supplied parts
+/// array while utilizing given byte buffer as a scratch. `upper` is currently
+/// unused but left for the future decision to change the case of non-finite values,
+/// i.e. `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
+/// (which can be an empty string if no sign is rendered).
+///
+/// `format_shortest` should be the underlying digit-generation function.
+/// You probably would want `strategy::grisu::format_shortest` for this.
+///
+/// `frac_digits` can be less than the number of actual fractional digits in `v`;
+/// it will be ignored and full digits will be printed. It is only used to print
+/// additional zeroes after rendered digits. Thus `frac_digits` of 0 means that
+/// it will only print given digits and nothing else.
+///
+/// The byte buffer should be at least `MAX_SIG_DIGITS` bytes long.
+/// There should be at least 5 parts available, due to the worst case like
+/// `[+][0.][0000][45][0000]` with `frac_digits = 10`.
+pub fn to_shortest_str<'a, T, F>(mut format_shortest: F, v: T,
+ sign: Sign, frac_digits: usize, _upper: bool,
+ buf: &'a mut [u8], parts: &'a mut [Part<'a>]) -> Formatted<'a>
+ where T: DecodableFloat, F: FnMut(&Decoded, &mut [u8]) -> (usize, i16) {
+ assert!(parts.len() >= 4);
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = Part::Copy(b"NaN");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = Part::Copy(b"inf");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Zero => {
+ if frac_digits > 0 { // [0.][0000]
+ parts[0] = Part::Copy(b"0.");
+ parts[1] = Part::Zero(frac_digits);
+ Formatted { sign: sign, parts: &parts[..2] }
+ } else {
+ parts[0] = Part::Copy(b"0");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let (len, exp) = format_shortest(decoded, buf);
+ Formatted { sign: sign,
+ parts: digits_to_dec_str(&buf[..len], exp, frac_digits, parts) }
+ }
+ }
+}
+
+/// Formats given floating point number into the decimal form or
+/// the exponential form, depending on the resulting exponent. The result is
+/// stored to the supplied parts array while utilizing given byte buffer
+/// as a scratch. `upper` is used to determine the case of non-finite values
+/// (`inf` and `nan`) or the case of the exponent prefix (`e` or `E`).
+/// The first part to be rendered is always a `Part::Sign` (which can be
+/// an empty string if no sign is rendered).
+///
+/// `format_shortest` should be the underlying digit-generation function.
+/// You probably would want `strategy::grisu::format_shortest` for this.
+///
+/// The `dec_bounds` is a tuple `(lo, hi)` such that the number is formatted
+/// as decimal only when `10^lo <= V < 10^hi`. Note that this is the *apparent* `V`
+/// instead of the actual `v`! Thus any printed exponent in the exponential form
+/// cannot be in this range, avoiding any confusion.
+///
+/// The byte buffer should be at least `MAX_SIG_DIGITS` bytes long.
+/// There should be at least 6 parts available, due to the worst case like
+/// `[+][1][.][2345][e][-][67]` (the sign is stored separately).
+pub fn to_shortest_exp_str<'a, T, F>(mut format_shortest: F, v: T,
+ sign: Sign, dec_bounds: (i16, i16), upper: bool,
+ buf: &'a mut [u8], parts: &'a mut [Part<'a>]) -> Formatted<'a>
+ where T: DecodableFloat, F: FnMut(&Decoded, &mut [u8]) -> (usize, i16) {
+ assert!(parts.len() >= 6);
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+ assert!(dec_bounds.0 <= dec_bounds.1);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = Part::Copy(b"NaN");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = Part::Copy(b"inf");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Zero => {
+ // zero uses the plain decimal form (`0`) only when the apparent exponent 0
+ // is inside `dec_bounds`; otherwise it is rendered exponentially as `0e0`.
+ parts[0] = if dec_bounds.0 <= 0 && 0 < dec_bounds.1 {
+ Part::Copy(b"0")
+ } else {
+ Part::Copy(if upper { b"0E0" } else { b"0e0" })
+ };
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let (len, exp) = format_shortest(decoded, buf);
+ // `vis_exp` is the exponent as it would appear in `d.dddd * 10^vis_exp`;
+ // choose the decimal form exactly when it falls inside `dec_bounds`.
+ let vis_exp = exp as i32 - 1;
+ let parts = if dec_bounds.0 as i32 <= vis_exp && vis_exp < dec_bounds.1 as i32 {
+ digits_to_dec_str(&buf[..len], exp, 0, parts)
+ } else {
+ digits_to_exp_str(&buf[..len], exp, 0, upper, parts)
+ };
+ Formatted { sign: sign, parts: parts }
+ }
+ }
+}
+
+/// Returns rather crude approximation (upper bound) for the maximum buffer size
+/// calculated from the given decoded exponent.
+///
+/// The exact limit is:
+///
+/// - when `exp < 0`, the maximum length is `ceil(log_10 (5^-exp * (2^64 - 1)))`.
+/// - when `exp >= 0`, the maximum length is `ceil(log_10 (2^exp * (2^64 - 1)))`.
+///
+/// `ceil(log_10 (x^exp * (2^64 - 1)))` is less than `ceil(log_10 (2^64 - 1)) +
+/// ceil(exp * log_10 x)`, which is in turn less than `20 + (1 + exp * log_10 x)`.
+/// We use the facts that `log_10 2 < 5/16` and `log_10 5 < 12/16`, which is
+/// enough for our purposes.
+///
+/// Why do we need this? `format_exact` functions will fill the entire buffer
+/// unless limited by the last digit restriction, but it is possible that
+/// the number of digits requested is ridiculously large (say, 30,000 digits).
+/// The vast majority of buffer will be filled with zeroes, so we don't want to
+/// allocate all the buffer beforehand. Consequently, for any given arguments,
+/// 826 bytes of buffer should be sufficient for `f64`. Compare this with
+/// the actual number for the worst case: 770 bytes (when `exp = -1074`).
+fn estimate_max_buf_len(exp: i16) -> usize {
+ // `21` accounts for `ceil(log_10 (2^64 - 1))` plus one; the `>> 4` realizes the
+ // `/16` in the rational upper bounds `log_10 2 < 5/16` and `log_10 5 < 12/16`
+ // documented above. (`-12 * exp` is positive when `exp < 0`, so the `usize` cast
+ // is always in range.)
+ 21 + ((if exp < 0 { -12 } else { 5 } * exp as i32) as usize >> 4)
+}
+
+/// Formats given floating point number into the exponential form with
+/// exactly given number of significant digits. The result is stored to
+/// the supplied parts array while utilizing given byte buffer as a scratch.
+/// `upper` is used to determine the case of the exponent prefix (`e` or `E`).
+/// The first part to be rendered is always a `Part::Sign` (which can be
+/// an empty string if no sign is rendered).
+///
+/// `format_exact` should be the underlying digit-generation function.
+/// You probably would want `strategy::grisu::format_exact` for this.
+///
+/// The byte buffer should be at least `ndigits` bytes long unless `ndigits` is
+/// so large that only the fixed number of digits will be ever written.
+/// (The tipping point for `f64` is about 800, so 1000 bytes should be enough.)
+/// There should be at least 6 parts available, due to the worst case like
+/// `[+][1][.][2345][e][-][67]` (the sign is stored separately).
+pub fn to_exact_exp_str<'a, T, F>(mut format_exact: F, v: T,
+ sign: Sign, ndigits: usize, upper: bool,
+ buf: &'a mut [u8], parts: &'a mut [Part<'a>]) -> Formatted<'a>
+ where T: DecodableFloat, F: FnMut(&Decoded, &mut [u8], i16) -> (usize, i16) {
+ assert!(parts.len() >= 6);
+ assert!(ndigits > 0);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = Part::Copy(b"NaN");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = Part::Copy(b"inf");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Zero => {
+ if ndigits > 1 { // [0.][0000][e0]
+ parts[0] = Part::Copy(b"0.");
+ parts[1] = Part::Zero(ndigits - 1);
+ parts[2] = Part::Copy(if upper { b"E0" } else { b"e0" });
+ Formatted { sign: sign, parts: &parts[..3] }
+ } else {
+ parts[0] = Part::Copy(if upper { b"0E0" } else { b"0e0" });
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let maxlen = estimate_max_buf_len(decoded.exp);
+ assert!(buf.len() >= ndigits || buf.len() >= maxlen);
+
+ // only `maxlen` digits can ever be non-zero; requesting more is wasted work.
+ // `i16::MIN` as the limit effectively disables the last-digit restriction.
+ let trunc = if ndigits < maxlen { ndigits } else { maxlen };
+ let (len, exp) = format_exact(decoded, &mut buf[..trunc], i16::MIN);
+ Formatted { sign: sign,
+ parts: digits_to_exp_str(&buf[..len], exp, ndigits, upper, parts) }
+ }
+ }
+}
+
+/// Formats given floating point number into the decimal form with exactly
+/// given number of fractional digits. The result is stored to the supplied parts
+/// array while utilizing given byte buffer as a scratch. `upper` is currently
+/// unused but left for the future decision to change the case of non-finite values,
+/// i.e. `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
+/// (which can be an empty string if no sign is rendered).
+///
+/// `format_exact` should be the underlying digit-generation function.
+/// You probably would want `strategy::grisu::format_exact` for this.
+///
+/// The byte buffer should be enough for the output unless `frac_digits` is
+/// so large that only the fixed number of digits will be ever written.
+/// (The tipping point for `f64` is about 800, and 1000 bytes should be enough.)
+/// There should be at least 4 parts available, due to the worst case like
+/// `[+][0.][0000][45][0000]` with `frac_digits = 10` (the sign is stored separately).
+pub fn to_exact_fixed_str<'a, T, F>(mut format_exact: F, v: T,
+ sign: Sign, frac_digits: usize, _upper: bool,
+ buf: &'a mut [u8], parts: &'a mut [Part<'a>]) -> Formatted<'a>
+ where T: DecodableFloat, F: FnMut(&Decoded, &mut [u8], i16) -> (usize, i16) {
+ assert!(parts.len() >= 4);
+
+ let (negative, full_decoded) = decode(v);
+ let sign = determine_sign(sign, &full_decoded, negative);
+ match full_decoded {
+ FullDecoded::Nan => {
+ parts[0] = Part::Copy(b"NaN");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Infinite => {
+ parts[0] = Part::Copy(b"inf");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ FullDecoded::Zero => {
+ if frac_digits > 0 { // [0.][0000]
+ parts[0] = Part::Copy(b"0.");
+ parts[1] = Part::Zero(frac_digits);
+ Formatted { sign: sign, parts: &parts[..2] }
+ } else {
+ parts[0] = Part::Copy(b"0");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ }
+ FullDecoded::Finite(ref decoded) => {
+ let maxlen = estimate_max_buf_len(decoded.exp);
+ assert!(buf.len() >= maxlen);
+
+ // it *is* possible that `frac_digits` is ridiculously large.
+ // `format_exact` will end rendering digits much earlier in this case,
+ // because we are strictly limited by `maxlen`.
+ // (the clamp also keeps `-(frac_digits as i16)` from overflowing `i16`.)
+ let limit = if frac_digits < 0x8000 { -(frac_digits as i16) } else { i16::MIN };
+ let (len, exp) = format_exact(decoded, &mut buf[..maxlen], limit);
+ if exp <= limit {
+ // the restriction couldn't be met, so this should render like zero no matter
+ // what `exp` was. this does not include the case that the restriction has been
+ // met only after the final rounding-up; it's a regular case with `exp = limit + 1`.
+ debug_assert_eq!(len, 0);
+ if frac_digits > 0 { // [0.][0000]
+ parts[0] = Part::Copy(b"0.");
+ parts[1] = Part::Zero(frac_digits);
+ Formatted { sign: sign, parts: &parts[..2] }
+ } else {
+ parts[0] = Part::Copy(b"0");
+ Formatted { sign: sign, parts: &parts[..1] }
+ }
+ } else {
+ Formatted { sign: sign,
+ parts: digits_to_dec_str(&buf[..len], exp, frac_digits, parts) }
+ }
+ }
+ }
+}
+
diff --git a/libcore/num/flt2dec/strategy/dragon.rs b/libcore/num/flt2dec/strategy/dragon.rs
new file mode 100644
index 0000000..2d68c3a
--- /dev/null
+++ b/libcore/num/flt2dec/strategy/dragon.rs
@@ -0,0 +1,329 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+Almost direct (but slightly optimized) Rust translation of Figure 3 of [1].
+
+[1] Burger, R. G. and Dybvig, R. K. 1996. Printing floating-point numbers
+ quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116.
+*/
+
+use prelude::v1::*;
+
+use cmp::Ordering;
+
+use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up};
+use num::flt2dec::estimator::estimate_scaling_factor;
+use num::bignum::Digit32 as Digit;
+use num::bignum::Big32x40 as Big;
+
+// `POW10[i] = 10^i` and `TWOPOW10[i] = 2 * 10^i` as single 32-bit digits,
+// used for small multiplications and divisions below.
+static POW10: [Digit; 10] = [1, 10, 100, 1000, 10000, 100000,
+ 1000000, 10000000, 100000000, 1000000000];
+static TWOPOW10: [Digit; 10] = [2, 20, 200, 2000, 20000, 200000,
+ 2000000, 20000000, 200000000, 2000000000];
+
+// precalculated arrays of `Digit`s for 10^(2^n), little-endian (least
+// significant digit first); consumed by `mul_pow10` for the higher bits of `n`
+static POW10TO16: [Digit; 2] = [0x6fc10000, 0x2386f2];
+static POW10TO32: [Digit; 4] = [0, 0x85acef81, 0x2d6d415b, 0x4ee];
+static POW10TO64: [Digit; 7] = [0, 0, 0xbf6a1f01, 0x6e38ed64, 0xdaa797ed, 0xe93ff9f4, 0x184f03];
+static POW10TO128: [Digit; 14] =
+ [0, 0, 0, 0, 0x2e953e01, 0x3df9909, 0xf1538fd, 0x2374e42f, 0xd3cff5ec, 0xc404dc08,
+ 0xbccdb0da, 0xa6337f19, 0xe91f2603, 0x24e];
+static POW10TO256: [Digit; 27] =
+ [0, 0, 0, 0, 0, 0, 0, 0, 0x982e7c01, 0xbed3875b, 0xd8d99f72, 0x12152f87, 0x6bde50c6,
+ 0xcf4a6e70, 0xd595d80f, 0x26b2716e, 0xadc666b0, 0x1d153624, 0x3c42d35a, 0x63ff540e,
+ 0xcc5573c0, 0x65f9ef17, 0x55bc28f2, 0x80dcc7f7, 0xf46eeddc, 0x5fdcefce, 0x553f7];
+
+#[doc(hidden)]
+pub fn mul_pow10(x: &mut Big, n: usize) -> &mut Big {
+ // multiplies `x` in place by `10^n`, decomposing `n` bitwise: the three lowest
+ // bits are handled with one small multiplication via `POW10[n & 7]`, higher
+ // bits with the precalculated `10^(2^m)` bignum tables above.
+ debug_assert!(n < 512);
+ if n & 7 != 0 { x.mul_small(POW10[n & 7]); }
+ if n & 8 != 0 { x.mul_small(POW10[8]); }
+ if n & 16 != 0 { x.mul_digits(&POW10TO16); }
+ if n & 32 != 0 { x.mul_digits(&POW10TO32); }
+ if n & 64 != 0 { x.mul_digits(&POW10TO64); }
+ if n & 128 != 0 { x.mul_digits(&POW10TO128); }
+ if n & 256 != 0 { x.mul_digits(&POW10TO256); }
+ x
+}
+
+// divides `x` in place by `2 * 10^n`, discarding the remainder
+// (the factor of two comes from using `TWOPOW10` for the final step).
+fn div_2pow10(x: &mut Big, mut n: usize) -> &mut Big {
+ let largest = POW10.len() - 1;
+ // peel off the largest single-digit power (10^9) until `n` fits the table.
+ while n > largest {
+ x.div_rem_small(POW10[largest]);
+ n -= largest;
+ }
+ x.div_rem_small(TWOPOW10[n]);
+ x
+}
+
+// only usable when `x < 16 * scale`; `scaleN` should be `scale.mul_small(N)`
+// computes the quotient digit `x / scale` by binary subtraction of
+// `8, 4, 2, 1` times `scale`, leaving the remainder in `x` (returned borrowed).
+fn div_rem_upto_16<'a>(x: &'a mut Big, scale: &Big,
+ scale2: &Big, scale4: &Big, scale8: &Big) -> (u8, &'a mut Big) {
+ let mut d = 0;
+ if *x >= *scale8 { x.sub(scale8); d += 8; }
+ if *x >= *scale4 { x.sub(scale4); d += 4; }
+ if *x >= *scale2 { x.sub(scale2); d += 2; }
+ if *x >= *scale { x.sub(scale); d += 1; }
+ debug_assert!(*x < *scale);
+ (d, x)
+}
+
+/// The shortest mode implementation for Dragon.
+pub fn format_shortest(d: &Decoded, buf: &mut [u8]) -> (/*#digits*/ usize, /*exp*/ i16) {
+ // the number `v` to format is known to be:
+ // - equal to `mant * 2^exp`;
+ // - preceded by `(mant - 2 * minus) * 2^exp` in the original type; and
+ // - followed by `(mant + 2 * plus) * 2^exp` in the original type.
+ //
+ // obviously, `minus` and `plus` cannot be zero. (for infinities, we use out-of-range values.)
+ // also we assume that at least one digit is generated, i.e. `mant` cannot be zero too.
+ //
+ // this also means that any number between `low = (mant - minus) * 2^exp` and
+ // `high = (mant + plus) * 2^exp` will map to this exact floating point number,
+ // with bounds included when the original mantissa was even (i.e. `!mant_was_odd`).
+
+ assert!(d.mant > 0);
+ assert!(d.minus > 0);
+ assert!(d.plus > 0);
+ assert!(d.mant.checked_add(d.plus).is_some());
+ assert!(d.mant.checked_sub(d.minus).is_some());
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+
+ // `a.cmp(&b) < rounding` is `if d.inclusive {a <= b} else {a < b}`
+ let rounding = if d.inclusive {Ordering::Greater} else {Ordering::Equal};
+
+ // estimate `k_0` from original inputs satisfying `10^(k_0-1) < high <= 10^(k_0+1)`.
+ // the tight bound `k` satisfying `10^(k-1) < high <= 10^k` is calculated later.
+ let mut k = estimate_scaling_factor(d.mant + d.plus, d.exp);
+
+ // convert `{mant, plus, minus} * 2^exp` into the fractional form so that:
+ // - `v = mant / scale`
+ // - `low = (mant - minus) / scale`
+ // - `high = (mant + plus) / scale`
+ let mut mant = Big::from_u64(d.mant);
+ let mut minus = Big::from_u64(d.minus);
+ let mut plus = Big::from_u64(d.plus);
+ let mut scale = Big::from_small(1);
+ if d.exp < 0 {
+ scale.mul_pow2(-d.exp as usize);
+ } else {
+ mant.mul_pow2(d.exp as usize);
+ minus.mul_pow2(d.exp as usize);
+ plus.mul_pow2(d.exp as usize);
+ }
+
+ // divide `mant` by `10^k`. now `scale / 10 < mant + plus <= scale * 10`.
+ if k >= 0 {
+ mul_pow10(&mut scale, k as usize);
+ } else {
+ mul_pow10(&mut mant, -k as usize);
+ mul_pow10(&mut minus, -k as usize);
+ mul_pow10(&mut plus, -k as usize);
+ }
+
+ // fixup when `mant + plus > scale` (or `>=`).
+ // we are not actually modifying `scale`, since we can skip the initial multiplication instead.
+ // now `scale < mant + plus <= scale * 10` and we are ready to generate digits.
+ //
+ // note that `d[0]` *can* be zero, when `scale - plus < mant < scale`.
+ // in this case rounding-up condition (`up` below) will be triggered immediately.
+ if scale.cmp(mant.clone().add(&plus)) < rounding {
+ // equivalent to scaling `scale` by 10
+ k += 1;
+ } else {
+ mant.mul_small(10);
+ minus.mul_small(10);
+ plus.mul_small(10);
+ }
+
+ // cache `(2, 4, 8) * scale` for digit generation.
+ let mut scale2 = scale.clone(); scale2.mul_pow2(1);
+ let mut scale4 = scale.clone(); scale4.mul_pow2(2);
+ let mut scale8 = scale.clone(); scale8.mul_pow2(3);
+
+ let mut down;
+ let mut up;
+ let mut i = 0;
+ loop {
+ // invariants, where `d[0..n-1]` are digits generated so far:
+ // - `v = mant / scale * 10^(k-n-1) + d[0..n-1] * 10^(k-n)`
+ // - `v - low = minus / scale * 10^(k-n-1)`
+ // - `high - v = plus / scale * 10^(k-n-1)`
+ // - `(mant + plus) / scale <= 10` (thus `mant / scale < 10`)
+ // where `d[i..j]` is a shorthand for `d[i] * 10^(j-i) + ... + d[j-1] * 10 + d[j]`.
+
+ // generate one digit: `d[n] = floor(mant / scale) < 10`.
+ let (d, _) = div_rem_upto_16(&mut mant, &scale, &scale2, &scale4, &scale8);
+ debug_assert!(d < 10);
+ buf[i] = b'0' + d;
+ i += 1;
+
+ // this is a simplified description of the modified Dragon algorithm.
+ // many intermediate derivations and completeness arguments are omitted for convenience.
+ //
+ // start with modified invariants, as we've updated `n`:
+ // - `v = mant / scale * 10^(k-n) + d[0..n-1] * 10^(k-n)`
+ // - `v - low = minus / scale * 10^(k-n)`
+ // - `high - v = plus / scale * 10^(k-n)`
+ //
+ // assume that `d[0..n-1]` is the shortest representation between `low` and `high`,
+ // i.e. `d[0..n-1]` satisfies both of the following but `d[0..n-2]` doesn't:
+ // - `low < d[0..n-1] * 10^(k-n) < high` (bijectivity: digits round to `v`); and
+ // - `abs(v / 10^(k-n) - d[0..n-1]) <= 1/2` (the last digit is correct).
+ //
+ // the second condition simplifies to `2 * mant <= scale`.
+ // solving invariants in terms of `mant`, `low` and `high` yields
+ // a simpler version of the first condition: `-plus < mant < minus`.
+ // since `-plus < 0 <= mant`, we have the correct shortest representation
+ // when `mant < minus` and `2 * mant <= scale`.
+ // (the former becomes `mant <= minus` when the original mantissa is even.)
+ //
+ // when the second doesn't hold (`2 * mant > scale`), we need to increase the last digit.
+ // this is enough for restoring that condition: we already know that
+ // the digit generation guarantees `0 <= v / 10^(k-n) - d[0..n-1] < 1`.
+ // in this case, the first condition becomes `-plus < mant - scale < minus`.
+ // since `mant < scale` after the generation, we have `scale < mant + plus`.
+ // (again, this becomes `scale <= mant + plus` when the original mantissa is even.)
+ //
+ // in short:
+ // - stop and round `down` (keep digits as is) when `mant < minus` (or `<=`).
+ // - stop and round `up` (increase the last digit) when `scale < mant + plus` (or `<=`).
+ // - keep generating otherwise.
+ down = mant.cmp(&minus) < rounding;
+ up = scale.cmp(mant.clone().add(&plus)) < rounding;
+ if down || up { break; } // we have the shortest representation, proceed to the rounding
+
+ // restore the invariants.
+ // this makes the algorithm always terminating: `minus` and `plus` always increases,
+ // but `mant` is clipped modulo `scale` and `scale` is fixed.
+ mant.mul_small(10);
+ minus.mul_small(10);
+ plus.mul_small(10);
+ }
+
+ // rounding up happens when
+ // i) only the rounding-up condition was triggered, or
+ // ii) both conditions were triggered and tie breaking prefers rounding up.
+ // (`2 * mant >= scale` means the remaining fraction is at least one half,
+ // so the rounded-up representation is at least as close to `v`.)
+ if up && (!down || *mant.mul_pow2(1) >= scale) {
+ // if rounding up changes the length, the exponent should also change.
+ // it seems that this condition is very hard to satisfy (possibly impossible),
+ // but we are just being safe and consistent here.
+ if let Some(c) = round_up(buf, i) {
+ buf[i] = c;
+ i += 1;
+ k += 1;
+ }
+ }
+
+ // the `i` digits written denote the value `0.d[0..i] * 10^k`.
+ (i, k)
+}
+
+/// The exact and fixed mode implementation for Dragon.
+pub fn format_exact(d: &Decoded, buf: &mut [u8], limit: i16) -> (/*#digits*/ usize, /*exp*/ i16) {
+ assert!(d.mant > 0);
+ assert!(d.minus > 0);
+ assert!(d.plus > 0);
+ assert!(d.mant.checked_add(d.plus).is_some());
+ assert!(d.mant.checked_sub(d.minus).is_some());
+
+ // estimate `k_0` from original inputs satisfying `10^(k_0-1) < v <= 10^(k_0+1)`.
+ let mut k = estimate_scaling_factor(d.mant, d.exp);
+
+ // `v = mant / scale`.
+ let mut mant = Big::from_u64(d.mant);
+ let mut scale = Big::from_small(1);
+ if d.exp < 0 {
+ scale.mul_pow2(-d.exp as usize);
+ } else {
+ mant.mul_pow2(d.exp as usize);
+ }
+
+ // divide `mant` by `10^k`. now `scale / 10 < mant <= scale * 10`.
+ if k >= 0 {
+ mul_pow10(&mut scale, k as usize);
+ } else {
+ mul_pow10(&mut mant, -k as usize);
+ }
+
+ // fixup when `mant + plus >= scale`, where `plus / scale = 10^-buf.len() / 2`.
+ // in order to keep the fixed-size bignum, we actually use `mant + floor(plus) >= scale`.
+ // we are not actually modifying `scale`, since we can skip the initial multiplication instead.
+ // again with the shortest algorithm, `d[0]` can be zero but will be eventually rounded up.
+ if *div_2pow10(&mut scale.clone(), buf.len()).add(&mant) >= scale {
+ // equivalent to scaling `scale` by 10
+ k += 1;
+ } else {
+ mant.mul_small(10);
+ }
+
+ // if we are working with the last-digit limitation, we need to shorten the buffer
+ // before the actual rendering in order to avoid double rounding.
+ // note that we have to enlarge the buffer again when rounding up happens!
+ let mut len = if k < limit {
+ // oops, we cannot even produce *one* digit.
+ // this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
+ // we return an empty buffer, with an exception of the later rounding-up case
+ // which occurs when `k == limit` and has to produce exactly one digit.
+ 0
+ } else if ((k as i32 - limit as i32) as usize) < buf.len() {
+ (k - limit) as usize
+ } else {
+ buf.len()
+ };
+
+ if len > 0 {
+ // cache `(2, 4, 8) * scale` for digit generation.
+ // (this can be expensive, so do not calculate them when the buffer is empty.)
+ let mut scale2 = scale.clone(); scale2.mul_pow2(1);
+ let mut scale4 = scale.clone(); scale4.mul_pow2(2);
+ let mut scale8 = scale.clone(); scale8.mul_pow2(3);
+
+ for i in 0..len {
+ if mant.is_zero() { // following digits are all zeroes, we stop here
+ // do *not* try to perform rounding! rather, fill remaining digits.
+ for c in &mut buf[i..len] { *c = b'0'; }
+ return (len, k);
+ }
+
+ // same binary quotient extraction as `div_rem_upto_16`, inlined here so
+ // `mant` can be advanced (multiplied by 10) for the next digit in place.
+ let mut d = 0;
+ if mant >= scale8 { mant.sub(&scale8); d += 8; }
+ if mant >= scale4 { mant.sub(&scale4); d += 4; }
+ if mant >= scale2 { mant.sub(&scale2); d += 2; }
+ if mant >= scale { mant.sub(&scale); d += 1; }
+ debug_assert!(mant < scale);
+ debug_assert!(d < 10);
+ buf[i] = b'0' + d;
+ mant.mul_small(10);
+ }
+ }
+
+ // rounding up if we stop in the middle of digits
+ // if the following digits are exactly 5000..., check the prior digit and try to
+ // round to even (i.e. avoid rounding up when the prior digit is even).
+ let order = mant.cmp(scale.mul_small(5));
+ if order == Ordering::Greater || (order == Ordering::Equal &&
+ (len == 0 || buf[len-1] & 1 == 1)) {
+ // if rounding up changes the length, the exponent should also change.
+ // but we've been requested a fixed number of digits, so do not alter the buffer...
+ if let Some(c) = round_up(buf, len) {
+ // ...unless we've been requested the fixed precision instead.
+ // we also need to check that, if the original buffer was empty,
+ // the additional digit can only be added when `k == limit` (edge case).
+ k += 1;
+ if k > limit && len < buf.len() {
+ buf[len] = c;
+ len += 1;
+ }
+ }
+ }
+
+ (len, k)
+}
diff --git a/libcore/num/flt2dec/strategy/grisu.rs b/libcore/num/flt2dec/strategy/grisu.rs
new file mode 100644
index 0000000..13e01d9
--- /dev/null
+++ b/libcore/num/flt2dec/strategy/grisu.rs
@@ -0,0 +1,696 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+Rust adaptation of Grisu3 algorithm described in [1]. It uses about
+1KB of precomputed table, and in turn, it's very quick for most inputs.
+
+[1] Florian Loitsch. 2010. Printing floating-point numbers quickly and
+ accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243.
+*/
+
+use prelude::v1::*;
+
+use num::diy_float::Fp;
+use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up};
+
+
+// see the comments in `format_shortest_opt` for the rationale.
+#[doc(hidden)] pub const ALPHA: i16 = -60;
+#[doc(hidden)] pub const GAMMA: i16 = -32;
+
+/*
+# the following Python code generates this table:
+for i in xrange(-308, 333, 8):
+ if i >= 0: f = 10**i; e = 0
+ else: f = 2**(80-4*i) // 10**-i; e = 4 * i - 80
+ l = f.bit_length()
+ f = ((f << 64 >> (l-1)) + 1) >> 1; e += l - 64
+ print ' (%#018x, %5d, %4d),' % (f, e, i)
+*/
+
+#[doc(hidden)]
+pub static CACHED_POW10: [(u64, i16, i16); 81] = [ // (f, e, k)
+ (0xe61acf033d1a45df, -1087, -308),
+ (0xab70fe17c79ac6ca, -1060, -300),
+ (0xff77b1fcbebcdc4f, -1034, -292),
+ (0xbe5691ef416bd60c, -1007, -284),
+ (0x8dd01fad907ffc3c, -980, -276),
+ (0xd3515c2831559a83, -954, -268),
+ (0x9d71ac8fada6c9b5, -927, -260),
+ (0xea9c227723ee8bcb, -901, -252),
+ (0xaecc49914078536d, -874, -244),
+ (0x823c12795db6ce57, -847, -236),
+ (0xc21094364dfb5637, -821, -228),
+ (0x9096ea6f3848984f, -794, -220),
+ (0xd77485cb25823ac7, -768, -212),
+ (0xa086cfcd97bf97f4, -741, -204),
+ (0xef340a98172aace5, -715, -196),
+ (0xb23867fb2a35b28e, -688, -188),
+ (0x84c8d4dfd2c63f3b, -661, -180),
+ (0xc5dd44271ad3cdba, -635, -172),
+ (0x936b9fcebb25c996, -608, -164),
+ (0xdbac6c247d62a584, -582, -156),
+ (0xa3ab66580d5fdaf6, -555, -148),
+ (0xf3e2f893dec3f126, -529, -140),
+ (0xb5b5ada8aaff80b8, -502, -132),
+ (0x87625f056c7c4a8b, -475, -124),
+ (0xc9bcff6034c13053, -449, -116),
+ (0x964e858c91ba2655, -422, -108),
+ (0xdff9772470297ebd, -396, -100),
+ (0xa6dfbd9fb8e5b88f, -369, -92),
+ (0xf8a95fcf88747d94, -343, -84),
+ (0xb94470938fa89bcf, -316, -76),
+ (0x8a08f0f8bf0f156b, -289, -68),
+ (0xcdb02555653131b6, -263, -60),
+ (0x993fe2c6d07b7fac, -236, -52),
+ (0xe45c10c42a2b3b06, -210, -44),
+ (0xaa242499697392d3, -183, -36),
+ (0xfd87b5f28300ca0e, -157, -28),
+ (0xbce5086492111aeb, -130, -20),
+ (0x8cbccc096f5088cc, -103, -12),
+ (0xd1b71758e219652c, -77, -4),
+ (0x9c40000000000000, -50, 4),
+ (0xe8d4a51000000000, -24, 12),
+ (0xad78ebc5ac620000, 3, 20),
+ (0x813f3978f8940984, 30, 28),
+ (0xc097ce7bc90715b3, 56, 36),
+ (0x8f7e32ce7bea5c70, 83, 44),
+ (0xd5d238a4abe98068, 109, 52),
+ (0x9f4f2726179a2245, 136, 60),
+ (0xed63a231d4c4fb27, 162, 68),
+ (0xb0de65388cc8ada8, 189, 76),
+ (0x83c7088e1aab65db, 216, 84),
+ (0xc45d1df942711d9a, 242, 92),
+ (0x924d692ca61be758, 269, 100),
+ (0xda01ee641a708dea, 295, 108),
+ (0xa26da3999aef774a, 322, 116),
+ (0xf209787bb47d6b85, 348, 124),
+ (0xb454e4a179dd1877, 375, 132),
+ (0x865b86925b9bc5c2, 402, 140),
+ (0xc83553c5c8965d3d, 428, 148),
+ (0x952ab45cfa97a0b3, 455, 156),
+ (0xde469fbd99a05fe3, 481, 164),
+ (0xa59bc234db398c25, 508, 172),
+ (0xf6c69a72a3989f5c, 534, 180),
+ (0xb7dcbf5354e9bece, 561, 188),
+ (0x88fcf317f22241e2, 588, 196),
+ (0xcc20ce9bd35c78a5, 614, 204),
+ (0x98165af37b2153df, 641, 212),
+ (0xe2a0b5dc971f303a, 667, 220),
+ (0xa8d9d1535ce3b396, 694, 228),
+ (0xfb9b7cd9a4a7443c, 720, 236),
+ (0xbb764c4ca7a44410, 747, 244),
+ (0x8bab8eefb6409c1a, 774, 252),
+ (0xd01fef10a657842c, 800, 260),
+ (0x9b10a4e5e9913129, 827, 268),
+ (0xe7109bfba19c0c9d, 853, 276),
+ (0xac2820d9623bf429, 880, 284),
+ (0x80444b5e7aa7cf85, 907, 292),
+ (0xbf21e44003acdd2d, 933, 300),
+ (0x8e679c2f5e44ff8f, 960, 308),
+ (0xd433179d9c8cb841, 986, 316),
+ (0x9e19db92b4e31ba9, 1013, 324),
+ (0xeb96bf6ebadf77d9, 1039, 332),
+];
+
+#[doc(hidden)] pub const CACHED_POW10_FIRST_E: i16 = -1087;
+#[doc(hidden)] pub const CACHED_POW10_LAST_E: i16 = 1039;
+
+#[doc(hidden)]
+pub fn cached_power(alpha: i16, gamma: i16) -> (i16, Fp) {
+ // returns `(k, 10^k)` from the table such that the binary exponent `e` of the
+ // cached `10^k` satisfies `alpha <= e <= gamma`. the index is chosen by linear
+ // interpolation over the table's (roughly evenly spaced) exponents; the
+ // `debug_assert!` below verifies the interpolation landed inside the window.
+ let offset = CACHED_POW10_FIRST_E as i32;
+ let range = (CACHED_POW10.len() as i32) - 1;
+ let domain = (CACHED_POW10_LAST_E - CACHED_POW10_FIRST_E) as i32;
+ let idx = ((gamma as i32) - offset) * range / domain;
+ let (f, e, k) = CACHED_POW10[idx as usize];
+ debug_assert!(alpha <= e && e <= gamma);
+ (k, Fp { f: f, e: e })
+}
+
+/// Given `x > 0`, returns `(k, 10^k)` such that `10^k <= x < 10^(k+1)`.
+#[doc(hidden)]
+pub fn max_pow10_no_more_than(x: u32) -> (u8, u32) {
+ debug_assert!(x > 0);
+
+ // X9 = 10^9 is the largest power of ten representable in a `u32`.
+ const X9: u32 = 10_0000_0000;
+ const X8: u32 = 1_0000_0000;
+ const X7: u32 = 1000_0000;
+ const X6: u32 = 100_0000;
+ const X5: u32 = 10_0000;
+ const X4: u32 = 1_0000;
+ const X3: u32 = 1000;
+ const X2: u32 = 100;
+ const X1: u32 = 10;
+
+ // a hand-rolled binary search over the ten powers (at most 4 comparisons).
+ if x < X4 {
+ if x < X2 { if x < X1 {(0, 1)} else {(1, X1)} }
+ else { if x < X3 {(2, X2)} else {(3, X3)} }
+ } else {
+ if x < X6 { if x < X5 {(4, X4)} else {(5, X5)} }
+ else if x < X8 { if x < X7 {(6, X6)} else {(7, X7)} }
+ else { if x < X9 {(8, X8)} else {(9, X9)} }
+ }
+}
+
+/// The shortest mode implementation for Grisu.
+///
+/// It returns `None` when it would return an inexact representation otherwise.
+pub fn format_shortest_opt(d: &Decoded,
+ buf: &mut [u8]) -> Option<(/*#digits*/ usize, /*exp*/ i16)> {
+ assert!(d.mant > 0);
+ assert!(d.minus > 0);
+ assert!(d.plus > 0);
+ assert!(d.mant.checked_add(d.plus).is_some());
+ assert!(d.mant.checked_sub(d.minus).is_some());
+ assert!(buf.len() >= MAX_SIG_DIGITS);
+ assert!(d.mant + d.plus < (1 << 61)); // we need at least three bits of additional precision
+
+ // start with the normalized values with the shared exponent
+ let plus = Fp { f: d.mant + d.plus, e: d.exp }.normalize();
+ let minus = Fp { f: d.mant - d.minus, e: d.exp }.normalize_to(plus.e);
+ let v = Fp { f: d.mant, e: d.exp }.normalize_to(plus.e);
+
+ // find any `cached = 10^minusk` such that `ALPHA <= minusk + plus.e + 64 <= GAMMA`.
+ // since `plus` is normalized, this means `2^(62 + ALPHA) <= plus * cached < 2^(64 + GAMMA)`;
+ // given our choices of `ALPHA` and `GAMMA`, this puts `plus * cached` into `[4, 2^32)`.
+ //
+ // it is obviously desirable to maximize `GAMMA - ALPHA`,
+ // so that we don't need many cached powers of 10, but there are some considerations:
+ //
+ // 1. we want to keep `floor(plus * cached)` within `u32` since it needs a costly division.
+ // (this is not really avoidable, remainder is required for accuracy estimation.)
+ // 2. the remainder of `floor(plus * cached)` repeatedly gets multiplied by 10,
+ // and it should not overflow.
+ //
+ // the first gives `64 + GAMMA <= 32`, while the second gives `10 * 2^-ALPHA <= 2^64`;
+ // -60 and -32 is the maximal range with this constraint, and V8 also uses them.
+ let (minusk, cached) = cached_power(ALPHA - plus.e - 64, GAMMA - plus.e - 64);
+
+ // scale fps. this gives the maximal error of 1 ulp (proved from Theorem 5.1).
+ let plus = plus.mul(&cached);
+ let minus = minus.mul(&cached);
+ let v = v.mul(&cached);
+ debug_assert_eq!(plus.e, minus.e);
+ debug_assert_eq!(plus.e, v.e);
+
+ // +- actual range of minus
+ // | <---|---------------------- unsafe region --------------------------> |
+ // | | |
+ // | |<--->| | <--------------- safe region ---------------> | |
+ // | | | | | |
+ // |1 ulp|1 ulp| |1 ulp|1 ulp| |1 ulp|1 ulp|
+ // |<--->|<--->| |<--->|<--->| |<--->|<--->|
+ // |-----|-----|-------...-------|-----|-----|-------...-------|-----|-----|
+ // | minus | | v | | plus |
+ // minus1 minus0 v - 1 ulp v + 1 ulp plus0 plus1
+ //
+ // above `minus`, `v` and `plus` are *quantized* approximations (error < 1 ulp).
+ // as we don't know the error is positive or negative, we use two approximations spaced equally
+ // and have the maximal error of 2 ulps.
+ //
+ // the "unsafe region" is a liberal interval which we initially generate.
+ // the "safe region" is a conservative interval which we only accept.
+ // we start with the correct repr within the unsafe region, and try to find the closest repr
+ // to `v` which is also within the safe region. if we can't, we give up.
+ let plus1 = plus.f + 1;
+// let plus0 = plus.f - 1; // only for explanation
+// let minus0 = minus.f + 1; // only for explanation
+ let minus1 = minus.f - 1;
+ let e = -plus.e as usize; // shared exponent
+
+ // divide `plus1` into integral and fractional parts.
+ // integral parts are guaranteed to fit in u32, since cached power guarantees `plus < 2^32`
+ // and normalized `plus.f` is always less than `2^64 - 2^4` due to the precision requirement.
+ let plus1int = (plus1 >> e) as u32;
+ let plus1frac = plus1 & ((1 << e) - 1);
+
+ // calculate the largest `10^max_kappa` no more than `plus1` (thus `plus1 < 10^(max_kappa+1)`).
+ // this is an upper bound of `kappa` below.
+ let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(plus1int);
+
+ let mut i = 0;
+ let exp = max_kappa as i16 - minusk + 1;
+
+ // Theorem 6.2: if `k` is the greatest integer s.t. `0 <= y mod 10^k <= y - x`,
+ // then `V = floor(y / 10^k) * 10^k` is in `[x, y]` and one of the shortest
+ // representations (with the minimal number of significant digits) in that range.
+ //
+ // find the digit length `kappa` between `(minus1, plus1)` as per Theorem 6.2.
+ // Theorem 6.2 can be adopted to exclude `x` by requiring `y mod 10^k < y - x` instead.
+ // (e.g. `x` = 32000, `y` = 32777; `kappa` = 2 since `y mod 10^3 = 777 < y - x = 777`.)
+ // the algorithm relies on the later verification phase to exclude `y`.
+ let delta1 = plus1 - minus1;
+// let delta1int = (delta1 >> e) as usize; // only for explanation
+ let delta1frac = delta1 & ((1 << e) - 1);
+
+ // render integral parts, while checking for the accuracy at each step.
+ let mut kappa = max_kappa as i16;
+ let mut ten_kappa = max_ten_kappa; // 10^kappa
+ let mut remainder = plus1int; // digits yet to be rendered
+ loop { // we always have at least one digit to render, as `plus1 >= 10^kappa`
+ // invariants:
+ // - `delta1int <= remainder < 10^(kappa+1)`
+ // - `plus1int = d[0..n-1] * 10^(kappa+1) + remainder`
+ // (it follows that `remainder = plus1int % 10^(kappa+1)`)
+
+ // divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
+ let q = remainder / ten_kappa;
+ let r = remainder % ten_kappa;
+ debug_assert!(q < 10);
+ buf[i] = b'0' + q as u8;
+ i += 1;
+
+ let plus1rem = ((r as u64) << e) + plus1frac; // == (plus1 % 10^kappa) * 2^e
+ if plus1rem < delta1 {
+ // `plus1 % 10^kappa < delta1 = plus1 - minus1`; we've found the correct `kappa`.
+ let ten_kappa = (ten_kappa as u64) << e; // scale 10^kappa back to the shared exponent
+ return round_and_weed(&mut buf[..i], exp, plus1rem, delta1, plus1 - v.f, ten_kappa, 1);
+ }
+
+ // break the loop when we have rendered all integral digits.
+ // the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
+ if i > max_kappa as usize {
+ debug_assert_eq!(ten_kappa, 1);
+ debug_assert_eq!(kappa, 0);
+ break;
+ }
+
+ // restore invariants
+ kappa -= 1;
+ ten_kappa /= 10;
+ remainder = r;
+ }
+
+ // render fractional parts, while checking for the accuracy at each step.
+ // this time we rely on repeated multiplications, as division will lose the precision.
+ let mut remainder = plus1frac;
+ let mut threshold = delta1frac;
+ let mut ulp = 1;
+ loop { // the next digit should be significant as we've tested that before breaking out
+ // invariants, where `m = max_kappa + 1` (# of digits in the integral part):
+ // - `remainder < 2^e`
+ // - `plus1frac * 10^(n-m) = d[m..n-1] * 2^e + remainder`
+
+ remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
+ threshold *= 10;
+ ulp *= 10;
+
+ // divide `remainder` by `10^kappa`.
+ // both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
+ let q = remainder >> e;
+ let r = remainder & ((1 << e) - 1);
+ debug_assert!(q < 10);
+ buf[i] = b'0' + q as u8;
+ i += 1;
+
+ if r < threshold {
+ let ten_kappa = 1 << e; // implicit divisor
+ return round_and_weed(&mut buf[..i], exp, r, threshold,
+ (plus1 - v.f) * ulp, ten_kappa, ulp);
+ }
+
+ // restore invariants
+ kappa -= 1;
+ remainder = r;
+ }
+
+ // we've generated all significant digits of `plus1`, but not sure if it's the optimal one.
+ // for example, if `minus1` is 3.14153... and `plus1` is 3.14158..., there are 5 different
+ // shortest representation from 3.14154 to 3.14158 but we only have the greatest one.
+ // we have to successively decrease the last digit and check if this is the optimal repr.
+ // there are at most 9 candidates (..1 to ..9), so this is fairly quick. ("rounding" phase)
+ //
+ // the function checks if this "optimal" repr is actually within the ulp ranges,
+ // and also, it is possible that the "second-to-optimal" repr can actually be optimal
+ // due to the rounding error. in either cases this returns `None`. ("weeding" phase)
+ //
+ // all arguments here are scaled by the common (but implicit) value `k`, so that:
+ // - `remainder = (plus1 % 10^kappa) * k`
+ // - `threshold = (plus1 - minus1) * k` (and also, `remainder < threshold`)
+ // - `plus1v = (plus1 - v) * k` (and also, `threshold > plus1v` from prior invariants)
+ // - `ten_kappa = 10^kappa * k`
+ // - `ulp = 2^-e * k`
+    fn round_and_weed(buf: &mut [u8], exp: i16, remainder: u64, threshold: u64, plus1v: u64,
+                      ten_kappa: u64, ulp: u64) -> Option<(usize, i16)> {
+        assert!(!buf.is_empty());
+
+        // summary: `buf` holds the digits of `plus1` rendered so far. decrement the last
+        // digit until the value is the representation closest to `v + 1 ulp` ("rounding"),
+        // then verify that it is also the closest to `v - 1 ulp` and that it lies strictly
+        // inside the safe interval between `plus0` and `minus0` ("weeding"); on any
+        // failure return `None` so the caller can fall back to a slower algorithm.
+
+        // produce two approximations to `v` (actually `plus1 - v`) within 1.5 ulps.
+        // the resulting representation should be the closest representation to both.
+        //
+        // here `plus1 - v` is used since calculations are done with respect to `plus1`
+        // in order to avoid overflow/underflow (hence the seemingly swapped names).
+        let plus1v_down = plus1v + ulp; // plus1 - (v - 1 ulp)
+        let plus1v_up = plus1v - ulp; // plus1 - (v + 1 ulp)
+
+        // decrease the last digit and stop at the closest representation to `v + 1 ulp`.
+        let mut plus1w = remainder; // plus1w(n) = plus1 - w(n)
+        {
+            let last = buf.last_mut().unwrap();
+
+            // we work with the approximated digits `w(n)`, which is initially equal to `plus1 -
+            // plus1 % 10^kappa`. after running the loop body `n` times, `w(n) = plus1 -
+            // plus1 % 10^kappa - n * 10^kappa`. we set `plus1w(n) = plus1 - w(n) =
+            // plus1 % 10^kappa + n * 10^kappa` (thus `remainder = plus1w(0)`) to simplify checks.
+            // note that `plus1w(n)` is always increasing.
+            //
+            // we have three conditions to terminate. any of them will make the loop unable to
+            // proceed, but we then have at least one valid representation known to be closest to
+            // `v + 1 ulp` anyway. we will denote them as TC1 through TC3 for brevity.
+            //
+            // TC1: `w(n) <= v + 1 ulp`, i.e. this is the last repr that can be the closest one.
+            // this is equivalent to `plus1 - w(n) = plus1w(n) >= plus1 - (v + 1 ulp) = plus1v_up`.
+            // combined with TC2 (which checks if `w(n+1)` is valid), this prevents the possible
+            // overflow on the calculation of `plus1w(n)`.
+            //
+            // TC2: `w(n+1) < minus1`, i.e. the next repr definitely does not round to `v`.
+            // this is equivalent to `plus1 - w(n) + 10^kappa = plus1w(n) + 10^kappa >
+            // plus1 - minus1 = threshold`. the left hand side can overflow, but we know
+            // `threshold > plus1v`, so if TC1 is false, `threshold - plus1w(n) >
+            // threshold - (plus1v - 1 ulp) > 1 ulp` and we can safely test if
+            // `threshold - plus1w(n) < 10^kappa` instead.
+            //
+            // TC3: `abs(w(n) - (v + 1 ulp)) <= abs(w(n+1) - (v + 1 ulp))`, i.e. the next repr is
+            // no closer to `v + 1 ulp` than the current repr. given `z(n) = plus1v_up - plus1w(n)`,
+            // this becomes `abs(z(n)) <= abs(z(n+1))`. again assuming that TC1 is false, we have
+            // `z(n) > 0`. we have two cases to consider:
+            //
+            // - when `z(n+1) >= 0`: TC3 becomes `z(n) <= z(n+1)`. as `plus1w(n)` is increasing,
+            //   `z(n)` should be decreasing and this is clearly false.
+            // - when `z(n+1) < 0`:
+            //   - TC3a: the precondition is `plus1v_up < plus1w(n) + 10^kappa`. assuming TC2 is
+            //     false, `threshold >= plus1w(n) + 10^kappa` so it cannot overflow.
+            //   - TC3b: TC3 becomes `z(n) <= -z(n+1)`, i.e. `plus1v_up - plus1w(n) >=
+            //     plus1w(n+1) - plus1v_up = plus1w(n) + 10^kappa - plus1v_up`. the negated TC1
+            //     gives `plus1v_up > plus1w(n)`, so it cannot overflow or underflow when
+            //     combined with TC3a.
+            //
+            // consequently, we should stop when `TC1 || TC2 || (TC3a && TC3b)`. the following is
+            // equal to its inverse, `!TC1 && !TC2 && (!TC3a || !TC3b)`.
+            while plus1w < plus1v_up &&
+                  threshold - plus1w >= ten_kappa &&
+                  (plus1w + ten_kappa < plus1v_up ||
+                   plus1v_up - plus1w >= plus1w + ten_kappa - plus1v_up) {
+                *last -= 1;
+                debug_assert!(*last > b'0'); // the shortest repr cannot end with `0`
+                plus1w += ten_kappa;
+            }
+        }
+
+        // check if this representation is also the closest representation to `v - 1 ulp`.
+        //
+        // these are simply the same terminating conditions as for `v + 1 ulp`, with all
+        // `plus1v_up` replaced by `plus1v_down` instead. overflow analysis equally holds.
+        if plus1w < plus1v_down &&
+           threshold - plus1w >= ten_kappa &&
+           (plus1w + ten_kappa < plus1v_down ||
+            plus1v_down - plus1w >= plus1w + ten_kappa - plus1v_down) {
+            return None;
+        }
+
+        // now we have the closest representation to `v` between `plus1` and `minus1`.
+        // this is too liberal, though, so we reject any `w(n)` not between `plus0` and `minus0`,
+        // i.e. `plus1 - plus1w(n) <= minus0` or `plus1 - plus1w(n) >= plus0`. we utilize the facts
+        // that `threshold = plus1 - minus1` and `plus1 - plus0 = minus0 - minus1 = 2 ulp`.
+        if 2 * ulp <= plus1w && plus1w <= threshold - 4 * ulp {
+            Some((buf.len(), exp))
+        } else {
+            None
+        }
+    }
+}
+
+/// The shortest mode implementation for Grisu with Dragon fallback.
+///
+/// This should be used for most cases.
+pub fn format_shortest(d: &Decoded, buf: &mut [u8]) -> (/*#digits*/ usize, /*exp*/ i16) {
+    use num::flt2dec::strategy::dragon::format_shortest as fallback;
+    // try the fast Grisu path first; whenever it bails out with `None`,
+    // redo the job with the slower but fully general Dragon algorithm.
+    format_shortest_opt(d, buf).unwrap_or_else(|| fallback(d, buf))
+}
+
+/// The exact and fixed mode implementation for Grisu.
+///
+/// It returns `None` when it would return an inexact representation otherwise.
+pub fn format_exact_opt(d: &Decoded, buf: &mut [u8], limit: i16)
+                        -> Option<(/*#digits*/ usize, /*exp*/ i16)> {
+    assert!(d.mant > 0);
+    assert!(d.mant < (1 << 61)); // we need at least three bits of additional precision
+    assert!(!buf.is_empty());
+
+    // normalize and scale `v`.
+    let v = Fp { f: d.mant, e: d.exp }.normalize();
+    let (minusk, cached) = cached_power(ALPHA - v.e - 64, GAMMA - v.e - 64);
+    let v = v.mul(&cached);
+
+    // divide `v` into integral and fractional parts.
+    let e = -v.e as usize;
+    let vint = (v.f >> e) as u32;
+    let vfrac = v.f & ((1 << e) - 1);
+
+    // both old `v` and new `v` (scaled by `10^-k`) has an error of < 1 ulp (Theorem 5.1).
+    // as we don't know the error is positive or negative, we use two approximations
+    // spaced equally and have the maximal error of 2 ulps (same as the shortest case).
+    //
+    // the goal is to find the exactly rounded series of digits that are common to
+    // both `v - 1 ulp` and `v + 1 ulp`, so that we are maximally confident.
+    // if this is not possible, we don't know which one is the correct output for `v`,
+    // so we give up and fall back.
+    //
+    // `err` is defined as `1 ulp * 2^e` here (same as the ulp in `vfrac`),
+    // and we will scale it whenever `v` gets scaled.
+    let mut err = 1;
+
+    // calculate the largest `10^max_kappa` no more than `v` (thus `v < 10^(max_kappa+1)`).
+    // this is an upper bound of `kappa` below.
+    let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(vint);
+
+    let mut i = 0;
+    let exp = max_kappa as i16 - minusk + 1;
+
+    // if we are working with the last-digit limitation, we need to shorten the buffer
+    // before the actual rendering in order to avoid double rounding.
+    // note that we have to enlarge the buffer again when rounding up happens!
+    let len = if exp <= limit {
+        // oops, we cannot even produce *one* digit.
+        // this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
+        //
+        // in principle we can immediately call `possibly_round` with an empty buffer,
+        // but scaling `max_ten_kappa << e` by 10 can result in overflow.
+        // thus we are being sloppy here and widen the error range by a factor of 10.
+        // this will increase the false negative rate, but only very, *very* slightly;
+        // it can only matter noticeably when the mantissa is bigger than 60 bits.
+        return possibly_round(buf, 0, exp, limit, v.f / 10, (max_ten_kappa as u64) << e, err << e);
+    } else if ((exp as i32 - limit as i32) as usize) < buf.len() {
+        (exp - limit) as usize
+    } else {
+        buf.len()
+    };
+    debug_assert!(len > 0);
+
+    // render integral parts.
+    // the error is entirely fractional, so we don't need to check it in this part.
+    let mut kappa = max_kappa as i16;
+    let mut ten_kappa = max_ten_kappa; // 10^kappa
+    let mut remainder = vint; // digits yet to be rendered
+    loop { // we always have at least one digit to render
+        // invariants:
+        // - `remainder < 10^(kappa+1)`
+        // - `vint = d[0..n-1] * 10^(kappa+1) + remainder`
+        //   (it follows that `remainder = vint % 10^(kappa+1)`)
+
+        // divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
+        let q = remainder / ten_kappa;
+        let r = remainder % ten_kappa;
+        debug_assert!(q < 10);
+        buf[i] = b'0' + q as u8;
+        i += 1;
+
+        // is the buffer full? run the rounding pass with the remainder.
+        if i == len {
+            let vrem = ((r as u64) << e) + vfrac; // == (v % 10^kappa) * 2^e
+            return possibly_round(buf, len, exp, limit, vrem, (ten_kappa as u64) << e, err << e);
+        }
+
+        // break the loop when we have rendered all integral digits.
+        // the exact number of digits is `max_kappa + 1` as `v < 10^(max_kappa+1)`.
+        if i > max_kappa as usize {
+            debug_assert_eq!(ten_kappa, 1);
+            debug_assert_eq!(kappa, 0);
+            break;
+        }
+
+        // restore invariants
+        kappa -= 1;
+        ten_kappa /= 10;
+        remainder = r;
+    }
+
+    // render fractional parts.
+    //
+    // in principle we can continue to the last available digit and check for the accuracy.
+    // unfortunately we are working with the finite-sized integers, so we need some criterion
+    // to detect the overflow. V8 uses `remainder > err`, which becomes false when
+    // the first `i` significant digits of `v - 1 ulp` and `v` differ. however this rejects
+    // too many otherwise valid inputs.
+    //
+    // since the later phase has a correct overflow detection, we instead use tighter criterion:
+    // we continue until `err` exceeds `10^kappa / 2`, so that the range between `v - 1 ulp` and
+    // `v + 1 ulp` definitely contains two or more rounded representations. this matches
+    // the first two comparisons from `possibly_round`, for reference.
+    let mut remainder = vfrac;
+    let maxerr = 1 << (e - 1);
+    while err < maxerr {
+        // invariants, where `m = max_kappa + 1` (# of digits in the integral part):
+        // - `remainder < 2^e`
+        // - `vfrac * 10^(n-m) = d[m..n-1] * 2^e + remainder`
+        // - `err = 10^(n-m)`
+
+        remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
+        err *= 10; // won't overflow, `err * 10 < 2^e * 5 < 2^64`
+
+        // divide `remainder` by `10^kappa`.
+        // both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
+        let q = remainder >> e;
+        let r = remainder & ((1 << e) - 1);
+        debug_assert!(q < 10);
+        buf[i] = b'0' + q as u8;
+        i += 1;
+
+        // is the buffer full? run the rounding pass with the remainder.
+        if i == len {
+            return possibly_round(buf, len, exp, limit, r, 1 << e, err);
+        }
+
+        // restore invariants
+        remainder = r;
+    }
+
+    // further calculation is useless (`possibly_round` definitely fails), so we give up.
+    return None;
+
+    // we've generated all requested digits of `v`, which should be also same to corresponding
+    // digits of `v - 1 ulp`. now we check if there is a unique representation shared by
+    // both `v - 1 ulp` and `v + 1 ulp`; this can be either same to generated digits, or
+    // to the rounded-up version of those digits. if the range contains multiple representations
+    // of the same length, we cannot be sure and should return `None` instead.
+    //
+    // all arguments here are scaled by the common (but implicit) value `k`, so that:
+    // - `remainder = (v % 10^kappa) * k`
+    // - `ten_kappa = 10^kappa * k`
+    // - `ulp = 2^-e * k`
+    fn possibly_round(buf: &mut [u8], mut len: usize, mut exp: i16, limit: i16,
+                      remainder: u64, ten_kappa: u64, ulp: u64) -> Option<(usize, i16)> {
+        debug_assert!(remainder < ten_kappa);
+
+        //           10^kappa
+        //    :   :<->:    :
+        //    :   :   :    :
+        //    :|1 ulp|1 ulp|    :
+        //    :|<--->|<--->|    :
+        // ----|-----|-----|----
+        //     |     v     |
+        //  v - 1 ulp   v + 1 ulp
+        //
+        // (for the reference, the dotted line indicates the exact value for
+        // possible representations in given number of digits.)
+        //
+        // error is too large that there are at least three possible representations
+        // between `v - 1 ulp` and `v + 1 ulp`. we cannot determine which one is correct.
+        if ulp >= ten_kappa { return None; }
+
+        //     10^kappa
+        //    :<------->:
+        //    :         :
+        //    : |1 ulp|1 ulp|
+        //    : |<--->|<--->|
+        // ----|-----|-----|----
+        //     |     v     |
+        //  v - 1 ulp   v + 1 ulp
+        //
+        // in fact, 1/2 ulp is enough to introduce two possible representations.
+        // (remember that we need a unique representation for both `v - 1 ulp` and `v + 1 ulp`.)
+        // this won't overflow, as `ulp < ten_kappa` from the first check.
+        if ten_kappa - ulp <= ulp { return None; }
+
+        //  remainder
+        //    :<->|                             :
+        //    :   |                             :
+        //    :<--------- 10^kappa ------------>:
+        //    |   :                             |
+        //    |1 ulp|1 ulp|                     :
+        //    |<--->|<--->|                     :
+        // ----|-----|-----|------------------------
+        //     |     v     |
+        //  v - 1 ulp   v + 1 ulp
+        //
+        // if `v + 1 ulp` is closer to the rounded-down representation (which is already in `buf`),
+        // then we can safely return. note that `v - 1 ulp` *can* be less than the current
+        // representation, but as `1 ulp < 10^kappa / 2`, this condition is enough:
+        // the distance between `v - 1 ulp` and the current representation
+        // cannot exceed `10^kappa / 2`.
+        //
+        // the condition equals to `remainder + ulp < 10^kappa / 2`.
+        // since this can easily overflow, first check if `remainder < 10^kappa / 2`.
+        // we've already verified that `ulp < 10^kappa / 2`, so as long as
+        // `10^kappa` did not overflow after all, the second check is fine.
+        if ten_kappa - remainder > remainder && ten_kappa - 2 * remainder >= 2 * ulp {
+            return Some((len, exp));
+        }
+
+        //   :<------- remainder ------>|      :
+        //   :                          |      :
+        //   :<--------- 10^kappa ------------>:
+        //   :                          |      |
+        //   :                    |1 ulp|1 ulp|
+        //   :                    |<--->|<--->|
+        // -----------------------|-----|-----|-----
+        //                        |     v     |
+        //                     v - 1 ulp   v + 1 ulp
+        //
+        // on the other hand, if `v - 1 ulp` is closer to the rounded-up representation,
+        // we should round up and return. for the same reason we don't need to check `v + 1 ulp`.
+        //
+        // the condition equals to `remainder - ulp >= 10^kappa / 2`.
+        // again we first check if `remainder > ulp` (note that this is not `remainder >= ulp`,
+        // as `10^kappa` is never zero). also note that `remainder - ulp <= 10^kappa`,
+        // so the second check does not overflow.
+        if remainder > ulp && ten_kappa - (remainder - ulp) <= remainder - ulp {
+            if let Some(c) = round_up(buf, len) {
+                // only add an additional digit when we've been requested the fixed precision.
+                // we also need to check that, if the original buffer was empty,
+                // the additional digit can only be added when `exp == limit` (edge case).
+                exp += 1;
+                if exp > limit && len < buf.len() {
+                    buf[len] = c;
+                    len += 1;
+                }
+            }
+            return Some((len, exp));
+        }
+
+        // otherwise we are doomed (i.e. some values between `v - 1 ulp` and `v + 1 ulp` are
+        // rounding down and others are rounding up) and give up.
+        None
+    }
+}
+
+/// The exact and fixed mode implementation for Grisu with Dragon fallback.
+///
+/// This should be used for most cases.
+pub fn format_exact(d: &Decoded, buf: &mut [u8], limit: i16) -> (/*#digits*/ usize, /*exp*/ i16) {
+    use num::flt2dec::strategy::dragon::format_exact as fallback;
+    // Grisu either produces a provably correct answer or gives up (`None`);
+    // in the latter case delegate to the slower but exact Dragon implementation.
+    format_exact_opt(d, buf, limit).unwrap_or_else(|| fallback(d, buf, limit))
+}
diff --git a/libcore/num/i16.rs b/libcore/num/i16.rs
new file mode 100644
index 0000000..1dd8209
--- /dev/null
+++ b/libcore/num/i16.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 16-bit signed integer type.
+//!
+//! *[See also the `i16` primitive type](../../std/primitive.i16.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Expands to the module-level `MIN`/`MAX` constants for `i16` (see `int_macros.rs`).
+int_module! { i16, 16 }
diff --git a/libcore/num/i32.rs b/libcore/num/i32.rs
new file mode 100644
index 0000000..8a21689
--- /dev/null
+++ b/libcore/num/i32.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 32-bit signed integer type.
+//!
+//! *[See also the `i32` primitive type](../../std/primitive.i32.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Expands to the module-level `MIN`/`MAX` constants for `i32` (see `int_macros.rs`).
+int_module! { i32, 32 }
diff --git a/libcore/num/i64.rs b/libcore/num/i64.rs
new file mode 100644
index 0000000..2ce9eb1
--- /dev/null
+++ b/libcore/num/i64.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 64-bit signed integer type.
+//!
+//! *[See also the `i64` primitive type](../../std/primitive.i64.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Expands to the module-level `MIN`/`MAX` constants for `i64` (see `int_macros.rs`).
+int_module! { i64, 64 }
diff --git a/libcore/num/i8.rs b/libcore/num/i8.rs
new file mode 100644
index 0000000..8b5a7f1
--- /dev/null
+++ b/libcore/num/i8.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 8-bit signed integer type.
+//!
+//! *[See also the `i8` primitive type](../../std/primitive.i8.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Expands to the module-level `MIN`/`MAX` constants for `i8` (see `int_macros.rs`).
+int_module! { i8, 8 }
diff --git a/libcore/num/int_macros.rs b/libcore/num/int_macros.rs
new file mode 100644
index 0000000..4234925
--- /dev/null
+++ b/libcore/num/int_macros.rs
@@ -0,0 +1,27 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![doc(hidden)]
+
+// Generates the contents of the per-width signed integer modules (`i8`, `i16`, ...).
+// `$T` is the concrete signed type and `$bits` is its width in bits; `MIN` is built
+// by shifting `-1` into the sign bit and `MAX` as its two's-complement negation.
+macro_rules! int_module { ($T:ty, $bits:expr) => (
+
+// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
+// calling the `Bounded::min_value` function.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MIN: $T = (-1 as $T) << ($bits - 1);
+// FIXME(#9837): Compute MIN like this so the high bits that shouldn't exist are 0.
+// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
+// calling the `Bounded::max_value` function.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MAX: $T = !MIN;
+
+) }
diff --git a/libcore/num/isize.rs b/libcore/num/isize.rs
new file mode 100644
index 0000000..de5b177
--- /dev/null
+++ b/libcore/num/isize.rs
@@ -0,0 +1,20 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The pointer-sized signed integer type.
+//!
+//! *[See also the `isize` primitive type](../../std/primitive.isize.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// `isize` has the width of a pointer, so pick the matching `int_module!`
+// expansion based on the target's pointer width.
+#[cfg(target_pointer_width = "32")]
+int_module! { isize, 32 }
+#[cfg(target_pointer_width = "64")]
+int_module! { isize, 64 }
diff --git a/libcore/num/mod.rs b/libcore/num/mod.rs
new file mode 100644
index 0000000..229a864
--- /dev/null
+++ b/libcore/num/mod.rs
@@ -0,0 +1,2516 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Numeric traits and functions for the built-in numeric types.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![allow(missing_docs)]
+
+use char::CharExt;
+use cmp::{Eq, PartialOrd};
+use convert::From;
+use fmt;
+use intrinsics;
+use marker::{Copy, Sized};
+use mem::size_of;
+use option::Option::{self, Some, None};
+use result::Result::{self, Ok, Err};
+use str::{FromStr, StrExt};
+use slice::SliceExt;
+
+/// Provides intentionally-wrapped arithmetic on `T`.
+///
+/// Operations like `+` on `u32` values are intended to never overflow,
+/// and in some debug configurations overflow is detected and results
+/// in a panic. While most arithmetic falls into this category, some
+/// code explicitly expects and relies upon modular arithmetic (e.g.,
+/// hashing).
+///
+/// Wrapping arithmetic can be achieved either through methods like
+/// `wrapping_add`, or through the `Wrapping<T>` type, which says that
+/// all standard arithmetic operations on the underlying value are
+/// intended to have wrapping semantics.
+///
+/// The wrapped value is a public field and can be retrieved as `w.0`.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Default)]
+pub struct Wrapping<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
+
+mod wrapping;
+
+// All these modules are technically private and only exposed for libcoretest:
+pub mod flt2dec;
+pub mod dec2flt;
+pub mod bignum;
+pub mod diy_float;
+
+/// Types that have a "zero" value.
+///
+/// This trait is intended for use in conjunction with `Add`, as an identity:
+/// `x + T::zero() == x`.
+///
+/// It is implemented for the primitive numeric types via the
+/// `zero_one_impl!`/`zero_one_impl_float!` macros below.
+#[unstable(feature = "zero_one",
+           reason = "unsure of placement, wants to use associated constants",
+           issue = "27739")]
+pub trait Zero: Sized {
+    /// The "zero" (usually, additive identity) for this type.
+    fn zero() -> Self;
+}
+
+/// Types that have a "one" value.
+///
+/// This trait is intended for use in conjunction with `Mul`, as an identity:
+/// `x * T::one() == x`.
+///
+/// It is implemented for the primitive numeric types via the
+/// `zero_one_impl!`/`zero_one_impl_float!` macros below.
+#[unstable(feature = "zero_one",
+           reason = "unsure of placement, wants to use associated constants",
+           issue = "27739")]
+pub trait One: Sized {
+    /// The "one" (usually, multiplicative identity) for this type.
+    fn one() -> Self;
+}
+
+// Implements `Zero` and `One` for each listed integer type,
+// using the integer literals `0` and `1` as the identities.
+macro_rules! zero_one_impl {
+    ($($t:ty)*) => ($(
+        #[unstable(feature = "zero_one",
+                   reason = "unsure of placement, wants to use associated constants",
+                   issue = "27739")]
+        impl Zero for $t {
+            #[inline]
+            fn zero() -> Self { 0 }
+        }
+        #[unstable(feature = "zero_one",
+                   reason = "unsure of placement, wants to use associated constants",
+                   issue = "27739")]
+        impl One for $t {
+            #[inline]
+            fn one() -> Self { 1 }
+        }
+    )*)
+}
+zero_one_impl! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
+
+// Implements `Zero` and `One` for each listed floating-point type,
+// using the float literals `0.0` and `1.0` as the identities.
+macro_rules! zero_one_impl_float {
+    ($($t:ty)*) => ($(
+        #[unstable(feature = "zero_one",
+                   reason = "unsure of placement, wants to use associated constants",
+                   issue = "27739")]
+        impl Zero for $t {
+            #[inline]
+            fn zero() -> Self { 0.0 }
+        }
+        #[unstable(feature = "zero_one",
+                   reason = "unsure of placement, wants to use associated constants",
+                   issue = "27739")]
+        impl One for $t {
+            #[inline]
+            fn one() -> Self { 1.0 }
+        }
+    )*)
+}
+zero_one_impl_float! { f32 f64 }
+
+// Shared helper for the `checked_*` methods: invokes the overflow-reporting
+// intrinsic `$op` (unsafe) on the operands cast to `$U`, and converts the
+// resulting `(value, overflowed)` pair into `Some(value as Self)` / `None`.
+macro_rules! checked_op {
+    ($U:ty, $op:path, $x:expr, $y:expr) => {{
+        let (result, overflowed) = unsafe { $op($x as $U, $y as $U) };
+        if overflowed { None } else { Some(result as Self) }
+    }}
+}
+
+// `Int` + `SignedInt` implemented for signed integers
+macro_rules! int_impl {
+ ($ActualT:ident, $UnsignedT:ty, $BITS:expr,
+ $add_with_overflow:path,
+ $sub_with_overflow:path,
+ $mul_with_overflow:path) => {
+ /// Returns the smallest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub const fn min_value() -> Self {
+ (-1 as Self) << ($BITS - 1)
+ }
+
+ /// Returns the largest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub const fn max_value() -> Self {
+ !Self::min_value()
+ }
+
+ /// Converts a string slice in a given base to an integer.
+ ///
+ /// Leading and trailing whitespace represent an error.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(u32::from_str_radix("A", 16), Ok(10));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
+ from_str_radix(src, radix)
+ }
+
+ /// Returns the number of ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b01001100u8;
+ ///
+ /// assert_eq!(n.count_ones(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
+
+ /// Returns the number of zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b01001100u8;
+ ///
+ /// assert_eq!(n.count_zeros(), 5);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn count_zeros(self) -> u32 {
+ (!self).count_ones()
+ }
+
+ /// Returns the number of leading zeros in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b0101000u16;
+ ///
+ /// assert_eq!(n.leading_zeros(), 10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn leading_zeros(self) -> u32 {
+ (self as $UnsignedT).leading_zeros()
+ }
+
+ /// Returns the number of trailing zeros in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b0101000u16;
+ ///
+ /// assert_eq!(n.trailing_zeros(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn trailing_zeros(self) -> u32 {
+ (self as $UnsignedT).trailing_zeros()
+ }
+
+ /// Shifts the bits to the left by a specified amount, `n`,
+ /// wrapping the truncated bits to the end of the resulting integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0x3456789ABCDEF012u64;
+ ///
+ /// assert_eq!(n.rotate_left(12), m);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rotate_left(self, n: u32) -> Self {
+ (self as $UnsignedT).rotate_left(n) as Self
+ }
+
+ /// Shifts the bits to the right by a specified amount, `n`,
+ /// wrapping the truncated bits to the beginning of the resulting
+ /// integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0xDEF0123456789ABCu64;
+ ///
+ /// assert_eq!(n.rotate_right(12), m);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rotate_right(self, n: u32) -> Self {
+ (self as $UnsignedT).rotate_right(n) as Self
+ }
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0xEFCDAB8967452301u64;
+ ///
+ /// assert_eq!(n.swap_bytes(), m);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn swap_bytes(self) -> Self {
+ (self as $UnsignedT).swap_bytes() as Self
+ }
+
+ /// Converts an integer from big endian to the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(u64::from_be(n), n)
+ /// } else {
+ /// assert_eq!(u64::from_be(n), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn from_be(x: Self) -> Self {
+ if cfg!(target_endian = "big") { x } else { x.swap_bytes() }
+ }
+
+ /// Converts an integer from little endian to the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(u64::from_le(n), n)
+ /// } else {
+ /// assert_eq!(u64::from_le(n), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn from_le(x: Self) -> Self {
+ if cfg!(target_endian = "little") { x } else { x.swap_bytes() }
+ }
+
+ /// Converts `self` to big endian from the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(n.to_be(), n)
+ /// } else {
+ /// assert_eq!(n.to_be(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_be(self) -> Self { // or not to be?
+ if cfg!(target_endian = "big") { self } else { self.swap_bytes() }
+ }
+
+ /// Converts `self` to little endian from the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(n.to_le(), n)
+ /// } else {
+ /// assert_eq!(n.to_le(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_le(self) -> Self {
+ if cfg!(target_endian = "little") { self } else { self.swap_bytes() }
+ }
+
+ /// Checked integer addition. Computes `self + other`, returning `None`
+ /// if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(5u16.checked_add(65530), Some(65535));
+ /// assert_eq!(6u16.checked_add(65530), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_add(self, other: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(other);
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked integer subtraction. Computes `self - other`, returning
+ /// `None` if underflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!((-127i8).checked_sub(1), Some(-128));
+ /// assert_eq!((-128i8).checked_sub(1), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_sub(self, other: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_sub(other);
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked integer multiplication. Computes `self * other`, returning
+ /// `None` if underflow or overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(5u8.checked_mul(51), Some(255));
+ /// assert_eq!(5u8.checked_mul(52), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_mul(self, other: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_mul(other);
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked integer division. Computes `self / other`, returning `None`
+ /// if `other == 0` or the operation results in underflow or overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!((-127i8).checked_div(-1), Some(127));
+ /// assert_eq!((-128i8).checked_div(-1), None);
+ /// assert_eq!((1i8).checked_div(0), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_div(self, other: Self) -> Option<Self> {
+ if other == 0 {
+ None
+ } else {
+ let (a, b) = self.overflowing_div(other);
+ if b {None} else {Some(a)}
+ }
+ }
+
+ /// Checked integer remainder. Computes `self % other`, returning `None`
+ /// if `other == 0` or the operation results in underflow or overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(5i32.checked_rem(2), Some(1));
+ /// assert_eq!(5i32.checked_rem(0), None);
+ /// assert_eq!(i32::MIN.checked_rem(-1), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_rem(self, other: Self) -> Option<Self> {
+ if other == 0 {
+ None
+ } else {
+ let (a, b) = self.overflowing_rem(other);
+ if b {None} else {Some(a)}
+ }
+ }
+
+ /// Checked negation. Computes `-self`, returning `None` if `self ==
+ /// MIN`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(5i32.checked_neg(), Some(-5));
+ /// assert_eq!(i32::MIN.checked_neg(), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_neg(self) -> Option<Self> {
+ let (a, b) = self.overflowing_neg();
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked shift left. Computes `self << rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(0x10i32.checked_shl(4), Some(0x100));
+ /// assert_eq!(0x10i32.checked_shl(33), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_shl(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shl(rhs);
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked shift right. Computes `self >> rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(0x10i32.checked_shr(4), Some(0x1));
+ /// assert_eq!(0x10i32.checked_shr(33), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_shr(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shr(rhs);
+ if b {None} else {Some(a)}
+ }
+
+ /// Saturating integer addition. Computes `self + other`, saturating at
+ /// the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100i8.saturating_add(1), 101);
+ /// assert_eq!(100i8.saturating_add(127), 127);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn saturating_add(self, other: Self) -> Self {
+ match self.checked_add(other) {
+ Some(x) => x,
+ None if other >= Self::zero() => Self::max_value(),
+ None => Self::min_value(),
+ }
+ }
+
+ /// Saturating integer subtraction. Computes `self - other`, saturating
+ /// at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100i8.saturating_sub(127), -27);
+ /// assert_eq!((-100i8).saturating_sub(127), -128);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn saturating_sub(self, other: Self) -> Self {
+ match self.checked_sub(other) {
+ Some(x) => x,
+ None if other >= Self::zero() => Self::min_value(),
+ None => Self::max_value(),
+ }
+ }
+
+ /// Saturating integer multiplication. Computes `self * other`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(100i32.saturating_mul(127), 12700);
+ /// assert_eq!((1i32 << 23).saturating_mul(1 << 23), i32::MAX);
+ /// assert_eq!((-1i32 << 23).saturating_mul(1 << 23), i32::MIN);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn saturating_mul(self, other: Self) -> Self {
+ self.checked_mul(other).unwrap_or_else(|| {
+ // On overflow, operands of the same sign mean the true product
+ // is positive, so saturate high; mixed signs saturate low.
+ if (self < 0 && other < 0) || (self > 0 && other > 0) {
+ Self::max_value()
+ } else {
+ Self::min_value()
+ }
+ })
+ }
+
+ /// Wrapping (modular) addition. Computes `self + other`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100i8.wrapping_add(27), 127);
+ /// assert_eq!(100i8.wrapping_add(127), -29);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn wrapping_add(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::overflowing_add(self, rhs)
+ }
+ }
+
+ /// Wrapping (modular) subtraction. Computes `self - other`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(0i8.wrapping_sub(127), -127);
+ /// assert_eq!((-2i8).wrapping_sub(127), 127);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn wrapping_sub(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::overflowing_sub(self, rhs)
+ }
+ }
+
+ /// Wrapping (modular) multiplication. Computes `self *
+ /// other`, wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(10i8.wrapping_mul(12), 120);
+ /// assert_eq!(11i8.wrapping_mul(12), -124);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn wrapping_mul(self, rhs: Self) -> Self {
+ unsafe {
+ intrinsics::overflowing_mul(self, rhs)
+ }
+ }
+
+ /// Wrapping (modular) division. Computes `self / other`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// The only case where such wrapping can occur is when one
+ /// divides `MIN / -1` on a signed type (where `MIN` is the
+ /// negative minimal value for the type); this is equivalent
+ /// to `-MIN`, a positive value that is too large to represent
+ /// in the type. In such a case, this function returns `MIN`
+ /// itself.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100u8.wrapping_div(10), 10);
+ /// assert_eq!((-128i8).wrapping_div(-1), -128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_div(self, rhs: Self) -> Self {
+ self.overflowing_div(rhs).0
+ }
+
+ /// Wrapping (modular) remainder. Computes `self % other`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Such wrap-around never actually occurs mathematically;
+ /// implementation artifacts make `x % y` invalid for `MIN /
+ /// -1` on a signed type (where `MIN` is the negative
+ /// minimal value). In such a case, this function returns `0`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100i8.wrapping_rem(10), 0);
+ /// assert_eq!((-128i8).wrapping_rem(-1), 0);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_rem(self, rhs: Self) -> Self {
+ self.overflowing_rem(rhs).0
+ }
+
+ /// Wrapping (modular) negation. Computes `-self`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// The only case where such wrapping can occur is when one
+ /// negates `MIN` on a signed type (where `MIN` is the
+ /// negative minimal value for the type); this is a positive
+ /// value that is too large to represent in the type. In such
+ /// a case, this function returns `MIN` itself.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100i8.wrapping_neg(), -100);
+ /// assert_eq!((-128i8).wrapping_neg(), -128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_neg(self) -> Self {
+ self.overflowing_neg().0
+ }
+
+ /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+ /// where `mask` removes any high-order bits of `rhs` that
+ /// would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// Note that this is *not* the same as a rotate-left; the
+ /// RHS of a wrapping shift-left is restricted to the range
+ /// of the type, rather than the bits shifted out of the LHS
+ /// being returned to the other end. The primitive integer
+ /// types all implement a `rotate_left` function, which may
+ /// be what you want instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(1u8.wrapping_shl(7), 128);
+ /// assert_eq!(1u8.wrapping_shl(8), 1);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_shl(self, rhs: u32) -> Self {
+ self.overflowing_shl(rhs).0
+ }
+
+ /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+ /// where `mask` removes any high-order bits of `rhs` that
+ /// would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// Note that this is *not* the same as a rotate-right; the
+ /// RHS of a wrapping shift-right is restricted to the range
+ /// of the type, rather than the bits shifted out of the LHS
+ /// being returned to the other end. The primitive integer
+ /// types all implement a `rotate_right` function, which may
+ /// be what you want instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(128u8.wrapping_shr(7), 1);
+ /// assert_eq!(128u8.wrapping_shr(8), 128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_shr(self, rhs: u32) -> Self {
+ self.overflowing_shr(rhs).0
+ }
+
+ /// Calculates `self` + `rhs`
+ ///
+ /// Returns a tuple of the addition along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(5i32.overflowing_add(2), (7, false));
+ /// assert_eq!(i32::MAX.overflowing_add(1), (i32::MIN, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ unsafe {
+ let (a, b) = $add_with_overflow(self as $ActualT,
+ rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ /// Calculates `self` - `rhs`
+ ///
+ /// Returns a tuple of the subtraction along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(5i32.overflowing_sub(2), (3, false));
+ /// assert_eq!(i32::MIN.overflowing_sub(1), (i32::MAX, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+ unsafe {
+ let (a, b) = $sub_with_overflow(self as $ActualT,
+ rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ /// Calculates the multiplication of `self` and `rhs`.
+ ///
+ /// Returns a tuple of the multiplication along with a boolean
+ /// indicating whether an arithmetic overflow would occur. If an
+ /// overflow would have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(5i32.overflowing_mul(2), (10, false));
+ /// assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
+ unsafe {
+ let (a, b) = $mul_with_overflow(self as $ActualT,
+ rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ /// Calculates the quotient when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the quotient along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// occur then self is returned.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(5i32.overflowing_div(2), (2, false));
+ /// assert_eq!(i32::MIN.overflowing_div(-1), (i32::MIN, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_div(self, rhs: Self) -> (Self, bool) {
+ // MIN / -1 is the only signed division that can overflow, because
+ // -MIN is one larger than MAX and cannot be represented.
+ if self == Self::min_value() && rhs == -1 {
+ (self, true)
+ } else {
+ (self / rhs, false)
+ }
+ }
+
+ /// Calculates the remainder when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the remainder after dividing along with a boolean
+ /// indicating whether an arithmetic overflow would occur. If an
+ /// overflow would occur then 0 is returned.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(5i32.overflowing_rem(2), (1, false));
+ /// assert_eq!(i32::MIN.overflowing_rem(-1), (0, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_rem(self, rhs: Self) -> (Self, bool) {
+ // MIN % -1 must be special-cased: even though the mathematical
+ // remainder is 0, computing it would overflow (it shares the
+ // hardware division path with MIN / -1).
+ if self == Self::min_value() && rhs == -1 {
+ (0, true)
+ } else {
+ (self % rhs, false)
+ }
+ }
+
+ /// Negates self, overflowing if this is equal to the minimum value.
+ ///
+ /// Returns a tuple of the negated version of self along with a boolean
+ /// indicating whether an overflow happened. If `self` is the minimum
+ /// value (e.g. `i32::MIN` for values of type `i32`), then the minimum
+ /// value will be returned again and `true` will be returned for an
+ /// overflow happening.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use std::i32;
+ ///
+ /// assert_eq!(2i32.overflowing_neg(), (-2, false));
+ /// assert_eq!(i32::MIN.overflowing_neg(), (i32::MIN, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_neg(self) -> (Self, bool) {
+ if self == Self::min_value() {
+ (Self::min_value(), true)
+ } else {
+ (-self, false)
+ }
+ }
+
+ /// Shifts self left by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of self along with a boolean
+ /// indicating whether the shift value was larger than or equal to the
+ /// number of bits. If the shift value is too large, then value is
+ /// masked (N-1) where N is the number of bits, and this value is then
+ /// used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(0x10i32.overflowing_shl(4), (0x100, false));
+ /// assert_eq!(0x10i32.overflowing_shl(36), (0x100, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+ // The shift amount is masked to the type width, so the shift itself
+ // is always defined; the flag reports whether masking discarded bits.
+ (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
+ }
+
+ /// Shifts self right by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of self along with a boolean
+ /// indicating whether the shift value was larger than or equal to the
+ /// number of bits. If the shift value is too large, then value is
+ /// masked (N-1) where N is the number of bits, and this value is then
+ /// used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(0x10i32.overflowing_shr(4), (0x1, false));
+ /// assert_eq!(0x10i32.overflowing_shr(36), (0x1, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+ (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: i32 = 2; // or any other integer type
+ ///
+ /// assert_eq!(x.pow(4), 16);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
+ pub fn pow(self, mut exp: u32) -> Self {
+ // Exponentiation by squaring: O(log(exp)) multiplications.
+ // When `exp` is 0 both the loop and the final step are skipped,
+ // so the accumulator `Self::one()` is returned.
+ let mut base = self;
+ let mut acc = Self::one();
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc * base;
+ }
+ exp /= 2;
+ base = base * base;
+ }
+
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ if exp == 1 {
+ acc = acc * base;
+ }
+
+ acc
+ }
+
+ /// Computes the absolute value of `self`.
+ ///
+ /// # Overflow behavior
+ ///
+ /// The absolute value of `i32::min_value()` cannot be represented as an
+ /// `i32`, and attempting to calculate it will cause an overflow. This
+ /// means that code in debug mode will trigger a panic on this case and
+ /// optimized code will return `i32::min_value()` without a panic.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(10i8.abs(), 10);
+ /// assert_eq!((-10i8).abs(), 10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
+ pub fn abs(self) -> Self {
+ if self.is_negative() {
+ // Note that the #[inline] above means that the overflow
+ // semantics of this negation depend on the crate we're being
+ // inlined into.
+ -self
+ } else {
+ self
+ }
+ }
+
+ /// Returns a number representing sign of `self`.
+ ///
+ /// - `0` if the number is zero
+ /// - `1` if the number is positive
+ /// - `-1` if the number is negative
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(10i8.signum(), 1);
+ /// assert_eq!(0i8.signum(), 0);
+ /// assert_eq!((-10i8).signum(), -1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn signum(self) -> Self {
+ match self {
+ n if n > 0 => 1,
+ 0 => 0,
+ _ => -1,
+ }
+ }
+
+ /// Returns `true` if `self` is positive and `false` if the number
+ /// is zero or negative.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!(10i8.is_positive());
+ /// assert!(!(-10i8).is_positive());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_positive(self) -> bool { self > 0 }
+
+ /// Returns `true` if `self` is negative and `false` if the number
+ /// is zero or positive.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!((-10i8).is_negative());
+ /// assert!(!10i8.is_negative());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_negative(self) -> bool { self < 0 }
+ }
+}
+
+#[lang = "i8"]
+impl i8 {
+ int_impl! { i8, u8, 8,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+#[lang = "i16"]
+impl i16 {
+ int_impl! { i16, u16, 16,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+#[lang = "i32"]
+impl i32 {
+ int_impl! { i32, u32, 32,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+#[lang = "i64"]
+impl i64 {
+ int_impl! { i64, u64, 64,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+ #[cfg(target_pointer_width = "32")]
+ #[lang = "isize"]
+ impl isize {
+ // On 32-bit targets isize has the same width as i32, so the macro
+ // is instantiated with the i32/u32 parameter set.
+ int_impl! { i32, u32, 32,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ #[lang = "isize"]
+ impl isize {
+ // On 64-bit targets isize has the same width as i64, so the macro
+ // is instantiated with the i64/u64 parameter set.
+ int_impl! { i64, u64, 64,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+ }
+
+// `Int` + `UnsignedInt` implemented for unsigned integers
+macro_rules! uint_impl {
+ ($ActualT:ty, $BITS:expr,
+ $ctpop:path,
+ $ctlz:path,
+ $cttz:path,
+ $bswap:path,
+ $add_with_overflow:path,
+ $sub_with_overflow:path,
+ $mul_with_overflow:path) => {
+ /// Returns the smallest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub const fn min_value() -> Self { 0 }
+
+ /// Returns the largest value that can be represented by this integer type.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub const fn max_value() -> Self { !0 }
+
+ /// Converts a string slice in a given base to an integer.
+ ///
+ /// Leading and trailing whitespace represent an error.
+ ///
+ /// # Arguments
+ ///
+ /// * src - A string slice
+ /// * radix - The base to use. Must lie in the range [2 .. 36]
+ ///
+ /// # Return value
+ ///
+ /// `Err(ParseIntError)` if the string did not represent a valid number.
+ /// Otherwise, `Ok(n)` where `n` is the integer represented by `src`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
+ from_str_radix(src, radix)
+ }
+
+ /// Returns the number of ones in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b01001100u8;
+ ///
+ /// assert_eq!(n.count_ones(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn count_ones(self) -> u32 {
+ unsafe { $ctpop(self as $ActualT) as u32 }
+ }
+
+ /// Returns the number of zeros in the binary representation of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b01001100u8;
+ ///
+ /// assert_eq!(n.count_zeros(), 5);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn count_zeros(self) -> u32 {
+ (!self).count_ones()
+ }
+
+ /// Returns the number of leading zeros in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b0101000u16;
+ ///
+ /// assert_eq!(n.leading_zeros(), 10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn leading_zeros(self) -> u32 {
+ unsafe { $ctlz(self as $ActualT) as u32 }
+ }
+
+ /// Returns the number of trailing zeros in the binary representation
+ /// of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0b0101000u16;
+ ///
+ /// assert_eq!(n.trailing_zeros(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn trailing_zeros(self) -> u32 {
+ // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
+ // emits two conditional moves on x86_64. By promoting the value to
+ // u16 and setting bit 8, we get better code without any conditional
+ // operations.
+ // FIXME: There's a LLVM patch (http://reviews.llvm.org/D9284)
+ // pending, remove this workaround once LLVM generates better code
+ // for cttz8.
+ unsafe {
+ if $BITS == 8 {
+ intrinsics::cttz(self as u16 | 0x100) as u32
+ } else {
+ intrinsics::cttz(self) as u32
+ }
+ }
+ }
+
+ /// Shifts the bits to the left by a specified amount, `n`,
+ /// wrapping the truncated bits to the end of the resulting integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0x3456789ABCDEF012u64;
+ ///
+ /// assert_eq!(n.rotate_left(12), m);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rotate_left(self, n: u32) -> Self {
+ // Protect against undefined behaviour for over-long bit shifts
+ let n = n % $BITS;
+ // The second `% $BITS` handles n == 0, where `$BITS - n` would
+ // otherwise be a shift by the full type width (undefined behaviour).
+ (self << n) | (self >> (($BITS - n) % $BITS))
+ }
+
+ /// Shifts the bits to the right by a specified amount, `n`,
+ /// wrapping the truncated bits to the beginning of the resulting
+ /// integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0xDEF0123456789ABCu64;
+ ///
+ /// assert_eq!(n.rotate_right(12), m);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rotate_right(self, n: u32) -> Self {
+ // Protect against undefined behaviour for over-long bit shifts
+ let n = n % $BITS;
+ // The second `% $BITS` handles n == 0, where `$BITS - n` would
+ // otherwise be a shift by the full type width (undefined behaviour).
+ (self >> n) | (self << (($BITS - n) % $BITS))
+ }
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ /// let m = 0xEFCDAB8967452301u64;
+ ///
+ /// assert_eq!(n.swap_bytes(), m);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn swap_bytes(self) -> Self {
+ unsafe { $bswap(self as $ActualT) as Self }
+ }
+
+ /// Converts an integer from big endian to the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(u64::from_be(n), n)
+ /// } else {
+ /// assert_eq!(u64::from_be(n), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn from_be(x: Self) -> Self {
+ if cfg!(target_endian = "big") { x } else { x.swap_bytes() }
+ }
+
+ /// Converts an integer from little endian to the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(u64::from_le(n), n)
+ /// } else {
+ /// assert_eq!(u64::from_le(n), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn from_le(x: Self) -> Self {
+ if cfg!(target_endian = "little") { x } else { x.swap_bytes() }
+ }
+
+ /// Converts `self` to big endian from the target's endianness.
+ ///
+ /// On big endian this is a no-op. On little endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(n.to_be(), n)
+ /// } else {
+ /// assert_eq!(n.to_be(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_be(self) -> Self { // or not to be?
+ if cfg!(target_endian = "big") { self } else { self.swap_bytes() }
+ }
+
+ /// Converts `self` to little endian from the target's endianness.
+ ///
+ /// On little endian this is a no-op. On big endian the bytes are
+ /// swapped.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let n = 0x0123456789ABCDEFu64;
+ ///
+ /// if cfg!(target_endian = "little") {
+ /// assert_eq!(n.to_le(), n)
+ /// } else {
+ /// assert_eq!(n.to_le(), n.swap_bytes())
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_le(self) -> Self {
+ if cfg!(target_endian = "little") { self } else { self.swap_bytes() }
+ }
+
+ /// Checked integer addition. Computes `self + other`, returning `None`
+ /// if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(5u16.checked_add(65530), Some(65535));
+ /// assert_eq!(6u16.checked_add(65530), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_add(self, other: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(other);
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked integer subtraction. Computes `self - other`, returning
+ /// `None` if underflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(1u8.checked_sub(1), Some(0));
+ /// assert_eq!(0u8.checked_sub(1), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_sub(self, other: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_sub(other);
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked integer multiplication. Computes `self * other`, returning
+ /// `None` if underflow or overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(5u8.checked_mul(51), Some(255));
+ /// assert_eq!(5u8.checked_mul(52), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_mul(self, other: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_mul(other);
+ if b {None} else {Some(a)}
+ }
+
+ /// Checked integer division. Computes `self / other`, returning `None`
+ /// if `other == 0` or the operation results in underflow or overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(128u8.checked_div(2), Some(64));
+ /// assert_eq!(1u8.checked_div(0), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn checked_div(self, other: Self) -> Option<Self> {
+ // Unsigned division can never overflow, so a zero check on the
+ // divisor is the only failure case (unlike the signed version,
+ // which must also guard against MIN / -1).
+ match other {
+ 0 => None,
+ other => Some(self / other),
+ }
+ }
+
+ /// Checked integer remainder. Computes `self % other`, returning `None`
+ /// if `other == 0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(5u32.checked_rem(2), Some(1));
+ /// assert_eq!(5u32.checked_rem(0), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_rem(self, other: Self) -> Option<Self> {
+ // Only a zero divisor can make the remainder undefined.
+ match other {
+ 0 => None,
+ _ => Some(self % other),
+ }
+ }
+
+ /// Checked negation. Computes `-self`, returning `None` unless `self ==
+ /// 0`.
+ ///
+ /// Note that negating any positive integer will overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(0u32.checked_neg(), Some(0));
+ /// assert_eq!(1u32.checked_neg(), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_neg(self) -> Option<Self> {
+ // The overflowing primitive reports whether wrapping took place.
+ match self.overflowing_neg() {
+ (value, false) => Some(value),
+ (_, true) => None,
+ }
+ }
+
+ /// Checked shift left. Computes `self << rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(0x10u32.checked_shl(4), Some(0x100));
+ /// assert_eq!(0x10u32.checked_shl(33), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_shl(self, rhs: u32) -> Option<Self> {
+ // An out-of-range shift count invalidates the masked result.
+ match self.overflowing_shl(rhs) {
+ (shifted, false) => Some(shifted),
+ (_, true) => None,
+ }
+ }
+
+ /// Checked shift right. Computes `self >> rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(0x10u32.checked_shr(4), Some(0x1));
+ /// assert_eq!(0x10u32.checked_shr(33), None);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn checked_shr(self, rhs: u32) -> Option<Self> {
+ // An out-of-range shift count invalidates the masked result.
+ match self.overflowing_shr(rhs) {
+ (shifted, false) => Some(shifted),
+ (_, true) => None,
+ }
+ }
+
+ /// Saturating integer addition. Computes `self + other`, saturating at
+ /// the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100u8.saturating_add(1), 101);
+ /// assert_eq!(200u8.saturating_add(127), 255);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn saturating_add(self, other: Self) -> Self {
+ // Clamp an overflowing sum to the type's upper bound.
+ self.checked_add(other).unwrap_or(Self::max_value())
+ }
+
+ /// Saturating integer subtraction. Computes `self - other`, saturating
+ /// at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100u8.saturating_sub(27), 73);
+ /// assert_eq!(13u8.saturating_sub(127), 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn saturating_sub(self, other: Self) -> Self {
+ // Clamp an underflowing difference to the type's lower bound.
+ self.checked_sub(other).unwrap_or(Self::min_value())
+ }
+
+ /// Saturating integer multiplication. Computes `self * other`,
+ /// saturating at the numeric bounds instead of overflowing.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::u32;
+ ///
+ /// assert_eq!(100u32.saturating_mul(127), 12700);
+ /// assert_eq!((1u32 << 23).saturating_mul(1 << 23), u32::MAX);
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[inline]
+ pub fn saturating_mul(self, other: Self) -> Self {
+ // Clamp an overflowing product to the type's upper bound.
+ match self.checked_mul(other) {
+ Some(product) => product,
+ None => Self::max_value(),
+ }
+ }
+
+ /// Wrapping (modular) addition. Computes `self + other`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(200u8.wrapping_add(55), 255);
+ /// assert_eq!(200u8.wrapping_add(155), 99);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn wrapping_add(self, rhs: Self) -> Self {
+ // `intrinsics::overflowing_add` is the raw wrapping add: despite the
+ // name it returns only the wrapped value (note the `Self` return
+ // type here), with no overflow flag.
+ unsafe {
+ intrinsics::overflowing_add(self, rhs)
+ }
+ }
+
+ /// Wrapping (modular) subtraction. Computes `self - other`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100u8.wrapping_sub(100), 0);
+ /// assert_eq!(100u8.wrapping_sub(155), 201);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn wrapping_sub(self, rhs: Self) -> Self {
+ // `intrinsics::overflowing_sub` is the raw wrapping subtract: it
+ // returns only the wrapped value (note the `Self` return type), with
+ // no borrow flag.
+ unsafe {
+ intrinsics::overflowing_sub(self, rhs)
+ }
+ }
+
+ /// Wrapping (modular) multiplication. Computes `self *
+ /// other`, wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(10u8.wrapping_mul(12), 120);
+ /// assert_eq!(25u8.wrapping_mul(12), 44);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn wrapping_mul(self, rhs: Self) -> Self {
+ // `intrinsics::overflowing_mul` is the raw wrapping multiply: it
+ // returns only the wrapped value (note the `Self` return type), with
+ // no overflow flag.
+ unsafe {
+ intrinsics::overflowing_mul(self, rhs)
+ }
+ }
+
+ /// Wrapping (modular) division. Computes `self / other`.
+ /// Wrapped division on unsigned types is just normal division.
+ /// There's no way wrapping could ever happen.
+ /// This function exists, so that all operations
+ /// are accounted for in the wrapping operations.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100u8.wrapping_div(10), 10);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_div(self, rhs: Self) -> Self {
+ // Plain division: as documented above, unsigned division can never
+ // wrap. A zero `rhs` panics via the `/` operator itself.
+ self / rhs
+ }
+
+ /// Wrapping (modular) remainder. Computes `self % other`.
+ /// Wrapped remainder calculation on unsigned types is
+ /// just the regular remainder calculation.
+ /// There's no way wrapping could ever happen.
+ /// This function exists, so that all operations
+ /// are accounted for in the wrapping operations.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100u8.wrapping_rem(10), 0);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_rem(self, rhs: Self) -> Self {
+ // Plain remainder: as documented above, unsigned remainder can never
+ // wrap. A zero `rhs` panics via the `%` operator itself.
+ self % rhs
+ }
+
+ /// Wrapping (modular) negation. Computes `-self`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Since unsigned types do not have negative equivalents
+ /// all applications of this function will wrap (except for `-0`).
+ /// For values smaller than the corresponding signed type's maximum
+ /// the result is the same as casting the corresponding signed value.
+ /// Any larger values are equivalent to `MAX + 1 - (val - MAX - 1)` where
+ /// `MAX` is the corresponding signed type's maximum.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(100u8.wrapping_neg(), 156);
+ /// assert_eq!(0u8.wrapping_neg(), 0);
+ /// assert_eq!(180u8.wrapping_neg(), 76);
+ /// assert_eq!(180u8.wrapping_neg(), (127 + 1) - (180u8 - (127 + 1)));
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_neg(self) -> Self {
+ // Defer to `overflowing_neg` and discard its overflow flag.
+ self.overflowing_neg().0
+ }
+
+ /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+ /// where `mask` removes any high-order bits of `rhs` that
+ /// would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(1u8.wrapping_shl(7), 128);
+ /// assert_eq!(1u8.wrapping_shl(8), 1);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_shl(self, rhs: u32) -> Self {
+ // Defer to `overflowing_shl` (which masks the count) and discard its
+ // out-of-range flag.
+ self.overflowing_shl(rhs).0
+ }
+
+ /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+ /// where `mask` removes any high-order bits of `rhs` that
+ /// would cause the shift to exceed the bitwidth of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(128u8.wrapping_shr(7), 1);
+ /// assert_eq!(128u8.wrapping_shr(8), 128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[inline(always)]
+ pub fn wrapping_shr(self, rhs: u32) -> Self {
+ // Defer to `overflowing_shr` (which masks the count) and discard its
+ // out-of-range flag.
+ self.overflowing_shr(rhs).0
+ }
+
+ /// Calculates `self` + `rhs`
+ ///
+ /// Returns a tuple of the addition along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use std::u32;
+ ///
+ /// assert_eq!(5u32.overflowing_add(2), (7, false));
+ /// assert_eq!(u32::MAX.overflowing_add(1), (0, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ // NOTE(review): `$ActualT` is bound by the macro header above this
+ // chunk — presumably the fixed-width type backing `Self` (the usize
+ // impls below pass u32/u64), hence the round-trip casts around the
+ // intrinsic. Confirm against the `uint_impl!` header.
+ unsafe {
+ let (a, b) = $add_with_overflow(self as $ActualT,
+ rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ /// Calculates `self` - `rhs`
+ ///
+ /// Returns a tuple of the subtraction along with a boolean indicating
+ /// whether an arithmetic overflow would occur. If an overflow would
+ /// have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// use std::u32;
+ ///
+ /// assert_eq!(5u32.overflowing_sub(2), (3, false));
+ /// assert_eq!(0u32.overflowing_sub(1), (u32::MAX, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+ // NOTE(review): casts through `$ActualT` mirror `overflowing_add`;
+ // see the comment there.
+ unsafe {
+ let (a, b) = $sub_with_overflow(self as $ActualT,
+ rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ /// Calculates the multiplication of `self` and `rhs`.
+ ///
+ /// Returns a tuple of the multiplication along with a boolean
+ /// indicating whether an arithmetic overflow would occur. If an
+ /// overflow would have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(5u32.overflowing_mul(2), (10, false));
+ /// assert_eq!(1_000_000_000u32.overflowing_mul(10), (1410065408, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
+ // NOTE(review): casts through `$ActualT` mirror `overflowing_add`;
+ // see the comment there.
+ unsafe {
+ let (a, b) = $mul_with_overflow(self as $ActualT,
+ rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ /// Calculates the divisor when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the divisor along with a boolean indicating
+ /// whether an arithmetic overflow would occur. Note that for unsigned
+ /// integers overflow never occurs, so the second value is always
+ /// `false`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(5u32.overflowing_div(2), (2, false));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_div(self, rhs: Self) -> (Self, bool) {
+ // Unsigned division cannot overflow, so the flag is constantly
+ // false; a zero `rhs` panics via the `/` operator itself.
+ (self / rhs, false)
+ }
+
+ /// Calculates the remainder when `self` is divided by `rhs`.
+ ///
+ /// Returns a tuple of the remainder after dividing along with a boolean
+ /// indicating whether an arithmetic overflow would occur. Note that for
+ /// unsigned integers overflow never occurs, so the second value is
+ /// always `false`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `rhs` is 0.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(5u32.overflowing_rem(2), (1, false));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_rem(self, rhs: Self) -> (Self, bool) {
+ // Unsigned remainder cannot overflow, so the flag is constantly
+ // false; a zero `rhs` panics via the `%` operator itself.
+ (self % rhs, false)
+ }
+
+ /// Negates self in an overflowing fashion.
+ ///
+ /// Returns `!self + 1` using wrapping operations to return the value
+ /// that represents the negation of this unsigned value. Note that for
+ /// positive unsigned values overflow always occurs, but negating 0 does
+ /// not overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(0u32.overflowing_neg(), (0, false));
+ /// assert_eq!(2u32.overflowing_neg(), (-2i32 as u32, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_neg(self) -> (Self, bool) {
+ // Two's-complement negation `!self + 1`, computed with a wrapping
+ // add so that negating 0 stays 0. Only `self == 0` avoids overflow.
+ ((!self).wrapping_add(1), self != 0)
+ }
+
+ /// Shifts self left by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of self along with a boolean
+ /// indicating whether the shift value was larger than or equal to the
+ /// number of bits. If the shift value is too large, then value is
+ /// masked (N-1) where N is the number of bits, and this value is then
+ /// used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(0x10u32.overflowing_shl(4), (0x100, false));
+ /// assert_eq!(0x10u32.overflowing_shl(36), (0x100, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+ // Reduce the shift count modulo the bit width; flag whether the raw
+ // count was out of range.
+ let masked_rhs = rhs & ($BITS - 1);
+ (self << masked_rhs, rhs >= $BITS)
+ }
+
+ /// Shifts self right by `rhs` bits.
+ ///
+ /// Returns a tuple of the shifted version of self along with a boolean
+ /// indicating whether the shift value was larger than or equal to the
+ /// number of bits. If the shift value is too large, then value is
+ /// masked (N-1) where N is the number of bits, and this value is then
+ /// used to perform the shift.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// assert_eq!(0x10u32.overflowing_shr(4), (0x1, false));
+ /// assert_eq!(0x10u32.overflowing_shr(36), (0x1, true));
+ /// ```
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+ // Reduce the shift count modulo the bit width; flag whether the raw
+ // count was out of range.
+ let masked_rhs = rhs & ($BITS - 1);
+ (self >> masked_rhs, rhs >= $BITS)
+ }
+
+ /// Raises self to the power of `exp`, using exponentiation by squaring.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(2u32.pow(4), 16);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
+ pub fn pow(self, mut exp: u32) -> Self {
+ let mut base = self;
+ let mut acc = Self::one();
+
+ // Exponentiation by squaring, with a twist: once `base` has
+ // overflowed, later squarings may wrap back into range and mask the
+ // overflow. `prev_base`/`base_oflo` remember the last pre-overflow
+ // base so the final multiplication re-triggers the overflow exactly
+ // as the straightforward repeated multiplication would.
+ let mut prev_base = self;
+ let mut base_oflo = false;
+ while exp > 0 {
+ if (exp & 1) == 1 {
+ if base_oflo {
+ // ensure overflow occurs in the same manner it
+ // would have otherwise (i.e. signal any exception
+ // it would have otherwise).
+ acc = acc * (prev_base * prev_base);
+ } else {
+ acc = acc * base;
+ }
+ }
+ prev_base = base;
+ let (new_base, new_base_oflo) = base.overflowing_mul(base);
+ base = new_base;
+ base_oflo = new_base_oflo;
+ exp /= 2;
+ }
+ acc
+ }
+
+ /// Returns `true` if and only if `self == 2^k` for some `k`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert!(16u8.is_power_of_two());
+ /// assert!(!10u8.is_power_of_two());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_power_of_two(self) -> bool {
+ // A power of two has exactly one bit set, so clearing the lowest set
+ // bit (`x & (x - 1)`) leaves zero. Zero itself is excluded.
+ self != Self::zero() &&
+ (self & self.wrapping_sub(Self::one())) == Self::zero()
+ }
+
+ /// Returns the smallest power of two greater than or equal to `self`.
+ /// Unspecified behavior on overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(2u8.next_power_of_two(), 2);
+ /// assert_eq!(3u8.next_power_of_two(), 4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn next_power_of_two(self) -> Self {
+ let bits = size_of::<Self>() * 8;
+ let one: Self = Self::one();
+ // `leading_zeros` of `self - 1` determines the shift for the next
+ // power of two. For `self <= 1` the inner expression is a multiple
+ // of `bits`, which the `% bits` reduces to 0, yielding 1. On
+ // overflow (documented above as unspecified) the shift likewise
+ // collapses to 0.
+ one << ((bits - self.wrapping_sub(one).leading_zeros() as usize) % bits)
+ }
+
+ /// Returns the smallest power of two greater than or equal to `n`. If
+ /// the next power of two is greater than the type's maximum value,
+ /// `None` is returned, otherwise the power of two is wrapped in `Some`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!(2u8.checked_next_power_of_two(), Some(2));
+ /// assert_eq!(3u8.checked_next_power_of_two(), Some(4));
+ /// assert_eq!(200u8.checked_next_power_of_two(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn checked_next_power_of_two(self) -> Option<Self> {
+ // On overflow `next_power_of_two` produces a value that sorts below
+ // `self`, which is how the failure is detected here.
+ let npot = self.next_power_of_two();
+ if npot < self { None } else { Some(npot) }
+ }
+ }
+}
+
+// The blocks below instantiate the unsigned inherent methods for each
+// primitive. NOTE(review): the `uint_impl!` header sits above this chunk;
+// judging by the metavariables used in its body, the first two arguments are
+// presumably `$ActualT` (the fixed-width type backing the operations) and
+// `$BITS` — confirm against the macro header.
+#[lang = "u8"]
+impl u8 {
+ uint_impl! { u8, 8,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+#[lang = "u16"]
+impl u16 {
+ uint_impl! { u16, 16,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+#[lang = "u32"]
+impl u32 {
+ uint_impl! { u32, 32,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+#[lang = "u64"]
+impl u64 {
+ uint_impl! { u64, 64,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+// usize delegates to the 32- or 64-bit instantiation matching the target's
+// pointer width.
+#[cfg(target_pointer_width = "32")]
+#[lang = "usize"]
+impl usize {
+ uint_impl! { u32, 32,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+#[cfg(target_pointer_width = "64")]
+#[lang = "usize"]
+impl usize {
+ uint_impl! { u64, 64,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
+/// A classification of floating point numbers.
+///
+/// This `enum` is used as the return type for [`f32::classify()`] and [`f64::classify()`]. See
+/// their documentation for more.
+///
+/// [`f32::classify()`]: ../../std/primitive.f32.html#method.classify
+/// [`f64::classify()`]: ../../std/primitive.f64.html#method.classify
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum FpCategory {
+ /// "Not a Number", the result of invalid operations such as `0.0 / 0.0`
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Nan,
+
+ /// Positive or negative infinity
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Infinite,
+
+ /// Positive or negative zero
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Zero,
+
+ /// De-normalized floating point representation (less precise than `Normal`)
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Subnormal,
+
+ /// A regular floating point number
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Normal,
+}
+
+/// A built-in floating point number.
+#[doc(hidden)]
+#[unstable(feature = "core_float",
+ reason = "stable interface is via `impl f{32,64}` in later crates",
+ issue = "32110")]
+pub trait Float: Sized {
+ /// Returns the NaN value.
+ #[unstable(feature = "float_extras", reason = "needs removal",
+ issue = "27752")]
+ fn nan() -> Self;
+ /// Returns the infinite value.
+ #[unstable(feature = "float_extras", reason = "needs removal",
+ issue = "27752")]
+ fn infinity() -> Self;
+ /// Returns the negative infinite value.
+ #[unstable(feature = "float_extras", reason = "needs removal",
+ issue = "27752")]
+ fn neg_infinity() -> Self;
+ /// Returns -0.0.
+ #[unstable(feature = "float_extras", reason = "needs removal",
+ issue = "27752")]
+ fn neg_zero() -> Self;
+ /// Returns 0.0.
+ #[unstable(feature = "float_extras", reason = "needs removal",
+ issue = "27752")]
+ fn zero() -> Self;
+ /// Returns 1.0.
+ #[unstable(feature = "float_extras", reason = "needs removal",
+ issue = "27752")]
+ fn one() -> Self;
+
+ /// Returns true if this value is NaN and false otherwise.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn is_nan(self) -> bool;
+ /// Returns true if this value is positive infinity or negative infinity and
+ /// false otherwise.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn is_infinite(self) -> bool;
+ /// Returns true if this number is neither infinite nor NaN.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn is_finite(self) -> bool;
+ /// Returns true if this number is neither zero, infinite, subnormal, nor NaN.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn is_normal(self) -> bool;
+ /// Returns the category that this number falls into.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn classify(self) -> FpCategory;
+
+ /// Returns the mantissa, exponent and sign as integers, respectively.
+ #[unstable(feature = "float_extras", reason = "signature is undecided",
+ issue = "27752")]
+ fn integer_decode(self) -> (u64, i16, i8);
+
+ /// Computes the absolute value of `self`. Returns `Float::nan()` if the
+ /// number is `Float::nan()`.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn abs(self) -> Self;
+ /// Returns a number that represents the sign of `self`.
+ ///
+ /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
+ /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
+ /// - `Float::nan()` if the number is `Float::nan()`
+ #[stable(feature = "core", since = "1.6.0")]
+ fn signum(self) -> Self;
+
+ /// Returns `true` if `self` is positive, including `+0.0` and
+ /// `Float::infinity()`.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn is_sign_positive(self) -> bool;
+ /// Returns `true` if `self` is negative, including `-0.0` and
+ /// `Float::neg_infinity()`.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn is_sign_negative(self) -> bool;
+
+ /// Take the reciprocal (inverse) of a number, `1/x`.
+ #[stable(feature = "core", since = "1.6.0")]
+ fn recip(self) -> Self;
+
+ /// Raise a number to an integer power.
+ ///
+ /// Using this function is generally faster than using `powf`
+ #[stable(feature = "core", since = "1.6.0")]
+ fn powi(self, n: i32) -> Self;
+
+ /// Convert radians to degrees.
+ #[unstable(feature = "float_extras", reason = "desirability is unclear",
+ issue = "27752")]
+ fn to_degrees(self) -> Self;
+ /// Convert degrees to radians.
+ #[unstable(feature = "float_extras", reason = "desirability is unclear",
+ issue = "27752")]
+ fn to_radians(self) -> Self;
+}
+
+// Implements `FromStr` (i.e. `str::parse`) for every primitive integer type
+// by delegating to `from_str_radix` with base 10; other radices must call
+// `from_str_radix` directly.
+macro_rules! from_str_radix_int_impl {
+ ($($t:ty)*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl FromStr for $t {
+ type Err = ParseIntError;
+ fn from_str(src: &str) -> Result<Self, ParseIntError> {
+ from_str_radix(src, 10)
+ }
+ }
+ )*}
+}
+from_str_radix_int_impl! { isize i8 i16 i32 i64 usize u8 u16 u32 u64 }
+
+#[doc(hidden)]
+// The minimal arithmetic surface `from_str_radix` needs, so that a single
+// generic routine can serve all ten primitive integer types.
+trait FromStrRadixHelper: PartialOrd + Copy {
+ fn min_value() -> Self;
+ fn from_u32(u: u32) -> Self;
+ fn checked_mul(&self, other: u32) -> Option<Self>;
+ fn checked_sub(&self, other: u32) -> Option<Self>;
+ fn checked_add(&self, other: u32) -> Option<Self>;
+}
+
+// Forwards `FromStrRadixHelper` to the inherent methods of each primitive.
+macro_rules! doit {
+ ($($t:ty)*) => ($(impl FromStrRadixHelper for $t {
+ fn min_value() -> Self { Self::min_value() }
+ fn from_u32(u: u32) -> Self { u as Self }
+ // `other` is only ever the radix (<= 36) or a single digit
+ // (< radix), so the `as Self` casts below are lossless even for i8.
+ fn checked_mul(&self, other: u32) -> Option<Self> {
+ Self::checked_mul(*self, other as Self)
+ }
+ fn checked_sub(&self, other: u32) -> Option<Self> {
+ Self::checked_sub(*self, other as Self)
+ }
+ fn checked_add(&self, other: u32) -> Option<Self> {
+ Self::checked_add(*self, other as Self)
+ }
+ })*)
+}
+doit! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
+
+// Parses `src` as an integer of type `T` in the given radix (2..=36).
+// Returns `Empty` for a missing number, `InvalidDigit` for any byte that is
+// not a valid digit (including a stray sign), and `Overflow`/`Underflow`
+// when the accumulated value leaves `T`'s range.
+fn from_str_radix<T: FromStrRadixHelper>(src: &str, radix: u32)
+ -> Result<T, ParseIntError> {
+ use self::IntErrorKind::*;
+ use self::ParseIntError as PIE;
+
+ assert!(radix >= 2 && radix <= 36,
+ "from_str_radix_int: must lie in the range `[2, 36]` - found {}",
+ radix);
+
+ if src.is_empty() {
+ return Err(PIE { kind: Empty });
+ }
+
+ // For unsigned T, `from_u32(0) > min_value()` is false, so a leading '-'
+ // falls through to the digit loop and is rejected as InvalidDigit.
+ let is_signed_ty = T::from_u32(0) > T::min_value();
+
+ // all valid digits are ascii, so we will just iterate over the utf8 bytes
+ // and cast them to chars. .to_digit() will safely return None for anything
+ // other than a valid ascii digit for the given radix, including the first-byte
+ // of multi-byte sequences
+ let src = src.as_bytes();
+
+ let (is_positive, digits) = match src[0] {
+ b'+' => (true, &src[1..]),
+ b'-' if is_signed_ty => (false, &src[1..]),
+ _ => (true, src)
+ };
+
+ if digits.is_empty() {
+ return Err(PIE { kind: Empty });
+ }
+
+ let mut result = T::from_u32(0);
+ if is_positive {
+ // The number is positive
+ for &c in digits {
+ let x = match (c as char).to_digit(radix) {
+ Some(x) => x,
+ None => return Err(PIE { kind: InvalidDigit }),
+ };
+ result = match result.checked_mul(radix) {
+ Some(result) => result,
+ None => return Err(PIE { kind: Overflow }),
+ };
+ result = match result.checked_add(x) {
+ Some(result) => result,
+ None => return Err(PIE { kind: Overflow }),
+ };
+ }
+ } else {
+ // The number is negative: accumulate toward min_value with
+ // checked_sub so that values like i8's -128 parse without
+ // overflowing on the positive side.
+ for &c in digits {
+ let x = match (c as char).to_digit(radix) {
+ Some(x) => x,
+ None => return Err(PIE { kind: InvalidDigit }),
+ };
+ result = match result.checked_mul(radix) {
+ Some(result) => result,
+ None => return Err(PIE { kind: Underflow }),
+ };
+ result = match result.checked_sub(x) {
+ Some(result) => result,
+ None => return Err(PIE { kind: Underflow }),
+ };
+ }
+ }
+ Ok(result)
+}
+
+/// An error which can be returned when parsing an integer.
+///
+/// This error is used as the error type for the `from_str_radix()` functions
+/// on the primitive integer types, such as [`i8::from_str_radix()`].
+///
+/// [`i8::from_str_radix()`]: ../../std/primitive.i8.html#method.from_str_radix
+#[derive(Debug, Clone, PartialEq)]
+#[stable(feature = "rust1", since = "1.0.0")]
+// The kind is kept private; it reaches users only through
+// `__description`/`Display`.
+pub struct ParseIntError { kind: IntErrorKind }
+
+#[derive(Debug, Clone, PartialEq)]
+enum IntErrorKind {
+ Empty, // no digits at all (empty string, or a bare sign)
+ InvalidDigit, // a byte that is not a digit in the requested radix
+ Overflow, // value exceeded the type's upper bound
+ Underflow, // value exceeded the type's lower bound
+}
+
+impl ParseIntError {
+ #[unstable(feature = "int_error_internals",
+ reason = "available through Error trait and this method should \
+ not be exposed publicly",
+ issue = "0")]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ // Maps each private error kind to its canonical user-facing message.
+ match self.kind {
+ IntErrorKind::Empty => "cannot parse integer from empty string",
+ IntErrorKind::InvalidDigit => "invalid digit found in string",
+ IntErrorKind::Overflow => "number too large to fit in target type",
+ IntErrorKind::Underflow => "number too small to fit in target type",
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for ParseIntError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ // Reuse the hidden `__description` string as the display text.
+ self.__description().fmt(f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use num::dec2flt::ParseFloatError;
+
+// Conversion traits for primitive integer and float types
+// Conversions T -> T are covered by a blanket impl and therefore excluded
+// Some conversions from and to usize/isize are not implemented due to portability concerns
+macro_rules! impl_from {
+ ($Small: ty, $Large: ty) => {
+ #[stable(feature = "lossless_prim_conv", since = "1.5.0")]
+ impl From<$Small> for $Large {
+ #[inline]
+ fn from(small: $Small) -> $Large {
+ // A plain `as` cast: for every ($Small, $Large) pair this
+ // macro is instantiated with below, the cast is
+ // value-preserving.
+ small as $Large
+ }
+ }
+ }
+}
+
+// Unsigned -> Unsigned
+impl_from! { u8, u16 }
+impl_from! { u8, u32 }
+impl_from! { u8, u64 }
+impl_from! { u8, usize }
+impl_from! { u16, u32 }
+impl_from! { u16, u64 }
+impl_from! { u32, u64 }
+
+// Signed -> Signed
+impl_from! { i8, i16 }
+impl_from! { i8, i32 }
+impl_from! { i8, i64 }
+impl_from! { i8, isize }
+impl_from! { i16, i32 }
+impl_from! { i16, i64 }
+impl_from! { i32, i64 }
+
+// Unsigned -> Signed
+impl_from! { u8, i16 }
+impl_from! { u8, i32 }
+impl_from! { u8, i64 }
+impl_from! { u16, i32 }
+impl_from! { u16, i64 }
+impl_from! { u32, i64 }
+
+// Note: integers can only be represented with full precision in a float if
+// they fit in the significand, which is 24 bits in f32 and 53 bits in f64.
+// Lossy float conversions are not implemented at this time.
+// (Hence u32 -> f32 and {i64, u64} -> f64 are deliberately absent below.)
+
+// Signed -> Float
+impl_from! { i8, f32 }
+impl_from! { i8, f64 }
+impl_from! { i16, f32 }
+impl_from! { i16, f64 }
+impl_from! { i32, f64 }
+
+// Unsigned -> Float
+impl_from! { u8, f32 }
+impl_from! { u8, f64 }
+impl_from! { u16, f32 }
+impl_from! { u16, f64 }
+impl_from! { u32, f64 }
+
+// Float -> Float
+impl_from! { f32, f64 }
diff --git a/libcore/num/u16.rs b/libcore/num/u16.rs
new file mode 100644
index 0000000..d34d87c
--- /dev/null
+++ b/libcore/num/u16.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 16-bit unsigned integer type.
+//!
+//! *[See also the `u16` primitive type](../../std/primitive.u16.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// `uint_module!` (libcore/num/uint_macros.rs) emits the MIN/MAX constants.
+uint_module! { u16, 16 }
diff --git a/libcore/num/u32.rs b/libcore/num/u32.rs
new file mode 100644
index 0000000..f9c9099
--- /dev/null
+++ b/libcore/num/u32.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 32-bit unsigned integer type.
+//!
+//! *[See also the `u32` primitive type](../../std/primitive.u32.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// `uint_module!` (libcore/num/uint_macros.rs) emits the MIN/MAX constants.
+uint_module! { u32, 32 }
diff --git a/libcore/num/u64.rs b/libcore/num/u64.rs
new file mode 100644
index 0000000..8dfe433
--- /dev/null
+++ b/libcore/num/u64.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 64-bit unsigned integer type.
+//!
+//! *[See also the `u64` primitive type](../../std/primitive.u64.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// `uint_module!` (libcore/num/uint_macros.rs) emits the MIN/MAX constants.
+uint_module! { u64, 64 }
diff --git a/libcore/num/u8.rs b/libcore/num/u8.rs
new file mode 100644
index 0000000..0106ee8
--- /dev/null
+++ b/libcore/num/u8.rs
@@ -0,0 +1,17 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The 8-bit unsigned integer type.
+//!
+//! *[See also the `u8` primitive type](../../std/primitive.u8.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// `uint_module!` (libcore/num/uint_macros.rs) emits the MIN/MAX constants.
+uint_module! { u8, 8 }
diff --git a/libcore/num/uint_macros.rs b/libcore/num/uint_macros.rs
new file mode 100644
index 0000000..6479836
--- /dev/null
+++ b/libcore/num/uint_macros.rs
@@ -0,0 +1,22 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![doc(hidden)]
+
+// Emits the range constants for an unsigned primitive `$T`:
+// `MIN` is always zero, and `MAX` is the all-ones bit pattern (`!0`) of
+// the type. Note that `$bits` is accepted by the matcher but not used by
+// this expansion.
+macro_rules! uint_module { ($T:ty, $bits:expr) => (
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MIN: $T = 0 as $T;
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(missing_docs)]
+pub const MAX: $T = !0 as $T;
+
+) }
diff --git a/libcore/num/usize.rs b/libcore/num/usize.rs
new file mode 100644
index 0000000..0c7d16a
--- /dev/null
+++ b/libcore/num/usize.rs
@@ -0,0 +1,20 @@
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The pointer-sized unsigned integer type.
+//!
+//! *[See also the `usize` primitive type](../../std/primitive.usize.html).*
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// `usize` follows the target pointer width, so select the matching bit
+// count when expanding the `MIN`/`MAX` constants.
+#[cfg(target_pointer_width = "32")]
+uint_module! { usize, 32 }
+#[cfg(target_pointer_width = "64")]
+uint_module! { usize, 64 }
diff --git a/libcore/num/wrapping.rs b/libcore/num/wrapping.rs
new file mode 100644
index 0000000..e28a36a
--- /dev/null
+++ b/libcore/num/wrapping.rs
@@ -0,0 +1,309 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::Wrapping;
+
+use ops::*;
+
+// Implements `Shl`/`Shr` (and their `*Assign` forms) for `Wrapping<$t>`
+// with a *signed* shift-amount type `$f`: a negative amount shifts in the
+// opposite direction by its magnitude. The amount is masked with
+// `self::shift_max::$t` so the effective count stays below the bit width,
+// matching the semantics of `wrapping_shl`/`wrapping_shr`.
+//
+// NOTE(review): currently unused — every `sh_impl_signed!` invocation in
+// `sh_impl_all!` below is commented out (see FIXME #23545 there).
+macro_rules! sh_impl_signed {
+    ($t:ident, $f:ident) => (
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Shl<$f> for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn shl(self, other: $f) -> Wrapping<$t> {
+                // Negative left-shift amount: shift right by the magnitude.
+                if other < 0 {
+                    Wrapping(self.0.wrapping_shr((-other & self::shift_max::$t as $f) as u32))
+                } else {
+                    Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
+                }
+            }
+        }
+
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
+        impl ShlAssign<$f> for Wrapping<$t> {
+            #[inline(always)]
+            fn shl_assign(&mut self, other: $f) {
+                *self = *self << other;
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Shr<$f> for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn shr(self, other: $f) -> Wrapping<$t> {
+                // Negative right-shift amount: shift left by the magnitude.
+                if other < 0 {
+                    Wrapping(self.0.wrapping_shl((-other & self::shift_max::$t as $f) as u32))
+                } else {
+                    Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
+                }
+            }
+        }
+
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
+        impl ShrAssign<$f> for Wrapping<$t> {
+            #[inline(always)]
+            fn shr_assign(&mut self, other: $f) {
+                *self = *self >> other;
+            }
+        }
+    )
+}
+
+// Implements `Shl`/`Shr` (and their `*Assign` forms) for `Wrapping<$t>`
+// with an *unsigned* shift-amount type `$f`. The amount is masked with
+// `self::shift_max::$t` so the effective count stays below the bit width,
+// then delegated to the type's `wrapping_shl`/`wrapping_shr`.
+macro_rules! sh_impl_unsigned {
+    ($t:ident, $f:ident) => (
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Shl<$f> for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn shl(self, other: $f) -> Wrapping<$t> {
+                Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
+            }
+        }
+
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
+        impl ShlAssign<$f> for Wrapping<$t> {
+            #[inline(always)]
+            fn shl_assign(&mut self, other: $f) {
+                *self = *self << other;
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Shr<$f> for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn shr(self, other: $f) -> Wrapping<$t> {
+                Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
+            }
+        }
+
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
+        impl ShrAssign<$f> for Wrapping<$t> {
+            #[inline(always)]
+            fn shr_assign(&mut self, other: $f) {
+                *self = *self >> other;
+            }
+        }
+    )
+}
+
+// FIXME (#23545): uncomment the remaining impls
+// Instantiates the shift impls for every `Wrapping`-able primitive listed
+// in the invocation below. Only the `usize` shift-amount impls are active;
+// the other amount types are commented out pending the FIXME above.
+macro_rules! sh_impl_all {
+    ($($t:ident)*) => ($(
+        //sh_impl_unsigned! { $t, u8 }
+        //sh_impl_unsigned! { $t, u16 }
+        //sh_impl_unsigned! { $t, u32 }
+        //sh_impl_unsigned! { $t, u64 }
+        sh_impl_unsigned! { $t, usize }
+
+        //sh_impl_signed! { $t, i8 }
+        //sh_impl_signed! { $t, i16 }
+        //sh_impl_signed! { $t, i32 }
+        //sh_impl_signed! { $t, i64 }
+        //sh_impl_signed! { $t, isize }
+    )*)
+}
+
+sh_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
+
+// FIXME(30524): impl Op<T> for Wrapping<T>, impl OpAssign<T> for Wrapping<T>
+// Implements the arithmetic operators (`Add`, `Sub`, `Mul`, `Div`, `Rem`),
+// the bitwise operators (`Not`, `BitXor`, `BitOr`, `BitAnd`), and the
+// corresponding `*Assign` traits for `Wrapping<$t>`. Arithmetic ops
+// delegate to the inner type's `wrapping_*` methods; the bitwise ops apply
+// the plain operator to the inner value. Each `*Assign` impl is defined in
+// terms of the matching binary operator.
+macro_rules! wrapping_impl {
+    ($($t:ty)*) => ($(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Add for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn add(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0.wrapping_add(other.0))
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl AddAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn add_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self + other;
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Sub for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn sub(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0.wrapping_sub(other.0))
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl SubAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn sub_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self - other;
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Mul for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn mul(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0.wrapping_mul(other.0))
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl MulAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn mul_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self * other;
+            }
+        }
+
+        #[stable(feature = "wrapping_div", since = "1.3.0")]
+        impl Div for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn div(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0.wrapping_div(other.0))
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl DivAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn div_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self / other;
+            }
+        }
+
+        #[stable(feature = "wrapping_impls", since = "1.7.0")]
+        impl Rem for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn rem(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0.wrapping_rem(other.0))
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl RemAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn rem_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self % other;
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Not for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn not(self) -> Wrapping<$t> {
+                Wrapping(!self.0)
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl BitXor for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn bitxor(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0 ^ other.0)
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl BitXorAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn bitxor_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self ^ other;
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl BitOr for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn bitor(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0 | other.0)
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl BitOrAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn bitor_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self | other;
+            }
+        }
+
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl BitAnd for Wrapping<$t> {
+            type Output = Wrapping<$t>;
+
+            #[inline(always)]
+            fn bitand(self, other: Wrapping<$t>) -> Wrapping<$t> {
+                Wrapping(self.0 & other.0)
+            }
+        }
+
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl BitAndAssign for Wrapping<$t> {
+            #[inline(always)]
+            fn bitand_assign(&mut self, other: Wrapping<$t>) {
+                *self = *self & other;
+            }
+        }
+    )*)
+}
+
+wrapping_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
+
+// Shift-amount masks used by the `sh_impl_*` macros: each constant is
+// `bits - 1` for the corresponding primitive (e.g. `(1 << 3) - 1 == 7`
+// for the 8-bit types), so masking a shift amount with it keeps the
+// effective count strictly below the type's bit width.
+mod shift_max {
+    #![allow(non_upper_case_globals)]
+
+    // `usize`/`isize` masks alias the constant for the target pointer width.
+    #[cfg(target_pointer_width = "32")]
+    mod platform {
+        pub const usize: u32 = super::u32;
+        pub const isize: u32 = super::i32;
+    }
+
+    #[cfg(target_pointer_width = "64")]
+    mod platform {
+        pub const usize: u32 = super::u64;
+        pub const isize: u32 = super::i64;
+    }
+
+    pub const i8: u32 = (1 << 3) - 1;
+    pub const i16: u32 = (1 << 4) - 1;
+    pub const i32: u32 = (1 << 5) - 1;
+    pub const i64: u32 = (1 << 6) - 1;
+    pub use self::platform::isize;
+
+    // An unsigned type's mask equals the signed one of the same width.
+    pub const u8: u32 = i8;
+    pub const u16: u32 = i16;
+    pub const u32: u32 = i32;
+    pub const u64: u32 = i64;
+    pub use self::platform::usize;
+}