aboutsummaryrefslogtreecommitdiff
path: root/libcore
diff options
context:
space:
mode:
Diffstat (limited to 'libcore')
-rw-r--r--libcore/Cargo.toml5
-rw-r--r--libcore/build.rs14
-rw-r--r--libcore/cell.rs177
-rw-r--r--libcore/char.rs96
-rw-r--r--libcore/clone.rs37
-rw-r--r--libcore/cmp.rs168
-rw-r--r--libcore/convert.rs57
-rw-r--r--libcore/default.rs138
-rw-r--r--libcore/fmt/mod.rs32
-rw-r--r--libcore/fmt/num.rs4
-rw-r--r--libcore/hash/mod.rs30
-rw-r--r--libcore/intrinsics.rs19
-rw-r--r--libcore/iter/iterator.rs5
-rw-r--r--libcore/iter/mod.rs22
-rw-r--r--libcore/lib.rs3
-rw-r--r--libcore/macros.rs2
-rw-r--r--libcore/marker.rs39
-rw-r--r--libcore/mem.rs15
-rw-r--r--libcore/num/bignum.rs2
-rw-r--r--libcore/num/dec2flt/algorithm.rs89
-rw-r--r--libcore/num/int_macros.rs11
-rw-r--r--libcore/num/isize.rs2
-rw-r--r--libcore/num/mod.rs133
-rw-r--r--libcore/num/uint_macros.rs6
-rw-r--r--libcore/num/usize.rs2
-rw-r--r--libcore/num/wrapping.rs15
-rw-r--r--libcore/ops.rs11
-rw-r--r--libcore/raw.rs76
-rw-r--r--libcore/slice.rs28
-rw-r--r--libcore/str/mod.rs135
-rw-r--r--libcore/sync/atomic.rs1125
31 files changed, 1348 insertions, 1150 deletions
diff --git a/libcore/Cargo.toml b/libcore/Cargo.toml
index 98f941f..3b406ac 100644
--- a/libcore/Cargo.toml
+++ b/libcore/Cargo.toml
@@ -2,9 +2,12 @@
authors = ["The Rust Project Developers"]
name = "core"
version = "0.0.0"
-build = "build.rs"
[lib]
name = "core"
path = "lib.rs"
test = false
+
+[[test]]
+name = "coretest"
+path = "../libcoretest/lib.rs"
diff --git a/libcore/build.rs b/libcore/build.rs
deleted file mode 100644
index a991ac0..0000000
--- a/libcore/build.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
- // Remove this whenever snapshots and rustbuild nightlies are synced.
- println!("cargo:rustc-cfg=cargobuild");
-}
diff --git a/libcore/cell.rs b/libcore/cell.rs
index 257027d..2700f01 100644
--- a/libcore/cell.rs
+++ b/libcore/cell.rs
@@ -145,7 +145,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use clone::Clone;
-use cmp::{PartialEq, Eq};
+use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering};
use default::Default;
use marker::{Copy, Send, Sync, Sized, Unsize};
use ops::{Deref, DerefMut, Drop, FnOnce, CoerceUnsized};
@@ -232,6 +232,18 @@ impl<T:Copy> Cell<T> {
pub fn as_unsafe_cell(&self) -> &UnsafeCell<T> {
&self.value
}
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// This call borrows `Cell` mutably (at compile-time) which guarantees
+ /// that we possess the only reference.
+ #[inline]
+ #[unstable(feature = "cell_get_mut", issue = "33444")]
+ pub fn get_mut(&mut self) -> &mut T {
+ unsafe {
+ &mut *self.value.get()
+ }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -267,6 +279,42 @@ impl<T:PartialEq + Copy> PartialEq for Cell<T> {
#[stable(feature = "cell_eq", since = "1.2.0")]
impl<T:Eq + Copy> Eq for Cell<T> {}
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T:PartialOrd + Copy> PartialOrd for Cell<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &Cell<T>) -> Option<Ordering> {
+ self.get().partial_cmp(&other.get())
+ }
+
+ #[inline]
+ fn lt(&self, other: &Cell<T>) -> bool {
+ self.get() < other.get()
+ }
+
+ #[inline]
+ fn le(&self, other: &Cell<T>) -> bool {
+ self.get() <= other.get()
+ }
+
+ #[inline]
+ fn gt(&self, other: &Cell<T>) -> bool {
+ self.get() > other.get()
+ }
+
+ #[inline]
+ fn ge(&self, other: &Cell<T>) -> bool {
+ self.get() >= other.get()
+ }
+}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T:Ord + Copy> Ord for Cell<T> {
+ #[inline]
+ fn cmp(&self, other: &Cell<T>) -> Ordering {
+ self.get().cmp(&other.get())
+ }
+}
+
/// A mutable memory location with dynamically checked borrow rules
///
/// See the [module-level documentation](index.html) for more.
@@ -455,6 +503,18 @@ impl<T: ?Sized> RefCell<T> {
pub unsafe fn as_unsafe_cell(&self) -> &UnsafeCell<T> {
&self.value
}
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// This call borrows `RefCell` mutably (at compile-time) so there is no
+ /// need for dynamic checks.
+ #[inline]
+ #[unstable(feature = "cell_get_mut", issue="33444")]
+ pub fn get_mut(&mut self) -> &mut T {
+ unsafe {
+ &mut *self.value.get()
+ }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -490,6 +550,42 @@ impl<T: ?Sized + PartialEq> PartialEq for RefCell<T> {
#[stable(feature = "cell_eq", since = "1.2.0")]
impl<T: ?Sized + Eq> Eq for RefCell<T> {}
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &RefCell<T>) -> Option<Ordering> {
+ self.borrow().partial_cmp(&*other.borrow())
+ }
+
+ #[inline]
+ fn lt(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() < *other.borrow()
+ }
+
+ #[inline]
+ fn le(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() <= *other.borrow()
+ }
+
+ #[inline]
+ fn gt(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() > *other.borrow()
+ }
+
+ #[inline]
+ fn ge(&self, other: &RefCell<T>) -> bool {
+ *self.borrow() >= *other.borrow()
+ }
+}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: ?Sized + Ord> Ord for RefCell<T> {
+ #[inline]
+ fn cmp(&self, other: &RefCell<T>) -> Ordering {
+ self.borrow().cmp(&*other.borrow())
+ }
+}
+
struct BorrowRef<'b> {
borrow: &'b Cell<BorrowFlag>,
}
@@ -522,7 +618,9 @@ impl<'b> Clone for BorrowRef<'b> {
// Since this Ref exists, we know the borrow flag
// is not set to WRITING.
let borrow = self.borrow.get();
- debug_assert!(borrow != WRITING && borrow != UNUSED);
+ debug_assert!(borrow != UNUSED);
+ // Prevent the borrow counter from overflowing.
+ assert!(borrow != WRITING);
self.borrow.set(borrow + 1);
BorrowRef { borrow: self.borrow }
}
@@ -596,40 +694,6 @@ impl<'b, T: ?Sized> Ref<'b, T> {
borrow: orig.borrow,
}
}
-
- /// Make a new `Ref` for an optional component of the borrowed data, e.g. an
- /// enum variant.
- ///
- /// The `RefCell` is already immutably borrowed, so this cannot fail.
- ///
- /// This is an associated function that needs to be used as
- /// `Ref::filter_map(...)`. A method would interfere with methods of the
- /// same name on the contents of a `RefCell` used through `Deref`.
- ///
- /// # Example
- ///
- /// ```
- /// # #![feature(cell_extras)]
- /// use std::cell::{RefCell, Ref};
- ///
- /// let c = RefCell::new(Ok(5));
- /// let b1: Ref<Result<u32, ()>> = c.borrow();
- /// let b2: Ref<u32> = Ref::filter_map(b1, |o| o.as_ref().ok()).unwrap();
- /// assert_eq!(*b2, 5)
- /// ```
- #[unstable(feature = "cell_extras", reason = "recently added",
- issue = "27746")]
- #[rustc_deprecated(since = "1.8.0", reason = "can be built on `Ref::map`: \
- https://crates.io/crates/ref_filter_map")]
- #[inline]
- pub fn filter_map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Option<Ref<'b, U>>
- where F: FnOnce(&T) -> Option<&U>
- {
- f(orig.value).map(move |new| Ref {
- value: new,
- borrow: orig.borrow,
- })
- }
}
#[unstable(feature = "coerce_unsized", issue = "27732")]
@@ -669,47 +733,6 @@ impl<'b, T: ?Sized> RefMut<'b, T> {
borrow: orig.borrow,
}
}
-
- /// Make a new `RefMut` for an optional component of the borrowed data, e.g.
- /// an enum variant.
- ///
- /// The `RefCell` is already mutably borrowed, so this cannot fail.
- ///
- /// This is an associated function that needs to be used as
- /// `RefMut::filter_map(...)`. A method would interfere with methods of the
- /// same name on the contents of a `RefCell` used through `Deref`.
- ///
- /// # Example
- ///
- /// ```
- /// # #![feature(cell_extras)]
- /// use std::cell::{RefCell, RefMut};
- ///
- /// let c = RefCell::new(Ok(5));
- /// {
- /// let b1: RefMut<Result<u32, ()>> = c.borrow_mut();
- /// let mut b2: RefMut<u32> = RefMut::filter_map(b1, |o| {
- /// o.as_mut().ok()
- /// }).unwrap();
- /// assert_eq!(*b2, 5);
- /// *b2 = 42;
- /// }
- /// assert_eq!(*c.borrow(), Ok(42));
- /// ```
- #[unstable(feature = "cell_extras", reason = "recently added",
- issue = "27746")]
- #[rustc_deprecated(since = "1.8.0", reason = "can be built on `RefMut::map`: \
- https://crates.io/crates/ref_filter_map")]
- #[inline]
- pub fn filter_map<U: ?Sized, F>(orig: RefMut<'b, T>, f: F) -> Option<RefMut<'b, U>>
- where F: FnOnce(&mut T) -> Option<&mut U>
- {
- let RefMut { value, borrow } = orig;
- f(value).map(move |new| RefMut {
- value: new,
- borrow: borrow,
- })
- }
}
struct BorrowRefMut<'b> {
diff --git a/libcore/char.rs b/libcore/char.rs
index 1404038..d80b456 100644
--- a/libcore/char.rs
+++ b/libcore/char.rs
@@ -15,11 +15,9 @@
#![allow(non_snake_case)]
#![stable(feature = "core_char", since = "1.2.0")]
-use iter::Iterator;
+use prelude::v1::*;
+
use mem::transmute;
-use option::Option::{None, Some};
-use option::Option;
-use slice::SliceExt;
// UTF-8 ranges and tags for encoding characters
const TAG_CONT: u8 = 0b1000_0000;
@@ -413,14 +411,17 @@ pub struct EscapeUnicode {
hex_digit_idx: usize,
}
+// The enum values are ordered so that their representation is the
+// same as the remaining length (besides the hexadecimal digits). This
+// likely makes `len()` a single load from memory and inline-worthy.
#[derive(Clone, Debug)]
enum EscapeUnicodeState {
- Backslash,
- Type,
- LeftBrace,
- Value,
- RightBrace,
Done,
+ RightBrace,
+ Value,
+ LeftBrace,
+ Type,
+ Backslash,
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -459,18 +460,44 @@ impl Iterator for EscapeUnicode {
}
}
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- let n = match self.state {
- EscapeUnicodeState::Backslash => 5,
- EscapeUnicodeState::Type => 4,
- EscapeUnicodeState::LeftBrace => 3,
- EscapeUnicodeState::Value => 2,
- EscapeUnicodeState::RightBrace => 1,
- EscapeUnicodeState::Done => 0,
- };
- let n = n + self.hex_digit_idx;
+ let n = self.len();
(n, Some(n))
}
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ fn last(self) -> Option<char> {
+ match self.state {
+ EscapeUnicodeState::Done => None,
+
+ EscapeUnicodeState::RightBrace |
+ EscapeUnicodeState::Value |
+ EscapeUnicodeState::LeftBrace |
+ EscapeUnicodeState::Type |
+ EscapeUnicodeState::Backslash => Some('}'),
+ }
+ }
+}
+
+#[stable(feature = "exact_size_escape", since = "1.11.0")]
+impl ExactSizeIterator for EscapeUnicode {
+ #[inline]
+ fn len(&self) -> usize {
+ // The match is a single memory access with no branching
+ self.hex_digit_idx + match self.state {
+ EscapeUnicodeState::Done => 0,
+ EscapeUnicodeState::RightBrace => 1,
+ EscapeUnicodeState::Value => 2,
+ EscapeUnicodeState::LeftBrace => 3,
+ EscapeUnicodeState::Type => 4,
+ EscapeUnicodeState::Backslash => 5,
+ }
+ }
}
/// An iterator that yields the literal escape code of a `char`.
@@ -488,9 +515,9 @@ pub struct EscapeDefault {
#[derive(Clone, Debug)]
enum EscapeDefaultState {
- Backslash(char),
- Char(char),
Done,
+ Char(char),
+ Backslash(char),
Unicode(EscapeUnicode),
}
@@ -513,22 +540,15 @@ impl Iterator for EscapeDefault {
}
}
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- match self.state {
- EscapeDefaultState::Char(_) => (1, Some(1)),
- EscapeDefaultState::Backslash(_) => (2, Some(2)),
- EscapeDefaultState::Unicode(ref iter) => iter.size_hint(),
- EscapeDefaultState::Done => (0, Some(0)),
- }
+ let n = self.len();
+ (n, Some(n))
}
+ #[inline]
fn count(self) -> usize {
- match self.state {
- EscapeDefaultState::Char(_) => 1,
- EscapeDefaultState::Unicode(iter) => iter.count(),
- EscapeDefaultState::Done => 0,
- EscapeDefaultState::Backslash(_) => 2,
- }
+ self.len()
}
fn nth(&mut self, n: usize) -> Option<char> {
@@ -568,6 +588,18 @@ impl Iterator for EscapeDefault {
}
}
+#[stable(feature = "exact_size_escape", since = "1.11.0")]
+impl ExactSizeIterator for EscapeDefault {
+ fn len(&self) -> usize {
+ match self.state {
+ EscapeDefaultState::Done => 0,
+ EscapeDefaultState::Char(_) => 1,
+ EscapeDefaultState::Backslash(_) => 2,
+ EscapeDefaultState::Unicode(ref iter) => iter.len(),
+ }
+ }
+}
+
/// An iterator over `u8` entries represending the UTF-8 encoding of a `char`
/// value.
///
diff --git a/libcore/clone.rs b/libcore/clone.rs
index a9c5684..c7a8fa8 100644
--- a/libcore/clone.rs
+++ b/libcore/clone.rs
@@ -46,9 +46,42 @@
use marker::Sized;
-/// A common trait for cloning an object.
+/// A common trait for the ability to explicitly duplicate an object.
///
-/// This trait can be used with `#[derive]`.
+/// Differs from `Copy` in that `Copy` is implicit and extremely inexpensive, while
+/// `Clone` is always explicit and may or may not be expensive. In order to enforce
+/// these characteristics, Rust does not allow you to reimplement `Copy`, but you
+/// may reimplement `Clone` and run arbitrary code.
+///
+/// Since `Clone` is more general than `Copy`, you can automatically make anything
+/// `Copy` be `Clone` as well.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d
+/// implementation of `clone()` calls `clone()` on each field.
+///
+/// ## How can I implement `Clone`?
+///
+/// Types that are `Copy` should have a trivial implementation of `Clone`. More formally:
+/// if `T: Copy`, `x: T`, and `y: &T`, then `let x = y.clone();` is equivalent to `let x = *y;`.
+/// Manual implementations should be careful to uphold this invariant; however, unsafe code
+/// must not rely on it to ensure memory safety.
+///
+/// An example is an array holding more than 32 elements of a type that is `Clone`; the standard
+/// library only implements `Clone` up until arrays of size 32. In this case, the implementation of
+/// `Clone` cannot be `derive`d, but can be implemented as:
+///
+/// ```
+/// #[derive(Copy)]
+/// struct Stats {
+/// frequencies: [i32; 100],
+/// }
+///
+/// impl Clone for Stats {
+/// fn clone(&self) -> Stats { *self }
+/// }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Clone : Sized {
/// Returns a copy of the value.
diff --git a/libcore/cmp.rs b/libcore/cmp.rs
index d3481ba..8764766 100644
--- a/libcore/cmp.rs
+++ b/libcore/cmp.rs
@@ -53,12 +53,43 @@ use option::Option::{self, Some};
/// symmetrically and transitively: if `T: PartialEq<U>` and `U: PartialEq<V>`
/// then `U: PartialEq<T>` and `T: PartialEq<V>`.
///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`. When `derive`d on structs, two
+/// instances are equal if all fields are equal, and not equal if any fields
+/// are not equal. When `derive`d on enums, each variant is equal to itself
+/// and not equal to the other variants.
+///
+/// ## How can I implement `PartialEq`?
+///
/// PartialEq only requires the `eq` method to be implemented; `ne` is defined
/// in terms of it by default. Any manual implementation of `ne` *must* respect
/// the rule that `eq` is a strict inverse of `ne`; that is, `!(a == b)` if and
/// only if `a != b`.
///
-/// This trait can be used with `#[derive]`.
+/// An example implementation for a domain in which two books are considered
+/// the same book if their ISBN matches, even if the formats differ:
+///
+/// ```
+/// enum BookFormat { Paperback, Hardback, Ebook }
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+///
+/// impl PartialEq for Book {
+/// fn eq(&self, other: &Book) -> bool {
+/// self.isbn == other.isbn
+/// }
+/// }
+///
+/// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
+/// let b2 = Book { isbn: 3, format: BookFormat::Ebook };
+/// let b3 = Book { isbn: 10, format: BookFormat::Paperback };
+///
+/// assert!(b1 == b2);
+/// assert!(b1 != b3);
+/// ```
///
/// # Examples
///
@@ -96,7 +127,32 @@ pub trait PartialEq<Rhs: ?Sized = Self> {
/// This property cannot be checked by the compiler, and therefore `Eq` implies
/// `PartialEq`, and has no extra methods.
///
-/// This trait can be used with `#[derive]`.
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has
+/// no extra methods, it is only informing the compiler that this is an
+/// equivalence relation rather than a partial equivalence relation. Note that
+/// the `derive` strategy requires all fields are `PartialEq`, which isn't
+/// always desired.
+///
+/// ## How can I implement `Eq`?
+///
+/// If you cannot use the `derive` strategy, specify that your type implements
+/// `Eq`, which has no methods:
+///
+/// ```
+/// enum BookFormat { Paperback, Hardback, Ebook }
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+/// impl PartialEq for Book {
+/// fn eq(&self, other: &Book) -> bool {
+/// self.isbn == other.isbn
+/// }
+/// }
+/// impl Eq for Book {}
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Eq: PartialEq<Self> {
// FIXME #13101: this method is used solely by #[deriving] to
@@ -190,8 +246,49 @@ impl Ordering {
/// - total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true; and
/// - transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
+/// ## Derivable
+///
/// This trait can be used with `#[derive]`. When `derive`d, it will produce a lexicographic
/// ordering based on the top-to-bottom declaration order of the struct's members.
+///
+/// ## How can I implement `Ord`?
+///
+/// `Ord` requires that the type also be `PartialOrd` and `Eq` (which requires `PartialEq`).
+///
+/// Then you must define an implementation for `cmp()`. You may find it useful to use
+/// `cmp()` on your type's fields.
+///
+/// Here's an example where you want to sort people by height only, disregarding `id`
+/// and `name`:
+///
+/// ```
+/// use std::cmp::Ordering;
+///
+/// #[derive(Eq)]
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// height: u32,
+/// }
+///
+/// impl Ord for Person {
+/// fn cmp(&self, other: &Person) -> Ordering {
+/// self.height.cmp(&other.height)
+/// }
+/// }
+///
+/// impl PartialOrd for Person {
+/// fn partial_cmp(&self, other: &Person) -> Option<Ordering> {
+/// Some(self.cmp(other))
+/// }
+/// }
+///
+/// impl PartialEq for Person {
+/// fn eq(&self, other: &Person) -> bool {
+/// self.height == other.height
+/// }
+/// }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Ord: Eq + PartialOrd<Self> {
/// This method returns an `Ordering` between `self` and `other`.
@@ -242,6 +339,13 @@ impl PartialOrd for Ordering {
/// transitively: if `T: PartialOrd<U>` and `U: PartialOrd<V>` then `U: PartialOrd<T>` and `T:
/// PartialOrd<V>`.
///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`. When `derive`d, it will produce a lexicographic
+/// ordering based on the top-to-bottom declaration order of the struct's members.
+///
+/// ## How can I implement `PartialOrd`?
+///
/// PartialOrd only requires implementation of the `partial_cmp` method, with the others generated
/// from default implementations.
///
@@ -249,8 +353,64 @@ impl PartialOrd for Ordering {
/// total order. For example, for floating point numbers, `NaN < 0 == false` and `NaN >= 0 ==
/// false` (cf. IEEE 754-2008 section 5.11).
///
-/// This trait can be used with `#[derive]`. When `derive`d, it will produce an ordering
-/// based on the top-to-bottom declaration order of the struct's members.
+/// `PartialOrd` requires your type to be `PartialEq`.
+///
+/// If your type is `Ord`, you can implement `partial_cmp()` by using `cmp()`:
+///
+/// ```
+/// use std::cmp::Ordering;
+///
+/// #[derive(Eq)]
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// height: u32,
+/// }
+///
+/// impl PartialOrd for Person {
+/// fn partial_cmp(&self, other: &Person) -> Option<Ordering> {
+/// Some(self.cmp(other))
+/// }
+/// }
+///
+/// impl Ord for Person {
+/// fn cmp(&self, other: &Person) -> Ordering {
+/// self.height.cmp(&other.height)
+/// }
+/// }
+///
+/// impl PartialEq for Person {
+/// fn eq(&self, other: &Person) -> bool {
+/// self.height == other.height
+/// }
+/// }
+/// ```
+///
+/// You may also find it useful to use `partial_cmp()` on your type's fields. Here
+/// is an example of `Person` types who have a floating-point `height` field that
+/// is the only field to be used for sorting:
+///
+/// ```
+/// use std::cmp::Ordering;
+///
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// height: f64,
+/// }
+///
+/// impl PartialOrd for Person {
+/// fn partial_cmp(&self, other: &Person) -> Option<Ordering> {
+/// self.height.partial_cmp(&other.height)
+/// }
+/// }
+///
+/// impl PartialEq for Person {
+/// fn eq(&self, other: &Person) -> bool {
+/// self.height == other.height
+/// }
+/// }
+/// ```
///
/// # Examples
///
diff --git a/libcore/convert.rs b/libcore/convert.rs
index 2d99986..48421ab 100644
--- a/libcore/convert.rs
+++ b/libcore/convert.rs
@@ -20,18 +20,19 @@
//! - Impl the `As*` traits for reference-to-reference conversions
//! - Impl the `Into` trait when you want to consume the value in the conversion
//! - The `From` trait is the most flexible, useful for value _and_ reference conversions
+//! - The `TryFrom` and `TryInto` traits behave like `From` and `Into`, but allow for the
+//! conversion to fail
//!
-//! As a library author, you should prefer implementing `From<T>` rather than
-//! `Into<U>`, as `From` provides greater flexibility and offers an equivalent `Into`
-//! implementation for free, thanks to a blanket implementation in the standard library.
-//!
-//! **Note: these traits must not fail**. If the conversion can fail, you must use a dedicated
-//! method which returns an `Option<T>` or a `Result<T, E>`.
+//! As a library author, you should prefer implementing `From<T>` or `TryFrom<T>` rather than
+//! `Into<U>` or `TryInto<U>`, as `From` and `TryFrom` provide greater flexibility and offer
+//! equivalent `Into` or `TryInto` implementations for free, thanks to a blanket implementation
+//! in the standard library.
//!
//! # Generic impl
//!
//! - `AsRef` and `AsMut` auto-dereference if the inner type is a reference
//! - `From<U> for T` implies `Into<T> for U`
+//! - `TryFrom<U> for T` implies `TryInto<T> for U`
//! - `From` and `Into` are reflexive, which means that all types can `into()`
//! themselves and `from()` themselves
//!
@@ -40,6 +41,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use marker::Sized;
+use result::Result;
/// A cheap, reference-to-reference conversion.
///
@@ -98,8 +100,8 @@ pub trait AsMut<T: ?Sized> {
/// A conversion that consumes `self`, which may or may not be expensive.
///
-/// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which
-/// returns an `Option<T>` or a `Result<T, E>`.
+/// **Note: this trait must not fail**. If the conversion can fail, use `TryInto` or a dedicated
+/// method which returns an `Option<T>` or a `Result<T, E>`.
///
/// Library authors should not directly implement this trait, but should prefer implementing
/// the `From` trait, which offers greater flexibility and provides an equivalent `Into`
@@ -133,8 +135,8 @@ pub trait Into<T>: Sized {
/// Construct `Self` via a conversion.
///
-/// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which
-/// returns an `Option<T>` or a `Result<T, E>`.
+/// **Note: this trait must not fail**. If the conversion can fail, use `TryFrom` or a dedicated
+/// method which returns an `Option<T>` or a `Result<T, E>`.
///
/// # Examples
///
@@ -158,6 +160,30 @@ pub trait From<T>: Sized {
fn from(T) -> Self;
}
+/// An attempted conversion that consumes `self`, which may or may not be expensive.
+///
+/// Library authors should not directly implement this trait, but should prefer implementing
+/// the `TryFrom` trait, which offers greater flexibility and provides an equivalent `TryInto`
+/// implementation for free, thanks to a blanket implementation in the standard library.
+#[unstable(feature = "try_from", issue = "33417")]
+pub trait TryInto<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ type Err;
+
+ /// Performs the conversion.
+ fn try_into(self) -> Result<T, Self::Err>;
+}
+
+/// Attempt to construct `Self` via a conversion.
+#[unstable(feature = "try_from", issue = "33417")]
+pub trait TryFrom<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ type Err;
+
+ /// Performs the conversion.
+ fn try_from(T) -> Result<Self, Self::Err>;
+}
+
////////////////////////////////////////////////////////////////////////////////
// GENERIC IMPLS
////////////////////////////////////////////////////////////////////////////////
@@ -216,6 +242,17 @@ impl<T> From<T> for T {
fn from(t: T) -> T { t }
}
+
+// TryFrom implies TryInto
+#[unstable(feature = "try_from", issue = "33417")]
+impl<T, U> TryInto<U> for T where U: TryFrom<T> {
+ type Err = U::Err;
+
+ fn try_into(self) -> Result<U, U::Err> {
+ U::try_from(self)
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
// CONCRETE IMPLS
////////////////////////////////////////////////////////////////////////////////
diff --git a/libcore/default.rs b/libcore/default.rs
index 4ec4fb6..028e820 100644
--- a/libcore/default.rs
+++ b/libcore/default.rs
@@ -9,76 +9,6 @@
// except according to those terms.
//! The `Default` trait for types which may have meaningful default values.
-//!
-//! Sometimes, you want to fall back to some kind of default value, and
-//! don't particularly care what it is. This comes up often with `struct`s
-//! that define a set of options:
-//!
-//! ```
-//! # #[allow(dead_code)]
-//! struct SomeOptions {
-//! foo: i32,
-//! bar: f32,
-//! }
-//! ```
-//!
-//! How can we define some default values? You can use `Default`:
-//!
-//! ```
-//! # #[allow(dead_code)]
-//! #[derive(Default)]
-//! struct SomeOptions {
-//! foo: i32,
-//! bar: f32,
-//! }
-//!
-//!
-//! fn main() {
-//! let options: SomeOptions = Default::default();
-//! }
-//! ```
-//!
-//! Now, you get all of the default values. Rust implements `Default` for various primitives types.
-//! If you have your own type, you need to implement `Default` yourself:
-//!
-//! ```
-//! # #![allow(dead_code)]
-//! enum Kind {
-//! A,
-//! B,
-//! C,
-//! }
-//!
-//! impl Default for Kind {
-//! fn default() -> Kind { Kind::A }
-//! }
-//!
-//! #[derive(Default)]
-//! struct SomeOptions {
-//! foo: i32,
-//! bar: f32,
-//! baz: Kind,
-//! }
-//!
-//!
-//! fn main() {
-//! let options: SomeOptions = Default::default();
-//! }
-//! ```
-//!
-//! If you want to override a particular option, but still retain the other defaults:
-//!
-//! ```
-//! # #[allow(dead_code)]
-//! # #[derive(Default)]
-//! # struct SomeOptions {
-//! # foo: i32,
-//! # bar: f32,
-//! # }
-//! fn main() {
-//! let options = SomeOptions { foo: 42, ..Default::default() };
-//! }
-//! ```
#![stable(feature = "rust1", since = "1.0.0")]
@@ -86,8 +16,72 @@ use marker::Sized;
/// A trait for giving a type a useful default value.
///
-/// A struct can derive default implementations of `Default` for basic types using
-/// `#[derive(Default)]`.
+/// Sometimes, you want to fall back to some kind of default value, and
+/// don't particularly care what it is. This comes up often with `struct`s
+/// that define a set of options:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+/// ```
+///
+/// How can we define some default values? You can use `Default`:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// #[derive(Default)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+///
+///
+/// fn main() {
+/// let options: SomeOptions = Default::default();
+/// }
+/// ```
+///
+/// Now, you get all of the default values. Rust implements `Default` for various primitive types.
+///
+/// If you want to override a particular option, but still retain the other defaults:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// # #[derive(Default)]
+/// # struct SomeOptions {
+/// # foo: i32,
+/// # bar: f32,
+/// # }
+/// fn main() {
+/// let options = SomeOptions { foo: 42, ..Default::default() };
+/// }
+/// ```
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all of the type's fields implement
+/// `Default`. When `derive`d, it will use the default value for each field's type.
+///
+/// ## How can I implement `Default`?
+///
+/// Provide an implementation for the `default()` method that returns the value of
+/// your type that should be the default:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// enum Kind {
+/// A,
+/// B,
+/// C,
+/// }
+///
+/// impl Default for Kind {
+/// fn default() -> Kind { Kind::A }
+/// }
+/// ```
///
/// # Examples
///
diff --git a/libcore/fmt/mod.rs b/libcore/fmt/mod.rs
index 8b92561..bf5a26b 100644
--- a/libcore/fmt/mod.rs
+++ b/libcore/fmt/mod.rs
@@ -319,7 +319,11 @@ impl<'a> Display for Arguments<'a> {
///
/// [module]: ../../std/fmt/index.html
///
-/// This trait can be used with `#[derive]`.
+/// This trait can be used with `#[derive]` if all fields implement `Debug`. When
+/// `derive`d for structs, it will use the name of the `struct`, then `{`, then a
+/// comma-separated list of each field's name and `Debug` value, then `}`. For
+/// `enum`s, it will use the name of the variant and, if applicable, `(`, then the
+/// `Debug` values of the fields, then `)`.
///
/// # Examples
///
@@ -777,6 +781,32 @@ pub trait UpperExp {
///
/// * output - the buffer to write output to
/// * args - the precompiled arguments generated by `format_args!`
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::fmt;
+///
+/// let mut output = String::new();
+/// fmt::write(&mut output, format_args!("Hello {}!", "world"))
+/// .expect("Error occurred while trying to write in String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// Please note that using [`write!`][write_macro] might be preferable. Example:
+///
+/// ```
+/// use std::fmt::Write;
+///
+/// let mut output = String::new();
+/// write!(&mut output, "Hello {}!", "world")
+/// .expect("Error occurred while trying to write in String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// [write_macro]: ../../std/macro.write!.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn write(output: &mut Write, args: Arguments) -> Result {
let mut formatter = Formatter {
diff --git a/libcore/fmt/num.rs b/libcore/fmt/num.rs
index a944c99..d55e031 100644
--- a/libcore/fmt/num.rs
+++ b/libcore/fmt/num.rs
@@ -29,6 +29,7 @@ trait Int: Zero + PartialEq + PartialOrd + Div<Output=Self> + Rem<Output=Self> +
Sub<Output=Self> + Copy {
fn from_u8(u: u8) -> Self;
fn to_u8(&self) -> u8;
+ fn to_u16(&self) -> u16;
fn to_u32(&self) -> u32;
fn to_u64(&self) -> u64;
}
@@ -37,6 +38,7 @@ macro_rules! doit {
($($t:ident)*) => ($(impl Int for $t {
fn from_u8(u: u8) -> $t { u as $t }
fn to_u8(&self) -> u8 { *self as u8 }
+ fn to_u16(&self) -> u16 { *self as u16 }
fn to_u32(&self) -> u32 { *self as u32 }
fn to_u64(&self) -> u64 { *self as u64 }
})*)
@@ -256,6 +258,8 @@ macro_rules! impl_Display {
impl_Display!(i8, u8, i16, u16, i32, u32: to_u32);
impl_Display!(i64, u64: to_u64);
+#[cfg(target_pointer_width = "16")]
+impl_Display!(isize, usize: to_u16);
#[cfg(target_pointer_width = "32")]
impl_Display!(isize, usize: to_u32);
#[cfg(target_pointer_width = "64")]
diff --git a/libcore/hash/mod.rs b/libcore/hash/mod.rs
index 4d0fed9..051eb97 100644
--- a/libcore/hash/mod.rs
+++ b/libcore/hash/mod.rs
@@ -38,7 +38,7 @@
//! ```
//!
//! If you need more control over how a value is hashed, you need to implement
-//! the trait `Hash`:
+//! the `Hash` trait:
//!
//! ```rust
//! use std::hash::{Hash, Hasher, SipHasher};
@@ -97,7 +97,33 @@ mod sip;
/// In other words, if two keys are equal, their hashes should also be equal.
/// `HashMap` and `HashSet` both rely on this behavior.
///
-/// This trait can be used with `#[derive]`.
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all fields implement `Hash`.
+/// When `derive`d, the resulting hash will be the combination of the values
+/// from calling `.hash()` on each field.
+///
+/// ## How can I implement `Hash`?
+///
+/// If you need more control over how a value is hashed, you need to implement
+/// the `Hash` trait:
+///
+/// ```
+/// use std::hash::{Hash, Hasher};
+///
+/// struct Person {
+/// id: u32,
+/// name: String,
+/// phone: u64,
+/// }
+///
+/// impl Hash for Person {
+/// fn hash<H: Hasher>(&self, state: &mut H) {
+/// self.id.hash(state);
+/// self.phone.hash(state);
+/// }
+/// }
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Hash {
/// Feeds this value into the state given, updating the hasher as necessary.
diff --git a/libcore/intrinsics.rs b/libcore/intrinsics.rs
index dd564b7..baf0838 100644
--- a/libcore/intrinsics.rs
+++ b/libcore/intrinsics.rs
@@ -10,7 +10,7 @@
//! rustc compiler intrinsics.
//!
-//! The corresponding definitions are in librustc_trans/trans/intrinsic.rs.
+//! The corresponding definitions are in librustc_trans/intrinsic.rs.
//!
//! # Volatiles
//!
@@ -168,6 +168,16 @@ extern "rust-intrinsic" {
pub fn atomic_singlethreadfence_rel();
pub fn atomic_singlethreadfence_acqrel();
+ /// Magic intrinsic that derives its meaning from attributes
+ /// attached to the function.
+ ///
+ /// For example, dataflow uses this to inject static assertions so
+ /// that `rustc_peek(potentially_uninitialized)` would actually
+ /// double-check that dataflow did indeed compute that it is
+ /// uninitialized at that point in the control flow.
+ #[cfg(not(stage0))]
+ pub fn rustc_peek<T>(_: T) -> T;
+
/// Aborts the execution of the process.
pub fn abort() -> !;
@@ -192,11 +202,8 @@ extern "rust-intrinsic" {
/// The size of a type in bytes.
///
- /// This is the exact number of bytes in memory taken up by a
- /// value of the given type. In other words, a memset of this size
- /// would *exactly* overwrite a value. When laid out in vectors
- /// and structures there may be additional padding between
- /// elements.
+ /// More specifically, this is the offset in bytes between successive
+ /// items of the same type, including alignment padding.
pub fn size_of<T>() -> usize;
/// Moves a value to an uninitialized memory location.
diff --git a/libcore/iter/iterator.rs b/libcore/iter/iterator.rs
index 2033ae5..71ca5cc 100644
--- a/libcore/iter/iterator.rs
+++ b/libcore/iter/iterator.rs
@@ -172,6 +172,7 @@ pub trait Iterator {
/// assert_eq!(a.iter().count(), 5);
/// ```
#[inline]
+ #[rustc_inherit_overflow_checks]
#[stable(feature = "rust1", since = "1.0.0")]
fn count(self) -> usize where Self: Sized {
// Might overflow.
@@ -214,7 +215,7 @@ pub trait Iterator {
/// Like most indexing operations, the count starts from zero, so `nth(0)`
/// returns the first value, `nth(1)` the second, and so on.
///
- /// `nth()` will return `None` if `n` is larger than the length of the
+ /// `nth()` will return `None` if `n` is greater than or equal to the length of the
/// iterator.
///
/// # Examples
@@ -237,7 +238,7 @@ pub trait Iterator {
/// assert_eq!(iter.nth(1), None);
/// ```
///
- /// Returning `None` if there are less than `n` elements:
+ /// Returning `None` if there are fewer than `n + 1` elements:
///
/// ```
/// let a = [1, 2, 3];
diff --git a/libcore/iter/mod.rs b/libcore/iter/mod.rs
index abc199c..ae1e311 100644
--- a/libcore/iter/mod.rs
+++ b/libcore/iter/mod.rs
@@ -510,6 +510,7 @@ impl<A, B> Iterator for Chain<A, B> where
}
#[inline]
+ #[rustc_inherit_overflow_checks]
fn count(self) -> usize {
match self.state {
ChainState::Both => self.a.count() + self.b.count(),
@@ -542,6 +543,23 @@ impl<A, B> Iterator for Chain<A, B> where
}
#[inline]
+ fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item> where
+ P: FnMut(&Self::Item) -> bool,
+ {
+ match self.state {
+ ChainState::Both => match self.a.find(&mut predicate) {
+ None => {
+ self.state = ChainState::Back;
+ self.b.find(predicate)
+ }
+ v => v
+ },
+ ChainState::Front => self.a.find(predicate),
+ ChainState::Back => self.b.find(predicate),
+ }
+ }
+
+ #[inline]
fn last(self) -> Option<A::Item> {
match self.state {
ChainState::Both => {
@@ -915,6 +933,7 @@ impl<I> Iterator for Enumerate<I> where I: Iterator {
///
/// Might panic if the index of the element overflows a `usize`.
#[inline]
+ #[rustc_inherit_overflow_checks]
fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
self.iter.next().map(|a| {
let ret = (self.count, a);
@@ -930,6 +949,7 @@ impl<I> Iterator for Enumerate<I> where I: Iterator {
}
#[inline]
+ #[rustc_inherit_overflow_checks]
fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> {
self.iter.nth(n).map(|a| {
let i = self.count + n;
@@ -991,6 +1011,7 @@ impl<I: Iterator> Iterator for Peekable<I> {
}
#[inline]
+ #[rustc_inherit_overflow_checks]
fn count(self) -> usize {
(if self.peeked.is_some() { 1 } else { 0 }) + self.iter.count()
}
@@ -1108,6 +1129,7 @@ impl<I: Iterator> Peekable<I> {
/// ```
#[unstable(feature = "peekable_is_empty", issue = "32111")]
#[inline]
+ #[rustc_deprecated(since = "1.10.0", reason = "replaced by .peek().is_none()")]
pub fn is_empty(&mut self) -> bool {
self.peek().is_none()
}
diff --git a/libcore/lib.rs b/libcore/lib.rs
index e12c690..7ae6b30 100644
--- a/libcore/lib.rs
+++ b/libcore/lib.rs
@@ -60,9 +60,12 @@
#![cfg_attr(not(stage0), deny(warnings))]
#![feature(allow_internal_unstable)]
+#![feature(asm)]
#![feature(associated_type_defaults)]
+#![feature(cfg_target_feature)]
#![feature(concat_idents)]
#![feature(const_fn)]
+#![feature(cfg_target_has_atomic)]
#![feature(custom_attribute)]
#![feature(fundamental)]
#![feature(inclusive_range_syntax)]
diff --git a/libcore/macros.rs b/libcore/macros.rs
index ad90b44..a40608b 100644
--- a/libcore/macros.rs
+++ b/libcore/macros.rs
@@ -86,7 +86,7 @@ macro_rules! assert {
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! assert_eq {
($left:expr , $right:expr) => ({
- match (&($left), &($right)) {
+ match (&$left, &$right) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
panic!("assertion failed: `(left == right)` \
diff --git a/libcore/marker.rs b/libcore/marker.rs
index 1ed2a21..c18d230 100644
--- a/libcore/marker.rs
+++ b/libcore/marker.rs
@@ -136,6 +136,26 @@ pub trait Unsize<T: ?Sized> {
/// the trait `Copy` may not be implemented for this type; field `points` does not implement `Copy`
/// ```
///
+/// ## When can my type _not_ be `Copy`?
+///
+/// Some types can't be copied safely. For example, copying `&mut T` would create an aliased
+/// mutable reference, and copying `String` would result in two attempts to free the same buffer.
+///
+/// Generalizing the latter case, any type implementing `Drop` can't be `Copy`, because it's
+/// managing some resource besides its own `size_of::<T>()` bytes.
+///
+/// ## When should my type be `Copy`?
+///
+/// Generally speaking, if your type _can_ implement `Copy`, it should. There's one important thing
+/// to consider though: if you think your type may _not_ be able to implement `Copy` in the future,
+/// then it might be prudent to not implement `Copy`. This is because removing `Copy` is a breaking
+/// change: that second example would fail to compile if we made `Foo` non-`Copy`.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all of its components implement `Copy` and the type
+/// implements `Clone`. The implementation will copy the bytes of each field using `memcpy`.
+///
/// ## How can I implement `Copy`?
///
/// There are two ways to implement `Copy` on your type:
@@ -155,25 +175,6 @@ pub trait Unsize<T: ?Sized> {
///
/// There is a small difference between the two: the `derive` strategy will also place a `Copy`
/// bound on type parameters, which isn't always desired.
-///
-/// ## When can my type _not_ be `Copy`?
-///
-/// Some types can't be copied safely. For example, copying `&mut T` would create an aliased
-/// mutable reference, and copying `String` would result in two attempts to free the same buffer.
-///
-/// Generalizing the latter case, any type implementing `Drop` can't be `Copy`, because it's
-/// managing some resource besides its own `size_of::<T>()` bytes.
-///
-/// ## When should my type be `Copy`?
-///
-/// Generally speaking, if your type _can_ implement `Copy`, it should. There's one important thing
-/// to consider though: if you think your type may _not_ be able to implement `Copy` in the future,
-/// then it might be prudent to not implement `Copy`. This is because removing `Copy` is a breaking
-/// change: that second example would fail to compile if we made `Foo` non-`Copy`.
-///
-/// # Derivable
-///
-/// This trait can be used with `#[derive]`.
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "copy"]
pub trait Copy : Clone {
diff --git a/libcore/mem.rs b/libcore/mem.rs
index 2c648d1..5c2179c 100644
--- a/libcore/mem.rs
+++ b/libcore/mem.rs
@@ -110,6 +110,7 @@ pub use intrinsics::transmute;
/// }
/// }
/// ```
+#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
unsafe { intrinsics::forget(t) }
@@ -117,6 +118,9 @@ pub fn forget<T>(t: T) {
/// Returns the size of a type in bytes.
///
+/// More specifically, this is the offset in bytes between successive
+/// items of the same type, including alignment padding.
+///
/// # Examples
///
/// ```
@@ -514,6 +518,10 @@ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
+macro_rules! repeat_u8_as_u16 {
+ ($name:expr) => { (($name as u16) << 8 |
+ ($name as u16)) }
+}
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
@@ -539,11 +547,18 @@ macro_rules! repeat_u8_as_u64 {
pub const POST_DROP_U8: u8 = 0x1d;
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
+pub const POST_DROP_U16: u16 = repeat_u8_as_u16!(POST_DROP_U8);
+#[unstable(feature = "filling_drop", issue = "5016")]
+#[allow(missing_docs)]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
+#[cfg(target_pointer_width = "16")]
+#[unstable(feature = "filling_drop", issue = "5016")]
+#[allow(missing_docs)]
+pub const POST_DROP_USIZE: usize = POST_DROP_U16 as usize;
#[cfg(target_pointer_width = "32")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
diff --git a/libcore/num/bignum.rs b/libcore/num/bignum.rs
index 66c6deb..a881b53 100644
--- a/libcore/num/bignum.rs
+++ b/libcore/num/bignum.rs
@@ -33,7 +33,7 @@ use mem;
use intrinsics;
/// Arithmetic operations required by bignums.
-pub trait FullOps {
+pub trait FullOps: Sized {
/// Returns `(carry', v')` such that `carry' * 2^W + v' = self + other + carry`,
/// where `W` is the number of bits in `Self`.
fn full_add(self, other: Self, carry: bool) -> (bool /*carry*/, Self);
diff --git a/libcore/num/dec2flt/algorithm.rs b/libcore/num/dec2flt/algorithm.rs
index e33c281..c7af46a 100644
--- a/libcore/num/dec2flt/algorithm.rs
+++ b/libcore/num/dec2flt/algorithm.rs
@@ -32,19 +32,80 @@ fn power_of_ten(e: i16) -> Fp {
Fp { f: sig, e: exp }
}
+// On most architectures, floating point operations have an explicit bit size, therefore the
+// precision of the computation is determined on a per-operation basis.
+#[cfg(any(not(target_arch="x86"), target_feature="sse2"))]
+mod fpu_precision {
+ pub fn set_precision<T>() { }
+}
+
+// On x86, the x87 FPU is used for float operations if the SSE/SSE2 extensions are not available.
+// The x87 FPU operates with 80 bits of precision by default, which means that operations will
+// round to 80 bits causing double rounding to happen when values are eventually represented as
+// 32/64 bit float values. To overcome this, the FPU control word can be set so that the
+// computations are performed in the desired precision.
+#[cfg(all(target_arch="x86", not(target_feature="sse2")))]
+mod fpu_precision {
+ use mem::size_of;
+ use ops::Drop;
+
+ /// A structure used to preserve the original value of the FPU control word, so that it can be
+ /// restored when the structure is dropped.
+ ///
+    /// The x87 FPU control word is a 16-bit register whose fields are as follows:
+ ///
+ /// | 12-15 | 10-11 | 8-9 | 6-7 | 5 | 4 | 3 | 2 | 1 | 0 |
+ /// |------:|------:|----:|----:|---:|---:|---:|---:|---:|---:|
+ /// | | RC | PC | | PM | UM | OM | ZM | DM | IM |
+ ///
+ /// The documentation for all of the fields is available in the IA-32 Architectures Software
+ /// Developer's Manual (Volume 1).
+ ///
+ /// The only field which is relevant for the following code is PC, Precision Control. This
+ /// field determines the precision of the operations performed by the FPU. It can be set to:
+ /// - 0b00, single precision i.e. 32-bits
+ /// - 0b10, double precision i.e. 64-bits
+ /// - 0b11, double extended precision i.e. 80-bits (default state)
+ /// The 0b01 value is reserved and should not be used.
+ pub struct FPUControlWord(u16);
+
+ fn set_cw(cw: u16) {
+ unsafe { asm!("fldcw $0" :: "m" (cw) :: "volatile") }
+ }
+
+ /// Set the precision field of the FPU to `T` and return a `FPUControlWord`
+ pub fn set_precision<T>() -> FPUControlWord {
+ let cw = 0u16;
+
+ // Compute the value for the Precision Control field that is appropriate for `T`.
+ let cw_precision = match size_of::<T>() {
+ 4 => 0x0000, // 32 bits
+ 8 => 0x0200, // 64 bits
+ _ => 0x0300, // default, 80 bits
+ };
+
+ // Get the original value of the control word to restore it later, when the
+ // `FPUControlWord` structure is dropped
+ unsafe { asm!("fnstcw $0" : "=*m" (&cw) ::: "volatile") }
+
+ // Set the control word to the desired precision. This is achieved by masking away the old
+ // precision (bits 8 and 9, 0x300) and replacing it with the precision flag computed above.
+ set_cw((cw & 0xFCFF) | cw_precision);
+
+ FPUControlWord(cw)
+ }
+
+ impl Drop for FPUControlWord {
+ fn drop(&mut self) {
+ set_cw(self.0)
+ }
+ }
+}
+
/// The fast path of Bellerophon using machine-sized integers and floats.
///
/// This is extracted into a separate function so that it can be attempted before constructing
/// a bignum.
-///
-/// The fast path crucially depends on arithmetic being correctly rounded, so on x86
-/// without SSE or SSE2 it will be **wrong** (as in, off by one ULP occasionally), because the x87
-/// FPU stack will round to 80 bit first before rounding to 64/32 bit. However, as such hardware
-/// is extremely rare nowadays and in fact all in-tree target triples assume an SSE2-capable
-/// microarchitecture, there is little incentive to deal with that. There's a test that will fail
-/// when SSE or SSE2 is disabled, so people building their own non-SSE copy will get a heads up.
-///
-/// FIXME: It would nevertheless be nice if we had a good way to detect and deal with x87.
pub fn fast_path<T: RawFloat>(integral: &[u8], fractional: &[u8], e: i64) -> Option<T> {
let num_digits = integral.len() + fractional.len();
// log_10(f64::max_sig) ~ 15.95. We compare the exact value to max_sig near the end,
@@ -60,9 +121,17 @@ pub fn fast_path<T: RawFloat>(integral: &[u8], fractional: &[u8], e: i64) -> Opt
if f > T::max_sig() {
return None;
}
+
+ // The fast path crucially depends on arithmetic being rounded to the correct number of bits
+ // without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision
+ // of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit.
+ // The `set_precision` function takes care of setting the precision on architectures which
+ // require setting it by changing the global state (like the control word of the x87 FPU).
+ let _cw = fpu_precision::set_precision::<T>();
+
// The case e < 0 cannot be folded into the other branch. Negative powers result in
// a repeating fractional part in binary, which are rounded, which causes real
- // (and occasioally quite significant!) errors in the final result.
+ // (and occasionally quite significant!) errors in the final result.
if e >= 0 {
Some(T::from_int(f) * T::short_fast_pow10(e as usize))
} else {
diff --git a/libcore/num/int_macros.rs b/libcore/num/int_macros.rs
index 4234925..bd6cfc4 100644
--- a/libcore/num/int_macros.rs
+++ b/libcore/num/int_macros.rs
@@ -10,18 +10,13 @@
#![doc(hidden)]
-macro_rules! int_module { ($T:ty, $bits:expr) => (
+macro_rules! int_module { ($T:ident, $bits:expr) => (
-// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
-// calling the `Bounded::min_value` function.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
-pub const MIN: $T = (-1 as $T) << ($bits - 1);
-// FIXME(#9837): Compute MIN like this so the high bits that shouldn't exist are 0.
-// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of
-// calling the `Bounded::max_value` function.
+pub const MIN: $T = $T::min_value();
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
-pub const MAX: $T = !MIN;
+pub const MAX: $T = $T::max_value();
) }
diff --git a/libcore/num/isize.rs b/libcore/num/isize.rs
index de5b177..86bcef4 100644
--- a/libcore/num/isize.rs
+++ b/libcore/num/isize.rs
@@ -14,6 +14,8 @@
#![stable(feature = "rust1", since = "1.0.0")]
+#[cfg(target_pointer_width = "16")]
+int_module! { isize, 16 }
#[cfg(target_pointer_width = "32")]
int_module! { isize, 32 }
#[cfg(target_pointer_width = "64")]
diff --git a/libcore/num/mod.rs b/libcore/num/mod.rs
index e5f0469..445e346 100644
--- a/libcore/num/mod.rs
+++ b/libcore/num/mod.rs
@@ -15,7 +15,7 @@
use char::CharExt;
use cmp::PartialOrd;
-use convert::From;
+use convert::{From, TryFrom};
use fmt;
use intrinsics;
use marker::{Copy, Sized};
@@ -37,6 +37,17 @@ use slice::SliceExt;
/// `wrapping_add`, or through the `Wrapping<T>` type, which says that
/// all standard arithmetic operations on the underlying value are
/// intended to have wrapping semantics.
+///
+/// # Examples
+///
+/// ```
+/// use std::num::Wrapping;
+///
+/// let zero = Wrapping(0u32);
+/// let one = Wrapping(1u32);
+///
+/// assert_eq!(std::u32::MAX, (zero - one).0);
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)]
pub struct Wrapping<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
@@ -1025,7 +1036,7 @@ macro_rules! int_impl {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
+ #[rustc_inherit_overflow_checks]
pub fn pow(self, mut exp: u32) -> Self {
let mut base = self;
let mut acc = Self::one();
@@ -1067,7 +1078,7 @@ macro_rules! int_impl {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
+ #[rustc_inherit_overflow_checks]
pub fn abs(self) -> Self {
if self.is_negative() {
// Note that the #[inline] above means that the overflow
@@ -1168,6 +1179,15 @@ impl i64 {
intrinsics::mul_with_overflow }
}
+#[cfg(target_pointer_width = "16")]
+#[lang = "isize"]
+impl isize {
+ int_impl! { i16, u16, 16,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
+
#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
impl isize {
@@ -2044,7 +2064,7 @@ macro_rules! uint_impl {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
- #[rustc_no_mir] // FIXME #29769 MIR overflow checking is TBD.
+ #[rustc_inherit_overflow_checks]
pub fn pow(self, mut exp: u32) -> Self {
let mut base = self;
let mut acc = Self::one();
@@ -2180,6 +2200,18 @@ impl u64 {
intrinsics::mul_with_overflow }
}
+#[cfg(target_pointer_width = "16")]
+#[lang = "usize"]
+impl usize {
+ uint_impl! { u16, 16,
+ intrinsics::ctpop,
+ intrinsics::ctlz,
+ intrinsics::cttz,
+ intrinsics::bswap,
+ intrinsics::add_with_overflow,
+ intrinsics::sub_with_overflow,
+ intrinsics::mul_with_overflow }
+}
#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
impl usize {
@@ -2345,9 +2377,101 @@ macro_rules! from_str_radix_int_impl {
}
from_str_radix_int_impl! { isize i8 i16 i32 i64 usize u8 u16 u32 u64 }
+/// The error type returned when a checked integral type conversion fails.
+#[unstable(feature = "try_from", issue = "33417")]
+#[derive(Debug, Copy, Clone)]
+pub struct TryFromIntError(());
+
+impl TryFromIntError {
+ #[unstable(feature = "int_error_internals",
+ reason = "available through Error trait and this method should \
+ not be exposed publicly",
+ issue = "0")]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ "out of range integral type conversion attempted"
+ }
+}
+
+#[unstable(feature = "try_from", issue = "33417")]
+impl fmt::Display for TryFromIntError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ self.__description().fmt(fmt)
+ }
+}
+
+macro_rules! same_sign_from_int_impl {
+ ($storage:ty, $target:ty, $($source:ty),*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl TryFrom<$source> for $target {
+ type Err = TryFromIntError;
+
+ fn try_from(u: $source) -> Result<$target, TryFromIntError> {
+ let min = <$target as FromStrRadixHelper>::min_value() as $storage;
+ let max = <$target as FromStrRadixHelper>::max_value() as $storage;
+ if u as $storage < min || u as $storage > max {
+ Err(TryFromIntError(()))
+ } else {
+ Ok(u as $target)
+ }
+ }
+ }
+ )*}
+}
+
+same_sign_from_int_impl!(u64, u8, u8, u16, u32, u64, usize);
+same_sign_from_int_impl!(i64, i8, i8, i16, i32, i64, isize);
+same_sign_from_int_impl!(u64, u16, u8, u16, u32, u64, usize);
+same_sign_from_int_impl!(i64, i16, i8, i16, i32, i64, isize);
+same_sign_from_int_impl!(u64, u32, u8, u16, u32, u64, usize);
+same_sign_from_int_impl!(i64, i32, i8, i16, i32, i64, isize);
+same_sign_from_int_impl!(u64, u64, u8, u16, u32, u64, usize);
+same_sign_from_int_impl!(i64, i64, i8, i16, i32, i64, isize);
+same_sign_from_int_impl!(u64, usize, u8, u16, u32, u64, usize);
+same_sign_from_int_impl!(i64, isize, i8, i16, i32, i64, isize);
+
+macro_rules! cross_sign_from_int_impl {
+ ($unsigned:ty, $($signed:ty),*) => {$(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl TryFrom<$unsigned> for $signed {
+ type Err = TryFromIntError;
+
+ fn try_from(u: $unsigned) -> Result<$signed, TryFromIntError> {
+ let max = <$signed as FromStrRadixHelper>::max_value() as u64;
+ if u as u64 > max {
+ Err(TryFromIntError(()))
+ } else {
+ Ok(u as $signed)
+ }
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl TryFrom<$signed> for $unsigned {
+ type Err = TryFromIntError;
+
+ fn try_from(u: $signed) -> Result<$unsigned, TryFromIntError> {
+ let max = <$unsigned as FromStrRadixHelper>::max_value() as u64;
+ if u < 0 || u as u64 > max {
+ Err(TryFromIntError(()))
+ } else {
+ Ok(u as $unsigned)
+ }
+ }
+ }
+ )*}
+}
+
+cross_sign_from_int_impl!(u8, i8, i16, i32, i64, isize);
+cross_sign_from_int_impl!(u16, i8, i16, i32, i64, isize);
+cross_sign_from_int_impl!(u32, i8, i16, i32, i64, isize);
+cross_sign_from_int_impl!(u64, i8, i16, i32, i64, isize);
+cross_sign_from_int_impl!(usize, i8, i16, i32, i64, isize);
+
#[doc(hidden)]
trait FromStrRadixHelper: PartialOrd + Copy {
fn min_value() -> Self;
+ fn max_value() -> Self;
fn from_u32(u: u32) -> Self;
fn checked_mul(&self, other: u32) -> Option<Self>;
fn checked_sub(&self, other: u32) -> Option<Self>;
@@ -2357,6 +2481,7 @@ trait FromStrRadixHelper: PartialOrd + Copy {
macro_rules! doit {
($($t:ty)*) => ($(impl FromStrRadixHelper for $t {
fn min_value() -> Self { Self::min_value() }
+ fn max_value() -> Self { Self::max_value() }
fn from_u32(u: u32) -> Self { u as Self }
fn checked_mul(&self, other: u32) -> Option<Self> {
Self::checked_mul(*self, other as Self)
diff --git a/libcore/num/uint_macros.rs b/libcore/num/uint_macros.rs
index 6479836..2ab2f95 100644
--- a/libcore/num/uint_macros.rs
+++ b/libcore/num/uint_macros.rs
@@ -10,13 +10,13 @@
#![doc(hidden)]
-macro_rules! uint_module { ($T:ty, $bits:expr) => (
+macro_rules! uint_module { ($T:ident, $bits:expr) => (
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
-pub const MIN: $T = 0 as $T;
+pub const MIN: $T = $T::min_value();
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(missing_docs)]
-pub const MAX: $T = !0 as $T;
+pub const MAX: $T = $T::max_value();
) }
diff --git a/libcore/num/usize.rs b/libcore/num/usize.rs
index 0c7d16a..685c52e 100644
--- a/libcore/num/usize.rs
+++ b/libcore/num/usize.rs
@@ -14,6 +14,8 @@
#![stable(feature = "rust1", since = "1.0.0")]
+#[cfg(target_pointer_width = "16")]
+uint_module! { usize, 16 }
#[cfg(target_pointer_width = "32")]
uint_module! { usize, 32 }
#[cfg(target_pointer_width = "64")]
diff --git a/libcore/num/wrapping.rs b/libcore/num/wrapping.rs
index e28a36a..4857817 100644
--- a/libcore/num/wrapping.rs
+++ b/libcore/num/wrapping.rs
@@ -275,6 +275,15 @@ macro_rules! wrapping_impl {
*self = *self & other;
}
}
+
+ #[stable(feature = "wrapping_neg", since = "1.10.0")]
+ impl Neg for Wrapping<$t> {
+ type Output = Self;
+ #[inline(always)]
+ fn neg(self) -> Self {
+ Wrapping(0) - self
+ }
+ }
)*)
}
@@ -283,6 +292,12 @@ wrapping_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
mod shift_max {
#![allow(non_upper_case_globals)]
+ #[cfg(target_pointer_width = "16")]
+ mod platform {
+ pub const usize: u32 = super::u16;
+ pub const isize: u32 = super::i16;
+ }
+
#[cfg(target_pointer_width = "32")]
mod platform {
pub const usize: u32 = super::u32;
diff --git a/libcore/ops.rs b/libcore/ops.rs
index 35374db..3f9d3ac 100644
--- a/libcore/ops.rs
+++ b/libcore/ops.rs
@@ -208,6 +208,7 @@ macro_rules! add_impl {
type Output = $t;
#[inline]
+ #[rustc_inherit_overflow_checks]
fn add(self, other: $t) -> $t { self + other }
}
@@ -263,6 +264,7 @@ macro_rules! sub_impl {
type Output = $t;
#[inline]
+ #[rustc_inherit_overflow_checks]
fn sub(self, other: $t) -> $t { self - other }
}
@@ -318,6 +320,7 @@ macro_rules! mul_impl {
type Output = $t;
#[inline]
+ #[rustc_inherit_overflow_checks]
fn mul(self, other: $t) -> $t { self * other }
}
@@ -519,6 +522,7 @@ macro_rules! neg_impl_core {
type Output = $t;
#[inline]
+ #[rustc_inherit_overflow_checks]
fn neg(self) -> $t { let $id = self; $body }
}
@@ -798,6 +802,7 @@ macro_rules! shl_impl {
type Output = $t;
#[inline]
+ #[rustc_inherit_overflow_checks]
fn shl(self, other: $f) -> $t {
self << other
}
@@ -869,6 +874,7 @@ macro_rules! shr_impl {
type Output = $t;
#[inline]
+ #[rustc_inherit_overflow_checks]
fn shr(self, other: $f) -> $t {
self >> other
}
@@ -933,6 +939,7 @@ macro_rules! add_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl AddAssign for $t {
#[inline]
+ #[rustc_inherit_overflow_checks]
fn add_assign(&mut self, other: $t) { *self += other }
}
)+)
@@ -979,6 +986,7 @@ macro_rules! sub_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl SubAssign for $t {
#[inline]
+ #[rustc_inherit_overflow_checks]
fn sub_assign(&mut self, other: $t) { *self -= other }
}
)+)
@@ -1025,6 +1033,7 @@ macro_rules! mul_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl MulAssign for $t {
#[inline]
+ #[rustc_inherit_overflow_checks]
fn mul_assign(&mut self, other: $t) { *self *= other }
}
)+)
@@ -1295,6 +1304,7 @@ macro_rules! shl_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl ShlAssign<$f> for $t {
#[inline]
+ #[rustc_inherit_overflow_checks]
fn shl_assign(&mut self, other: $f) {
*self <<= other
}
@@ -1357,6 +1367,7 @@ macro_rules! shr_assign_impl {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
impl ShrAssign<$f> for $t {
#[inline]
+ #[rustc_inherit_overflow_checks]
fn shr_assign(&mut self, other: $f) {
*self >>= other
}
diff --git a/libcore/raw.rs b/libcore/raw.rs
index 19226d8..6b21224 100644
--- a/libcore/raw.rs
+++ b/libcore/raw.rs
@@ -18,63 +18,6 @@
//!
//! Their definition should always match the ABI defined in `rustc::back::abi`.
-use clone::Clone;
-use marker::Copy;
-use mem;
-
-/// The representation of a slice like `&[T]`.
-///
-/// This struct is guaranteed to have the layout of types like `&[T]`,
-/// `&str`, and `Box<[T]>`, but is not the type of such slices
-/// (e.g. the fields are not directly accessible on a `&[T]`) nor does
-/// it control that layout (changing the definition will not change
-/// the layout of a `&[T]`). It is only designed to be used by unsafe
-/// code that needs to manipulate the low-level details.
-///
-/// However, it is not recommended to use this type for such code,
-/// since there are alternatives which may be safer:
-///
-/// - Creating a slice from a data pointer and length can be done with
-/// `std::slice::from_raw_parts` or `std::slice::from_raw_parts_mut`
-/// instead of `std::mem::transmute`ing a value of type `Slice`.
-/// - Extracting the data pointer and length from a slice can be
-/// performed with the `as_ptr` (or `as_mut_ptr`) and `len`
-/// methods.
-///
-/// If one does decide to convert a slice value to a `Slice`, the
-/// `Repr` trait in this module provides a method for a safe
-/// conversion from `&[T]` (and `&str`) to a `Slice`, more type-safe
-/// than a call to `transmute`.
-///
-/// # Examples
-///
-/// ```
-/// #![feature(raw)]
-///
-/// use std::raw::{self, Repr};
-///
-/// let slice: &[u16] = &[1, 2, 3, 4];
-///
-/// let repr: raw::Slice<u16> = slice.repr();
-/// println!("data pointer = {:?}, length = {}", repr.data, repr.len);
-/// ```
-#[repr(C)]
-#[allow(missing_debug_implementations)]
-#[rustc_deprecated(reason = "use raw accessors/constructors in `slice` module",
- since = "1.9.0")]
-#[unstable(feature = "raw", issue = "27751")]
-pub struct Slice<T> {
- pub data: *const T,
- pub len: usize,
-}
-
-#[allow(deprecated)]
-impl<T> Copy for Slice<T> {}
-#[allow(deprecated)]
-impl<T> Clone for Slice<T> {
- fn clone(&self) -> Slice<T> { *self }
-}
-
/// The representation of a trait object like `&SomeTrait`.
///
/// This struct has the same layout as types like `&SomeTrait` and
@@ -154,22 +97,3 @@ pub struct TraitObject {
pub data: *mut (),
pub vtable: *mut (),
}
-
-/// This trait is meant to map equivalences between raw structs and their
-/// corresponding rust values.
-#[rustc_deprecated(reason = "use raw accessors/constructors in `slice` module",
- since = "1.9.0")]
-#[unstable(feature = "raw", issue = "27751")]
-pub unsafe trait Repr<T> {
- /// This function "unwraps" a rust value (without consuming it) into its raw
- /// struct representation. This can be used to read/write different values
- /// for the struct. This is a safe method because by default it does not
- /// enable write-access to the fields of the return value in safe code.
- #[inline]
- fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
-}
-
-#[allow(deprecated)]
-unsafe impl<T> Repr<Slice<T>> for [T] {}
-#[allow(deprecated)]
-unsafe impl Repr<Slice<u8>> for str {}
diff --git a/libcore/slice.rs b/libcore/slice.rs
index 07f76ac..b6ae6fd 100644
--- a/libcore/slice.rs
+++ b/libcore/slice.rs
@@ -106,6 +106,10 @@ pub trait SliceExt {
#[stable(feature = "core", since = "1.6.0")]
fn binary_search_by<F>(&self, f: F) -> Result<usize, usize>
where F: FnMut(&Self::Item) -> Ordering;
+ #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
+ fn binary_search_by_key<B, F>(&self, b: &B, f: F) -> Result<usize, usize>
+ where F: FnMut(&Self::Item) -> B,
+ B: Ord;
#[stable(feature = "core", since = "1.6.0")]
fn len(&self) -> usize;
#[stable(feature = "core", since = "1.6.0")]
@@ -157,11 +161,6 @@ pub trait SliceExt {
fn clone_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Clone;
#[stable(feature = "copy_from_slice", since = "1.9.0")]
fn copy_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Copy;
-
- #[unstable(feature = "slice_binary_search_by_key", reason = "recently added", issue = "33018")]
- fn binary_search_by_key<B, F>(&self, b: &B, f: F) -> Result<usize, usize>
- where F: FnMut(&Self::Item) -> B,
- B: Ord;
}
// Use macros to be generic over const/mut
@@ -523,6 +522,7 @@ impl<T> SliceExt for [T] {
}
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::Index<usize> for [T] {
type Output = T;
@@ -533,6 +533,7 @@ impl<T> ops::Index<usize> for [T] {
}
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::IndexMut<usize> for [T] {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut T {
@@ -553,7 +554,6 @@ fn slice_index_order_fail(index: usize, end: usize) -> ! {
panic!("slice index starts at {} but ends at {}", index, end);
}
-// FIXME implement indexing with inclusive ranges
/// Implements slicing with syntax `&self[begin .. end]`.
///
@@ -566,6 +566,7 @@ fn slice_index_order_fail(index: usize, end: usize) -> ! {
/// Requires that `begin <= end` and `end <= self.len()`,
/// otherwise slicing will panic.
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::Index<ops::Range<usize>> for [T] {
type Output = [T];
@@ -592,6 +593,7 @@ impl<T> ops::Index<ops::Range<usize>> for [T] {
///
/// Equivalent to `&self[0 .. end]`
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::Index<ops::RangeTo<usize>> for [T] {
type Output = [T];
@@ -607,6 +609,7 @@ impl<T> ops::Index<ops::RangeTo<usize>> for [T] {
///
/// Equivalent to `&self[begin .. self.len()]`
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::Index<ops::RangeFrom<usize>> for [T] {
type Output = [T];
@@ -618,7 +621,7 @@ impl<T> ops::Index<ops::RangeFrom<usize>> for [T] {
/// Implements slicing with syntax `&self[..]`.
///
-/// Returns a slice of the whole slice. This operation can not panic.
+/// Returns a slice of the whole slice. This operation cannot panic.
///
/// Equivalent to `&self[0 .. self.len()]`
#[stable(feature = "rust1", since = "1.0.0")]
@@ -632,6 +635,7 @@ impl<T> ops::Index<RangeFull> for [T] {
}
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::Index<ops::RangeInclusive<usize>> for [T] {
type Output = [T];
@@ -647,6 +651,7 @@ impl<T> ops::Index<ops::RangeInclusive<usize>> for [T] {
}
}
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::Index<ops::RangeToInclusive<usize>> for [T] {
type Output = [T];
@@ -667,6 +672,7 @@ impl<T> ops::Index<ops::RangeToInclusive<usize>> for [T] {
/// Requires that `begin <= end` and `end <= self.len()`,
/// otherwise slicing will panic.
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::IndexMut<ops::Range<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: ops::Range<usize>) -> &mut [T] {
@@ -691,6 +697,7 @@ impl<T> ops::IndexMut<ops::Range<usize>> for [T] {
///
/// Equivalent to `&mut self[0 .. end]`
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::IndexMut<ops::RangeTo<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut [T] {
@@ -704,6 +711,7 @@ impl<T> ops::IndexMut<ops::RangeTo<usize>> for [T] {
///
/// Equivalent to `&mut self[begin .. self.len()]`
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::IndexMut<ops::RangeFrom<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut [T] {
@@ -726,6 +734,7 @@ impl<T> ops::IndexMut<RangeFull> for [T] {
}
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::IndexMut<ops::RangeInclusive<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut [T] {
@@ -739,6 +748,7 @@ impl<T> ops::IndexMut<ops::RangeInclusive<usize>> for [T] {
}
}
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
+#[rustc_on_unimplemented = "slice indices are of type `usize`"]
impl<T> ops::IndexMut<ops::RangeToInclusive<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut [T] {
@@ -1820,6 +1830,9 @@ impl<A> SlicePartialEq<A> for [A]
if self.len() != other.len() {
return false;
}
+ if self.as_ptr() == other.as_ptr() {
+ return true;
+ }
unsafe {
let size = mem::size_of_val(self);
memcmp(self.as_ptr() as *const u8,
@@ -1929,4 +1942,3 @@ macro_rules! impl_marker_for {
impl_marker_for!(BytewiseEquality,
u8 i8 u16 i16 u32 i32 u64 i64 usize isize char bool);
-
diff --git a/libcore/str/mod.rs b/libcore/str/mod.rs
index 2c34caf..5fc15fa 100644
--- a/libcore/str/mod.rs
+++ b/libcore/str/mod.rs
@@ -17,7 +17,7 @@
use self::pattern::Pattern;
use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher};
-use char::{self, CharExt};
+use char;
use clone::Clone;
use convert::AsRef;
use default::Default;
@@ -354,7 +354,7 @@ fn unwrap_or_0(opt: Option<&u8>) -> u8 {
/// UTF-8-like encoding).
#[unstable(feature = "str_internals", issue = "0")]
#[inline]
-pub fn next_code_point(bytes: &mut slice::Iter<u8>) -> Option<u32> {
+pub fn next_code_point<'a, I: Iterator<Item = &'a u8>>(bytes: &mut I) -> Option<u32> {
// Decode UTF-8
let x = match bytes.next() {
None => return None,
@@ -388,7 +388,8 @@ pub fn next_code_point(bytes: &mut slice::Iter<u8>) -> Option<u32> {
/// Reads the last code point out of a byte iterator (assuming a
/// UTF-8-like encoding).
#[inline]
-fn next_code_point_reverse(bytes: &mut slice::Iter<u8>) -> Option<u32> {
+fn next_code_point_reverse<'a,
+ I: DoubleEndedIterator<Item = &'a u8>>(bytes: &mut I) -> Option<u32> {
// Decode UTF-8
let w = match bytes.next_back() {
None => return None,
@@ -1663,40 +1664,6 @@ pub trait StrExt {
where P::Searcher: ReverseSearcher<'a>;
#[stable(feature = "is_char_boundary", since = "1.9.0")]
fn is_char_boundary(&self, index: usize) -> bool;
- #[unstable(feature = "str_char",
- reason = "often replaced by char_indices, this method may \
- be removed in favor of just char_at() or eventually \
- removed altogether",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars() plus len_utf8",
- since = "1.9.0")]
- fn char_range_at(&self, start: usize) -> CharRange;
- #[unstable(feature = "str_char",
- reason = "often replaced by char_indices, this method may \
- be removed in favor of just char_at_reverse() or \
- eventually removed altogether",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars().rev() plus len_utf8",
- since = "1.9.0")]
- fn char_range_at_reverse(&self, start: usize) -> CharRange;
- #[unstable(feature = "str_char",
- reason = "frequently replaced by the chars() iterator, this \
- method may be removed or possibly renamed in the \
- future; it is normally replaced by chars/char_indices \
- iterators or by getting the first char from a \
- subslice",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars()",
- since = "1.9.0")]
- fn char_at(&self, i: usize) -> char;
- #[unstable(feature = "str_char",
- reason = "see char_at for more details, but reverse semantics \
- are also somewhat unclear, especially with which \
- cases generate panics",
- issue = "27754")]
- #[rustc_deprecated(reason = "use slicing plus chars().rev()",
- since = "1.9.0")]
- fn char_at_reverse(&self, i: usize) -> char;
#[stable(feature = "core", since = "1.6.0")]
fn as_bytes(&self) -> &[u8];
#[stable(feature = "core", since = "1.6.0")]
@@ -1709,14 +1676,6 @@ pub trait StrExt {
fn split_at(&self, mid: usize) -> (&str, &str);
#[stable(feature = "core", since = "1.6.0")]
fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str);
- #[unstable(feature = "str_char",
- reason = "awaiting conventions about shifting and slices and \
- may not be warranted with the existence of the chars \
- and/or char_indices iterators",
- issue = "27754")]
- #[rustc_deprecated(reason = "use chars() plus Chars::as_str",
- since = "1.9.0")]
- fn slice_shift_char(&self) -> Option<(char, &str)>;
#[stable(feature = "core", since = "1.6.0")]
fn as_ptr(&self) -> *const u8;
#[stable(feature = "core", since = "1.6.0")]
@@ -1946,55 +1905,6 @@ impl StrExt for str {
}
#[inline]
- fn char_range_at(&self, i: usize) -> CharRange {
- let (c, n) = char_range_at_raw(self.as_bytes(), i);
- CharRange { ch: unsafe { char::from_u32_unchecked(c) }, next: n }
- }
-
- #[inline]
- fn char_range_at_reverse(&self, start: usize) -> CharRange {
- let mut prev = start;
-
- prev = prev.saturating_sub(1);
- if self.as_bytes()[prev] < 128 {
- return CharRange{ch: self.as_bytes()[prev] as char, next: prev}
- }
-
- // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
- fn multibyte_char_range_at_reverse(s: &str, mut i: usize) -> CharRange {
- // while there is a previous byte == 10......
- while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 {
- i -= 1;
- }
-
- let first= s.as_bytes()[i];
- let w = UTF8_CHAR_WIDTH[first as usize];
- assert!(w != 0);
-
- let mut val = utf8_first_byte(first, w as u32);
- val = utf8_acc_cont_byte(val, s.as_bytes()[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte(val, s.as_bytes()[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte(val, s.as_bytes()[i + 3]); }
-
- CharRange {ch: unsafe { char::from_u32_unchecked(val) }, next: i}
- }
-
- multibyte_char_range_at_reverse(self, prev)
- }
-
- #[inline]
- #[allow(deprecated)]
- fn char_at(&self, i: usize) -> char {
- self.char_range_at(i).ch
- }
-
- #[inline]
- #[allow(deprecated)]
- fn char_at_reverse(&self, i: usize) -> char {
- self.char_range_at_reverse(i).ch
- }
-
- #[inline]
fn as_bytes(&self) -> &[u8] {
unsafe { mem::transmute(self) }
}
@@ -2041,18 +1951,6 @@ impl StrExt for str {
}
#[inline]
- #[allow(deprecated)]
- fn slice_shift_char(&self) -> Option<(char, &str)> {
- if self.is_empty() {
- None
- } else {
- let ch = self.char_at(0);
- let next_s = unsafe { self.slice_unchecked(ch.len_utf8(), self.len()) };
- Some((ch, next_s))
- }
- }
-
- #[inline]
fn as_ptr(&self) -> *const u8 {
self as *const str as *const u8
}
@@ -2077,31 +1975,6 @@ impl AsRef<[u8]> for str {
}
}
-/// Pluck a code point out of a UTF-8-like byte slice and return the
-/// index of the next code point.
-#[inline]
-fn char_range_at_raw(bytes: &[u8], i: usize) -> (u32, usize) {
- if bytes[i] < 128 {
- return (bytes[i] as u32, i + 1);
- }
-
- // Multibyte case is a fn to allow char_range_at to inline cleanly
- fn multibyte_char_range_at(bytes: &[u8], i: usize) -> (u32, usize) {
- let first = bytes[i];
- let w = UTF8_CHAR_WIDTH[first as usize];
- assert!(w != 0);
-
- let mut val = utf8_first_byte(first, w as u32);
- val = utf8_acc_cont_byte(val, bytes[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte(val, bytes[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte(val, bytes[i + 3]); }
-
- (val, i + w as usize)
- }
-
- multibyte_char_range_at(bytes, i)
-}
-
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Default for &'a str {
fn default() -> &'a str { "" }
diff --git a/libcore/sync/atomic.rs b/libcore/sync/atomic.rs
index e74dc08..658b131 100644
--- a/libcore/sync/atomic.rs
+++ b/libcore/sync/atomic.rs
@@ -26,8 +26,9 @@
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
-//! but they do not themselves provide the mechanism for sharing. The most
-//! common way to share an atomic variable is to put it into an `Arc` (an
+//! but they do not themselves provide the mechanism for sharing and follow the
+//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
+//! The most common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
@@ -48,12 +49,16 @@
//! let spinlock = Arc::new(AtomicUsize::new(1));
//!
//! let spinlock_clone = spinlock.clone();
-//! thread::spawn(move|| {
+//! let thread = thread::spawn(move|| {
//! spinlock_clone.store(0, Ordering::SeqCst);
//! });
//!
//! // Wait for the other thread to release the lock
//! while spinlock.load(Ordering::SeqCst) != 0 {}
+//!
+//! if let Err(panic) = thread.join() {
+//! println!("Thread had an error: {:?}", panic);
+//! }
//! }
//! ```
//!
@@ -83,62 +88,33 @@ use default::Default;
use fmt;
/// A boolean type which can be safely shared between threads.
+#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
- v: UnsafeCell<usize>,
+ v: UnsafeCell<u8>,
}
+#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
fn default() -> Self {
- Self::new(Default::default())
+ Self::new(false)
}
}
// Send is implicitly implemented for AtomicBool.
+#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
-/// A signed integer type which can be safely shared between threads.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct AtomicIsize {
- v: UnsafeCell<isize>,
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Default for AtomicIsize {
- fn default() -> Self {
- Self::new(Default::default())
- }
-}
-
-// Send is implicitly implemented for AtomicIsize.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Sync for AtomicIsize {}
-
-/// An unsigned integer type which can be safely shared between threads.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct AtomicUsize {
- v: UnsafeCell<usize>,
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Default for AtomicUsize {
- fn default() -> Self {
- Self::new(Default::default())
- }
-}
-
-// Send is implicitly implemented for AtomicUsize.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Sync for AtomicUsize {}
-
/// A raw pointer type which can be safely shared between threads.
+#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
p: UnsafeCell<*mut T>,
}
+#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
fn default() -> AtomicPtr<T> {
@@ -146,8 +122,10 @@ impl<T> Default for AtomicPtr<T> {
}
}
+#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
+#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
@@ -169,13 +147,13 @@ pub enum Ordering {
#[stable(feature = "rust1", since = "1.0.0")]
Relaxed,
/// When coupled with a store, all previous writes become visible
- /// to another thread that performs a load with `Acquire` ordering
+ /// to the other threads that perform a load with `Acquire` ordering
/// on the same value.
#[stable(feature = "rust1", since = "1.0.0")]
Release,
/// When coupled with a load, all subsequent loads will see data
/// written before a store with `Release` ordering on the same value
- /// in another thread.
+ /// in other threads.
#[stable(feature = "rust1", since = "1.0.0")]
Acquire,
/// When coupled with a load, uses `Acquire` ordering, and with a store
@@ -189,18 +167,11 @@ pub enum Ordering {
}
/// An `AtomicBool` initialized to `false`.
+#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
-/// An `AtomicIsize` initialized to `0`.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
-/// An `AtomicUsize` initialized to `0`.
-#[stable(feature = "rust1", since = "1.0.0")]
-pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
-
-// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
-const UINT_TRUE: usize = !0;
+#[cfg(target_has_atomic = "8")]
impl AtomicBool {
/// Creates a new `AtomicBool`.
///
@@ -215,7 +186,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn new(v: bool) -> AtomicBool {
- AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
+ AtomicBool { v: UnsafeCell::new(v as u8) }
}
/// Loads a value from the bool.
@@ -238,7 +209,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn load(&self, order: Ordering) -> bool {
- unsafe { atomic_load(self.v.get(), order) > 0 }
+ unsafe { atomic_load(self.v.get(), order) != 0 }
}
/// Stores a value into the bool.
@@ -262,9 +233,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn store(&self, val: bool, order: Ordering) {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_store(self.v.get(), val, order); }
+ unsafe { atomic_store(self.v.get(), val as u8, order); }
}
/// Stores a value into the bool, returning the old value.
@@ -284,9 +253,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_swap(self.v.get(), val, order) > 0 }
+ unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
}
/// Stores a value into the `bool` if the current value is the same as the `current` value.
@@ -332,7 +299,6 @@ impl AtomicBool {
/// # Examples
///
/// ```
- /// # #![feature(extended_compare_and_swap)]
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
@@ -351,18 +317,16 @@ impl AtomicBool {
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
pub fn compare_exchange(&self,
current: bool,
new: bool,
success: Ordering,
failure: Ordering) -> Result<bool, bool> {
- let current = if current { UINT_TRUE } else { 0 };
- let new = if new { UINT_TRUE } else { 0 };
-
- match unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } {
- Ok(x) => Ok(x > 0),
- Err(x) => Err(x > 0),
+ match unsafe { atomic_compare_exchange(self.v.get(), current as u8, new as u8,
+ success, failure) } {
+ Ok(x) => Ok(x != 0),
+ Err(x) => Err(x != 0),
}
}
@@ -382,7 +346,6 @@ impl AtomicBool {
/// # Examples
///
/// ```
- /// # #![feature(extended_compare_and_swap)]
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let val = AtomicBool::new(false);
@@ -397,19 +360,16 @@ impl AtomicBool {
/// }
/// ```
#[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
pub fn compare_exchange_weak(&self,
current: bool,
new: bool,
success: Ordering,
failure: Ordering) -> Result<bool, bool> {
- let current = if current { UINT_TRUE } else { 0 };
- let new = if new { UINT_TRUE } else { 0 };
-
- match unsafe { atomic_compare_exchange_weak(self.v.get(), current, new,
+ match unsafe { atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8,
success, failure) } {
- Ok(x) => Ok(x > 0),
- Err(x) => Err(x > 0),
+ Ok(x) => Ok(x != 0),
+ Err(x) => Err(x != 0),
}
}
@@ -440,9 +400,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_and(self.v.get(), val, order) > 0 }
+ unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
}
/// Logical "nand" with a boolean value.
@@ -473,9 +431,20 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_nand(self.v.get(), val, order) > 0 }
+ // We can't use atomic_nand here because it can result in a bool with
+ // an invalid value. This happens because the atomic operation is done
+ // with an 8-bit integer internally, which would set the upper 7 bits.
+ // So we just use a compare-exchange loop instead, which is what the
+ // intrinsic actually expands to anyway on many platforms.
+ let mut old = self.load(Relaxed);
+ loop {
+ let new = !(old && val);
+ match self.compare_exchange_weak(old, new, order, Relaxed) {
+ Ok(_) => break,
+ Err(x) => old = x,
+ }
+ }
+ old
}
/// Logical "or" with a boolean value.
@@ -505,9 +474,7 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_or(self.v.get(), val, order) > 0 }
+ unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
}
/// Logical "xor" with a boolean value.
@@ -537,563 +504,11 @@ impl AtomicBool {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
- let val = if val { UINT_TRUE } else { 0 };
-
- unsafe { atomic_xor(self.v.get(), val, order) > 0 }
- }
-}
-
-impl AtomicIsize {
- /// Creates a new `AtomicIsize`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::AtomicIsize;
- ///
- /// let atomic_forty_two = AtomicIsize::new(42);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub const fn new(v: isize) -> AtomicIsize {
- AtomicIsize {v: UnsafeCell::new(v)}
- }
-
- /// Loads a value from the isize.
- ///
- /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Release` or `AcqRel`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn load(&self, order: Ordering) -> isize {
- unsafe { atomic_load(self.v.get(), order) }
- }
-
- /// Stores a value into the isize.
- ///
- /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// some_isize.store(10, Ordering::Relaxed);
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- /// ```
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Acquire` or `AcqRel`.
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn store(&self, val: isize, order: Ordering) {
- unsafe { atomic_store(self.v.get(), val, order); }
- }
-
- /// Stores a value into the isize, returning the old value.
- ///
- /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn swap(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_swap(self.v.get(), val, order) }
- }
-
- /// Stores a value into the `isize` if the current value is the same as the `current` value.
- ///
- /// The return value is always the previous value. If it is equal to `current`, then the value
- /// was updated.
- ///
- /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
- /// this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- ///
- /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
- match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
- Ok(x) => x,
- Err(x) => x,
- }
- }
-
- /// Stores a value into the `isize` if the current value is the same as the `current` value.
- ///
- /// The return value is a result indicating whether the new value was written and containing
- /// the previous value. On success this value is guaranteed to be equal to `new`.
- ///
- /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
- /// operation. The first describes the required ordering if the operation succeeds while the
- /// second describes the required ordering when the operation fails. The failure ordering can't
- /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
- ///
- /// # Examples
- ///
- /// ```
- /// # #![feature(extended_compare_and_swap)]
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let some_isize = AtomicIsize::new(5);
- ///
- /// assert_eq!(some_isize.compare_exchange(5, 10,
- /// Ordering::Acquire,
- /// Ordering::Relaxed),
- /// Ok(5));
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- ///
- /// assert_eq!(some_isize.compare_exchange(6, 12,
- /// Ordering::SeqCst,
- /// Ordering::Acquire),
- /// Err(10));
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
- pub fn compare_exchange(&self,
- current: isize,
- new: isize,
- success: Ordering,
- failure: Ordering) -> Result<isize, isize> {
- unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
- }
-
- /// Stores a value into the `isize` if the current value is the same as the `current` value.
- ///
- /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
- /// comparison succeeds, which can result in more efficient code on some platforms. The
- /// return value is a result indicating whether the new value was written and containing the
- /// previous value.
- ///
- /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
- /// ordering of this operation. The first describes the required ordering if the operation
- /// succeeds while the second describes the required ordering when the operation fails. The
- /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
- /// success ordering.
- ///
- /// # Examples
- ///
- /// ```
- /// # #![feature(extended_compare_and_swap)]
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let val = AtomicIsize::new(4);
- ///
- /// let mut old = val.load(Ordering::Relaxed);
- /// loop {
- /// let new = old * 2;
- /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
- /// Ok(_) => break,
- /// Err(x) => old = x,
- /// }
- /// }
- /// ```
- #[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
- pub fn compare_exchange_weak(&self,
- current: isize,
- new: isize,
- success: Ordering,
- failure: Ordering) -> Result<isize, isize> {
- unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
- }
-
- /// Add an isize to the current value, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0);
- /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
- /// assert_eq!(foo.load(Ordering::SeqCst), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_add(self.v.get(), val, order) }
- }
-
- /// Subtract an isize from the current value, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0);
- /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
- /// assert_eq!(foo.load(Ordering::SeqCst), -10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_sub(self.v.get(), val, order) }
- }
-
- /// Bitwise and with the current isize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0b101101);
- /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_and(self.v.get(), val, order) }
- }
-
- /// Bitwise or with the current isize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0b101101);
- /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_or(self.v.get(), val, order) }
- }
-
- /// Bitwise xor with the current isize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicIsize, Ordering};
- ///
- /// let foo = AtomicIsize::new(0b101101);
- /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
- unsafe { atomic_xor(self.v.get(), val, order) }
- }
-}
-
-impl AtomicUsize {
- /// Creates a new `AtomicUsize`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::AtomicUsize;
- ///
- /// let atomic_forty_two = AtomicUsize::new(42);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub const fn new(v: usize) -> AtomicUsize {
- AtomicUsize { v: UnsafeCell::new(v) }
- }
-
- /// Loads a value from the usize.
- ///
- /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Release` or `AcqRel`.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize = AtomicUsize::new(5);
- ///
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn load(&self, order: Ordering) -> usize {
- unsafe { atomic_load(self.v.get(), order) }
- }
-
- /// Stores a value into the usize.
- ///
- /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize = AtomicUsize::new(5);
- ///
- /// some_usize.store(10, Ordering::Relaxed);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- /// ```
- ///
- /// # Panics
- ///
- /// Panics if `order` is `Acquire` or `AcqRel`.
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn store(&self, val: usize, order: Ordering) {
- unsafe { atomic_store(self.v.get(), val, order); }
- }
-
- /// Stores a value into the usize, returning the old value.
- ///
- /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize = AtomicUsize::new(5);
- ///
- /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn swap(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_swap(self.v.get(), val, order) }
- }
-
- /// Stores a value into the `usize` if the current value is the same as the `current` value.
- ///
- /// The return value is always the previous value. If it is equal to `current`, then the value
- /// was updated.
- ///
- /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
- /// this operation.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_usize = AtomicUsize::new(5);
- ///
- /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- ///
- /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
- /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
- match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
- Ok(x) => x,
- Err(x) => x,
- }
- }
-
- /// Stores a value into the `usize` if the current value is the same as the `current` value.
- ///
- /// The return value is a result indicating whether the new value was written and containing
- /// the previous value. On success this value is guaranteed to be equal to `new`.
- ///
- /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
- /// operation. The first describes the required ordering if the operation succeeds while the
- /// second describes the required ordering when the operation fails. The failure ordering can't
- /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
- ///
- /// # Examples
- ///
- /// ```
- /// # #![feature(extended_compare_and_swap)]
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let some_isize = AtomicUsize::new(5);
- ///
- /// assert_eq!(some_isize.compare_exchange(5, 10,
- /// Ordering::Acquire,
- /// Ordering::Relaxed),
- /// Ok(5));
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- ///
- /// assert_eq!(some_isize.compare_exchange(6, 12,
- /// Ordering::SeqCst,
- /// Ordering::Acquire),
- /// Err(10));
- /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
- /// ```
- #[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
- pub fn compare_exchange(&self,
- current: usize,
- new: usize,
- success: Ordering,
- failure: Ordering) -> Result<usize, usize> {
- unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
- }
-
- /// Stores a value into the `usize` if the current value is the same as the `current` value.
- ///
- /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
- /// comparison succeeds, which can result in more efficient code on some platforms. The
- /// return value is a result indicating whether the new value was written and containing the
- /// previous value.
- ///
- /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
- /// ordering of this operation. The first describes the required ordering if the operation
- /// succeeds while the second describes the required ordering when the operation fails. The
- /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
- /// success ordering.
- ///
- /// # Examples
- ///
- /// ```
- /// # #![feature(extended_compare_and_swap)]
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let val = AtomicUsize::new(4);
- ///
- /// let mut old = val.load(Ordering::Relaxed);
- /// loop {
- /// let new = old * 2;
- /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
- /// Ok(_) => break,
- /// Err(x) => old = x,
- /// }
- /// }
- /// ```
- #[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
- pub fn compare_exchange_weak(&self,
- current: usize,
- new: usize,
- success: Ordering,
- failure: Ordering) -> Result<usize, usize> {
- unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
- }
-
- /// Add to the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0);
- /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
- /// assert_eq!(foo.load(Ordering::SeqCst), 10);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_add(self.v.get(), val, order) }
- }
-
- /// Subtract from the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(10);
- /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0);
- /// ```
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_sub(self.v.get(), val, order) }
- }
-
- /// Bitwise and with the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0b101101);
- /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_and(self.v.get(), val, order) }
- }
-
- /// Bitwise or with the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0b101101);
- /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_or(self.v.get(), val, order) }
- }
-
- /// Bitwise xor with the current usize, returning the previous value.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::sync::atomic::{AtomicUsize, Ordering};
- ///
- /// let foo = AtomicUsize::new(0b101101);
- /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
- /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
- #[inline]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
- unsafe { atomic_xor(self.v.get(), val, order) }
+ unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
}
}
+#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
/// Creates a new `AtomicPtr`.
///
@@ -1228,7 +643,6 @@ impl<T> AtomicPtr<T> {
/// # Examples
///
/// ```
- /// # #![feature(extended_compare_and_swap)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
@@ -1241,7 +655,7 @@ impl<T> AtomicPtr<T> {
/// Ordering::SeqCst, Ordering::Relaxed);
/// ```
#[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
pub fn compare_exchange(&self,
current: *mut T,
new: *mut T,
@@ -1276,7 +690,6 @@ impl<T> AtomicPtr<T> {
/// # Examples
///
/// ```
- /// # #![feature(extended_compare_and_swap)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let some_ptr = AtomicPtr::new(&mut 5);
@@ -1291,7 +704,7 @@ impl<T> AtomicPtr<T> {
/// }
/// ```
#[inline]
- #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
pub fn compare_exchange_weak(&self,
current: *mut T,
new: *mut T,
@@ -1311,6 +724,403 @@ impl<T> AtomicPtr<T> {
}
}
+macro_rules! atomic_int {
+ ($stable:meta,
+ $stable_cxchg:meta,
+ $stable_debug:meta,
+ $int_type:ident $atomic_type:ident $atomic_init:ident) => {
+ /// An integer type which can be safely shared between threads.
+ #[$stable]
+ pub struct $atomic_type {
+ v: UnsafeCell<$int_type>,
+ }
+
+ /// An atomic integer initialized to `0`.
+ #[$stable]
+ pub const $atomic_init: $atomic_type = $atomic_type::new(0);
+
+ #[$stable]
+ impl Default for $atomic_type {
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+ }
+
+ #[$stable_debug]
+ impl fmt::Debug for $atomic_type {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple(stringify!($atomic_type))
+ .field(&self.load(Ordering::SeqCst))
+ .finish()
+ }
+ }
+
+ // Send is implicitly implemented.
+ #[$stable]
+ unsafe impl Sync for $atomic_type {}
+
+ impl $atomic_type {
+ /// Creates a new atomic integer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicIsize;
+ ///
+ /// let atomic_forty_two = AtomicIsize::new(42);
+ /// ```
+ #[inline]
+ #[$stable]
+ pub const fn new(v: $int_type) -> Self {
+ $atomic_type {v: UnsafeCell::new(v)}
+ }
+
+ /// Loads a value from the atomic integer.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
+ /// ```
+ #[inline]
+ #[$stable]
+ pub fn load(&self, order: Ordering) -> $int_type {
+ unsafe { atomic_load(self.v.get(), order) }
+ }
+
+ /// Stores a value into the atomic integer.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// some_isize.store(10, Ordering::Relaxed);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ #[$stable]
+ pub fn store(&self, val: $int_type, order: Ordering) {
+ unsafe { atomic_store(self.v.get(), val, order); }
+ }
+
+ /// Stores a value into the atomic integer, returning the old value.
+ ///
+ /// `swap` takes an `Ordering` argument which describes the memory ordering of this
+ /// operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
+ /// ```
+ #[inline]
+ #[$stable]
+ pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
+ unsafe { atomic_swap(self.v.get(), val, order) }
+ }
+
+ /// Stores a value into the atomic integer if the current value is the same as the
+ /// `current` value.
+ ///
+ /// The return value is always the previous value. If it is equal to `current`, then the
+ /// value was updated.
+ ///
+ /// `compare_and_swap` also takes an `Ordering` argument which describes the memory
+ /// ordering of this operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[$stable]
+ pub fn compare_and_swap(&self,
+ current: $int_type,
+ new: $int_type,
+ order: Ordering) -> $int_type {
+ match self.compare_exchange(current,
+ new,
+ order,
+ strongest_failure_ordering(order)) {
+ Ok(x) => x,
+ Err(x) => x,
+ }
+ }
+
+ /// Stores a value into the atomic integer if the current value is the same as the
+ /// `current` value.
+ ///
+ /// The return value is a result indicating whether the new value was written and
+ /// containing the previous value. On success this value is guaranteed to be equal to
+ /// `new`.
+ ///
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of
+ /// this operation. The first describes the required ordering if the operation succeeds
+ /// while the second describes the required ordering when the operation fails. The
+ /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker
+ /// than the success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let some_isize = AtomicIsize::new(5);
+ ///
+ /// assert_eq!(some_isize.compare_exchange(5, 10,
+ /// Ordering::Acquire,
+ /// Ordering::Relaxed),
+ /// Ok(5));
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ ///
+ /// assert_eq!(some_isize.compare_exchange(6, 12,
+ /// Ordering::SeqCst,
+ /// Ordering::Acquire),
+ /// Err(10));
+ /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
+ /// ```
+ #[inline]
+ #[$stable_cxchg]
+ pub fn compare_exchange(&self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering) -> Result<$int_type, $int_type> {
+ unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the atomic integer if the current value is the same as the
+ /// `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
+ /// comparison succeeds, which can result in more efficient code on some platforms. The
+ /// return value is a result indicating whether the new value was written and containing
+ /// the previous value.
+ ///
+ /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if the
+ /// operation succeeds while the second describes the required ordering when the
+ /// operation fails. The failure ordering can't be `Release` or `AcqRel` and must be
+ /// equivalent or weaker than the success ordering.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let val = AtomicIsize::new(4);
+ ///
+ /// let mut old = val.load(Ordering::Relaxed);
+ /// loop {
+ /// let new = old * 2;
+ /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+ /// Ok(_) => break,
+ /// Err(x) => old = x,
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[$stable_cxchg]
+ pub fn compare_exchange_weak(&self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering) -> Result<$int_type, $int_type> {
+ unsafe {
+ atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
+ }
+ }
+
+ /// Add to the current value, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let foo = AtomicIsize::new(0);
+ /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), 10);
+ /// ```
+ #[inline]
+ #[$stable]
+ pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
+ unsafe { atomic_add(self.v.get(), val, order) }
+ }
+
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let foo = AtomicIsize::new(0);
+ /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
+ /// assert_eq!(foo.load(Ordering::SeqCst), -10);
+ /// ```
+ #[inline]
+ #[$stable]
+ pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
+ unsafe { atomic_sub(self.v.get(), val, order) }
+ }
+
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let foo = AtomicIsize::new(0b101101);
+ /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
+        /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
+        /// ```
+ #[inline]
+ #[$stable]
+ pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
+ unsafe { atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let foo = AtomicIsize::new(0b101101);
+ /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
+        /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
+        /// ```
+ #[inline]
+ #[$stable]
+ pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
+ unsafe { atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicIsize, Ordering};
+ ///
+ /// let foo = AtomicIsize::new(0b101101);
+ /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
+        /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
+        /// ```
+ #[inline]
+ #[$stable]
+ pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
+ unsafe { atomic_xor(self.v.get(), val, order) }
+ }
+ }
+ }
+}
+
+#[cfg(target_has_atomic = "8")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ i8 AtomicI8 ATOMIC_I8_INIT
+}
+#[cfg(target_has_atomic = "8")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ u8 AtomicU8 ATOMIC_U8_INIT
+}
+#[cfg(target_has_atomic = "16")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ i16 AtomicI16 ATOMIC_I16_INIT
+}
+#[cfg(target_has_atomic = "16")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ u16 AtomicU16 ATOMIC_U16_INIT
+}
+#[cfg(target_has_atomic = "32")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ i32 AtomicI32 ATOMIC_I32_INIT
+}
+#[cfg(target_has_atomic = "32")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ u32 AtomicU32 ATOMIC_U32_INIT
+}
+#[cfg(target_has_atomic = "64")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ i64 AtomicI64 ATOMIC_I64_INIT
+}
+#[cfg(target_has_atomic = "64")]
+atomic_int! {
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ u64 AtomicU64 ATOMIC_U64_INIT
+}
+#[cfg(target_has_atomic = "ptr")]
+atomic_int!{
+ stable(feature = "rust1", since = "1.0.0"),
+ stable(feature = "extended_compare_and_swap", since = "1.10.0"),
+ stable(feature = "atomic_debug", since = "1.3.0"),
+ isize AtomicIsize ATOMIC_ISIZE_INIT
+}
+#[cfg(target_has_atomic = "ptr")]
+atomic_int!{
+ stable(feature = "rust1", since = "1.0.0"),
+ stable(feature = "extended_compare_and_swap", since = "1.10.0"),
+ stable(feature = "atomic_debug", since = "1.3.0"),
+ usize AtomicUsize ATOMIC_USIZE_INIT
+}
+
#[inline]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
match order {
@@ -1445,18 +1255,6 @@ unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
}
#[inline]
-unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
- match order {
- Acquire => intrinsics::atomic_nand_acq(dst, val),
- Release => intrinsics::atomic_nand_rel(dst, val),
- AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
- Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
- SeqCst => intrinsics::atomic_nand(dst, val)
- }
-}
-
-
-#[inline]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
Acquire => intrinsics::atomic_or_acq(dst, val),
@@ -1467,7 +1265,6 @@ unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
}
}
-
#[inline]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
match order {
@@ -1479,7 +1276,6 @@ unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
}
}
-
/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
@@ -1514,19 +1310,16 @@ pub fn fence(order: Ordering) {
}
}
-macro_rules! impl_Debug {
- ($($t:ident)*) => ($(
- #[stable(feature = "atomic_debug", since = "1.3.0")]
- impl fmt::Debug for $t {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
- }
- }
- )*);
-}
-impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }
+#[cfg(target_has_atomic = "8")]
+#[stable(feature = "atomic_debug", since = "1.3.0")]
+impl fmt::Debug for AtomicBool {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
+ }
+}
+#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {