| author | pravic <[email protected]> | 2016-04-12 17:44:14 +0300 |
|---|---|---|
| committer | pravic <[email protected]> | 2016-04-12 17:44:14 +0300 |
| commit | a3395a455b76a1a3b3dd232bf57c00eb1f485863 (patch) | |
| tree | 0299908d09b43e038a1c2c51ccef6496185020c7 /liballoc | |
| parent | add cargo profile (diff) | |
liballoc
Diffstat (limited to 'liballoc')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | liballoc/Cargo.toml | 12 |
| -rw-r--r-- | liballoc/arc.rs | 1209 |
| -rw-r--r-- | liballoc/boxed.rs | 643 |
| -rw-r--r-- | liballoc/boxed_test.rs | 119 |
| -rw-r--r-- | liballoc/heap.rs | 190 |
| -rw-r--r-- | liballoc/lib.rs | 127 |
| -rw-r--r-- | liballoc/oom.rs | 42 |
| -rw-r--r-- | liballoc/raw_vec.rs | 629 |
| -rw-r--r-- | liballoc/rc.rs | 1169 |
9 files changed, 4140 insertions, 0 deletions
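The bulk of this import is `arc.rs`, which implements atomically reference-counted shared ownership via paired strong (`Arc<T>`) and weak (`Weak<T>`) handles. A minimal illustrative sketch of the intended usage pattern follows; it is written against the stable `std::sync` re-exports of the same types rather than this vendored copy:

```rust
use std::sync::{Arc, Weak};
use std::thread;

fn main() {
    // Cloning an Arc only bumps an atomic strong count;
    // all clones share one heap allocation.
    let data = Arc::new(vec![1, 2, 3]);

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let data = data.clone();
            thread::spawn(move || data.iter().sum::<i32>())
        })
        .collect();

    for handle in handles {
        assert_eq!(handle.join().unwrap(), 6);
    }

    // A Weak handle does not keep the value alive: `upgrade` returns
    // Some(Arc) only while at least one strong handle still exists.
    let weak: Weak<Vec<i32>> = Arc::downgrade(&data);
    assert!(weak.upgrade().is_some());
    drop(data);
    assert!(weak.upgrade().is_none());
}
```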
diff --git a/liballoc/Cargo.toml b/liballoc/Cargo.toml new file mode 100644 index 0000000..5da0f1a --- /dev/null +++ b/liballoc/Cargo.toml @@ -0,0 +1,12 @@ +[package] +authors = ["The Rust Project Developers"] +name = "alloc" +version = "0.0.0" + +[lib] +name = "alloc" +path = "lib.rs" +test = false + +[dependencies] +core = { path = "../libcore" } diff --git a/liballoc/arc.rs b/liballoc/arc.rs new file mode 100644 index 0000000..4aba567 --- /dev/null +++ b/liballoc/arc.rs @@ -0,0 +1,1209 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![stable(feature = "rust1", since = "1.0.0")] + +//! Threadsafe reference-counted boxes (the `Arc<T>` type). +//! +//! The `Arc<T>` type provides shared ownership of an immutable value. +//! Destruction is deterministic, and will occur as soon as the last owner is +//! gone. It is marked as `Send` because it uses atomic reference counting. +//! +//! If you do not need thread-safety, and just need shared ownership, consider +//! the [`Rc<T>` type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but +//! does not use atomics, making it both thread-unsafe as well as significantly +//! faster when updating the reference count. +//! +//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer +//! to the box. A `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but +//! will return `None` if the value has already been dropped. +//! +//! For example, a tree with parent pointers can be represented by putting the +//! nodes behind strong `Arc<T>` pointers, and then storing the parent pointers +//! as `Weak<T>` pointers. +//! +//! # Examples +//! +//! Sharing some immutable data between threads: +//! +//! ```no_run +//! use std::sync::Arc; +//! use std::thread; +//! +//! let five = Arc::new(5); +//! +//! for _ in 0..10 { +//! let five = five.clone(); +//! +//! thread::spawn(move || { +//! println!("{:?}", five); +//! }); +//! } +//! ``` +//! +//! Sharing mutable data safely between threads with a `Mutex`: +//! +//! ```no_run +//! use std::sync::{Arc, Mutex}; +//! use std::thread; +//! +//! let five = Arc::new(Mutex::new(5)); +//! +//! for _ in 0..10 { +//! let five = five.clone(); +//! +//! thread::spawn(move || { +//! let mut number = five.lock().unwrap(); +//! +//! *number += 1; +//! +//! println!("{}", *number); // prints 6 +//! }); +//! } +//! ``` + +use boxed::Box; + +use core::sync::atomic; +use core::sync::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst}; +use core::borrow; +use core::fmt; +use core::cmp::Ordering; +use core::mem::{align_of_val, size_of_val}; +use core::intrinsics::abort; +use core::mem; +use core::mem::uninitialized; +use core::ops::Deref; +use core::ops::CoerceUnsized; +use core::ptr::{self, Shared}; +use core::marker::Unsize; +use core::hash::{Hash, Hasher}; +use core::{usize, isize}; +use core::convert::From; +use heap::deallocate; + +const MAX_REFCOUNT: usize = (isize::MAX) as usize; + +/// An atomically reference counted wrapper for shared state. +/// +/// # Examples +/// +/// In this example, a large vector is shared between several threads. 
+/// With simple pipes, without `Arc`, a copy would have to be made for each +/// thread. +/// +/// When you clone an `Arc<T>`, it will create another pointer to the data and +/// increase the reference counter. +/// +/// ``` +/// use std::sync::Arc; +/// use std::thread; +/// +/// fn main() { +/// let numbers: Vec<_> = (0..100).collect(); +/// let shared_numbers = Arc::new(numbers); +/// +/// for _ in 0..10 { +/// let child_numbers = shared_numbers.clone(); +/// +/// thread::spawn(move || { +/// let local_numbers = &child_numbers[..]; +/// +/// // Work with the local numbers +/// }); +/// } +/// } +/// ``` +#[unsafe_no_drop_flag] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Arc<T: ?Sized> { + ptr: Shared<ArcInner<T>>, +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {} +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {} + +/// A weak pointer to an `Arc`. +/// +/// Weak pointers will not keep the data inside of the `Arc` alive, and can be +/// used to break cycles between `Arc` pointers. +#[unsafe_no_drop_flag] +#[stable(feature = "arc_weak", since = "1.4.0")] +pub struct Weak<T: ?Sized> { + ptr: Shared<ArcInner<T>>, +} + +#[stable(feature = "arc_weak", since = "1.4.0")] +unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {} +#[stable(feature = "arc_weak", since = "1.4.0")] +unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {} + +#[stable(feature = "arc_weak", since = "1.4.0")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "(Weak)") + } +} + +struct ArcInner<T: ?Sized> { + strong: atomic::AtomicUsize, + + // the value usize::MAX acts as a sentinel for temporarily "locking" the + // ability to upgrade weak pointers or downgrade strong ones; this is used + // to avoid races in `make_mut` and `get_mut`. + weak: atomic::AtomicUsize, + + data: T, +} + +unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {} +unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {} + +impl<T> Arc<T> { + /// Constructs a new `Arc<T>`. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn new(data: T) -> Arc<T> { + // Start the weak pointer count as 1 which is the weak pointer that's + // held by all the strong pointers (kinda), see std/rc.rs for more info + let x: Box<_> = box ArcInner { + strong: atomic::AtomicUsize::new(1), + weak: atomic::AtomicUsize::new(1), + data: data, + }; + Arc { ptr: unsafe { Shared::new(Box::into_raw(x)) } } + } + + /// Unwraps the contained value if the `Arc<T>` has exactly one strong reference. + /// + /// Otherwise, an `Err` is returned with the same `Arc<T>`. + /// + /// This will succeed even if there are outstanding weak references. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new(3); + /// assert_eq!(Arc::try_unwrap(x), Ok(3)); + /// + /// let x = Arc::new(4); + /// let _y = x.clone(); + /// assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4))); + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn try_unwrap(this: Self) -> Result<T, Self> { + // See `drop` for why all these atomics are like this + if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() { + return Err(this); + } + + atomic::fence(Acquire); + + unsafe { + let ptr = *this.ptr; + let elem = ptr::read(&(*ptr).data); + + // Make a weak pointer to clean up the implicit strong-weak reference + let _weak = Weak { ptr: this.ptr }; + mem::forget(this); + + Ok(elem) + } + } +} + +impl<T: ?Sized> Arc<T> { + /// Downgrades the `Arc<T>` to a `Weak<T>` reference. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// let weak_five = Arc::downgrade(&five); + /// ``` + #[stable(feature = "arc_weak", since = "1.4.0")] + pub fn downgrade(this: &Self) -> Weak<T> { + // This Relaxed is OK because we're checking the value in the CAS + // below. + let mut cur = this.inner().weak.load(Relaxed); + + loop { + // check if the weak counter is currently "locked"; if so, spin. + if cur == usize::MAX { + cur = this.inner().weak.load(Relaxed); + continue; + } + + // NOTE: this code currently ignores the possibility of overflow + // into usize::MAX; in general both Rc and Arc need to be adjusted + // to deal with overflow. + + // Unlike with Clone(), we need this to be an Acquire read to + // synchronize with the write coming from `is_unique`, so that the + // events prior to that write happen before this read. + match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { + Ok(_) => return Weak { ptr: this.ptr }, + Err(old) => cur = old, + } + } + } + + /// Get the number of weak references to this value. + #[inline] + #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy", + issue = "28356")] + pub fn weak_count(this: &Self) -> usize { + this.inner().weak.load(SeqCst) - 1 + } + + /// Get the number of strong references to this value. + #[inline] + #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy", + issue = "28356")] + pub fn strong_count(this: &Self) -> usize { + this.inner().strong.load(SeqCst) + } + + #[inline] + fn inner(&self) -> &ArcInner<T> { + // This unsafety is ok because while this arc is alive we're guaranteed + // that the inner pointer is valid. Furthermore, we know that the + // `ArcInner` structure itself is `Sync` because the inner data is + // `Sync` as well, so we're ok loaning out an immutable pointer to these + // contents. + unsafe { &**self.ptr } + } + + // Non-inlined part of `drop`. + #[inline(never)] + unsafe fn drop_slow(&mut self) { + let ptr = *self.ptr; + + // Destroy the data at this time, even though we may not free the box + // allocation itself (there may still be weak pointers lying around). + ptr::drop_in_place(&mut (*ptr).data); + + if self.inner().weak.fetch_sub(1, Release) == 1 { + atomic::fence(Acquire); + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Clone for Arc<T> { + /// Makes a clone of the `Arc<T>`. + /// + /// This increases the strong reference count. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five.clone(); + /// ``` + #[inline] + fn clone(&self) -> Arc<T> { + // Using a relaxed ordering is alright here, as knowledge of the + // original reference prevents other threads from erroneously deleting + // the object. + // + // As explained in the [Boost documentation][1], increasing the + // reference counter can always be done with memory_order_relaxed: new + // references to an object can only be formed from an existing + // reference, and passing an existing reference from one thread to + // another must already provide any required synchronization. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + let old_size = self.inner().strong.fetch_add(1, Relaxed); + + // However, we need to guard against massive refcounts in case someone + // is `mem::forget`ing Arcs. If we don't do this, the count can overflow + // and users will use-after-free. We racily saturate to `isize::MAX` on + // the assumption that there aren't ~2 billion threads incrementing + // the reference count at once. This branch will never be taken in + // any realistic program. + // + // We abort because such a program is incredibly degenerate, and we + // don't care to support it. + if old_size > MAX_REFCOUNT { + unsafe { + abort(); + } + } + + Arc { ptr: self.ptr } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Deref for Arc<T> { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + &self.inner().data + } +} + +impl<T: Clone> Arc<T> { + /// Makes a mutable reference into the given `Arc<T>`. + /// If the `Arc<T>` has more than one strong reference, or any weak + /// references, the inner data is cloned. + /// + /// This is also referred to as copy-on-write. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut data = Arc::new(5); + /// + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// let mut other_data = data.clone(); // Won't clone inner data + /// *Arc::make_mut(&mut data) += 1; // Clones inner data + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything + /// + /// // Note: data and other_data now point to different numbers + /// assert_eq!(*data, 8); + /// assert_eq!(*other_data, 12); + /// + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn make_mut(this: &mut Self) -> &mut T { + // Note that we hold both a strong reference and a weak reference. + // Thus, releasing our strong reference only will not, by itself, cause + // the memory to be deallocated. + // + // Use Acquire to ensure that we see any writes to `weak` that happen + // before release writes (i.e., decrements) to `strong`. Since we hold a + // weak count, there's no chance the ArcInner itself could be + // deallocated. + if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { + // Another strong pointer exists; clone + *this = Arc::new((**this).clone()); + } else if this.inner().weak.load(Relaxed) != 1 { + // Relaxed suffices in the above because this is fundamentally an + // optimization: we are always racing with weak pointers being + // dropped. Worst case, we end up allocating a new Arc unnecessarily. + + // We removed the last strong ref, but there are additional weak + // refs remaining. We'll move the contents to a new Arc, and + // invalidate the other weak refs.
+ + // Note that it is not possible for the read of `weak` to yield + // usize::MAX (i.e., locked), since the weak count can only be + // locked by a thread with a strong reference. + + // Materialize our own implicit weak pointer, so that it can clean + // up the ArcInner as needed. + let weak = Weak { ptr: this.ptr }; + + // mark the data itself as already deallocated + unsafe { + // there is no data race in the implicit write caused by `read` + // here (due to zeroing) because data is no longer accessed by + // other threads (due to there being no more strong refs at this + // point). + let mut swap = Arc::new(ptr::read(&(**weak.ptr).data)); + mem::swap(this, &mut swap); + mem::forget(swap); + } + } else { + // We were the sole reference of either kind; bump back up the + // strong ref count. + this.inner().strong.store(1, Release); + } + + // As with `get_mut()`, the unsafety is ok because our reference was + // either unique to begin with, or became one upon cloning the contents. + unsafe { + let inner = &mut **this.ptr; + &mut inner.data + } + } +} + +impl<T: ?Sized> Arc<T> { + /// Returns a mutable reference to the contained value if the `Arc<T>` has + /// one strong reference and no weak references. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut x = Arc::new(3); + /// *Arc::get_mut(&mut x).unwrap() = 4; + /// assert_eq!(*x, 4); + /// + /// let _y = x.clone(); + /// assert!(Arc::get_mut(&mut x).is_none()); + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn get_mut(this: &mut Self) -> Option<&mut T> { + if this.is_unique() { + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the Arc itself to be `mut`, so we're returning the only possible + // reference to the inner data. + unsafe { + let inner = &mut **this.ptr; + Some(&mut inner.data) + } + } else { + None + } + } + + /// Determine whether this is the unique reference (including weak refs) to + /// the underlying data. + /// + /// Note that this requires locking the weak ref count. + fn is_unique(&mut self) -> bool { + // lock the weak pointer count if we appear to be the sole weak pointer + // holder. + // + // The acquire label here ensures a happens-before relationship with any + // writes to `strong` prior to decrements of the `weak` count (via drop, + // which uses Release). + if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { + // Due to the previous acquire read, this will observe any writes to + // `strong` that were due to upgrading weak pointers; only strong + // clones remain, which require that the strong count is > 1 anyway. + let unique = self.inner().strong.load(Relaxed) == 1; + + // The release write here synchronizes with a read in `downgrade`, + // effectively preventing the above read of `strong` from happening + // after the write. + self.inner().weak.store(1, Release); // release the lock + unique + } else { + false + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Drop for Arc<T> { + /// Drops the `Arc<T>`. + /// + /// This will decrement the strong reference count. If the strong reference + /// count becomes zero and the only other references are `Weak<T>` ones, + /// `drop`s the inner value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// { + /// let five = Arc::new(5); + /// + /// // stuff + /// + /// drop(five); // explicit drop + /// } + /// { + /// let five = Arc::new(5); + /// + /// // stuff + /// + /// } // implicit drop + /// ``` + #[unsafe_destructor_blind_to_params] + #[inline] + fn drop(&mut self) { + // This structure has #[unsafe_no_drop_flag], so this drop glue may run + // more than once (but it is guaranteed to be zeroed after the first if + // it's run more than once) + let thin = *self.ptr as *const (); + + if thin as usize == mem::POST_DROP_USIZE { + return; + } + + // Because `fetch_sub` is already atomic, we do not need to synchronize + // with other threads unless we are going to delete the object. This + // same logic applies to the below `fetch_sub` to the `weak` count. + if self.inner().strong.fetch_sub(1, Release) != 1 { + return; + } + + // This fence is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` fence. This + // means that use of the data happens before decreasing the reference + // count, which happens before this fence, which happens before the + // deletion of the data. + // + // As explained in the [Boost documentation][1], + // + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happened before), and an + // > "acquire" operation before deleting the object. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + atomic::fence(Acquire); + + unsafe { + self.drop_slow(); + } + } +} + +impl<T: ?Sized> Weak<T> { + /// Upgrades a weak reference to a strong reference. + /// + /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible. + /// + /// Returns `None` if there were no strong references and the data was + /// destroyed. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// let weak_five = Arc::downgrade(&five); + /// + /// let strong_five: Option<Arc<_>> = weak_five.upgrade(); + /// ``` + #[stable(feature = "arc_weak", since = "1.4.0")] + pub fn upgrade(&self) -> Option<Arc<T>> { + // We use a CAS loop to increment the strong count instead of a + // fetch_add because once the count hits 0 it must never be above 0. + let inner = self.inner(); + + // Relaxed load because any write of 0 that we can observe + // leaves the field in a permanently zero state (so a + // "stale" read of 0 is fine), and any other value is + // confirmed via the CAS below. + let mut n = inner.strong.load(Relaxed); + + loop { + if n == 0 { + return None; + } + + // See comments in `Arc::clone` for why we do this (for `mem::forget`). 
+ if n > MAX_REFCOUNT { + unsafe { abort(); } + } + + // Relaxed is valid for the same reason it is on Arc's Clone impl + match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) { + Ok(_) => return Some(Arc { ptr: self.ptr }), + Err(old) => n = old, + } + } + } + + #[inline] + fn inner(&self) -> &ArcInner<T> { + // See comments above for why this is "safe" + unsafe { &**self.ptr } + } +} + +#[stable(feature = "arc_weak", since = "1.4.0")] +impl<T: ?Sized> Clone for Weak<T> { + /// Makes a clone of the `Weak<T>`. + /// + /// This increases the weak reference count. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let weak_five = Arc::downgrade(&Arc::new(5)); + /// + /// weak_five.clone(); + /// ``` + #[inline] + fn clone(&self) -> Weak<T> { + // See comments in Arc::clone() for why this is relaxed. This can use a + // fetch_add (ignoring the lock) because the weak count is only locked + // when there are *no other* weak pointers in existence. (So we can't be + // running this code in that case.) + let old_size = self.inner().weak.fetch_add(1, Relaxed); + + // See comments in Arc::clone() for why we do this (for mem::forget). + if old_size > MAX_REFCOUNT { + unsafe { + abort(); + } + } + + return Weak { ptr: self.ptr }; + } +} + +#[stable(feature = "arc_weak", since = "1.4.0")] +impl<T: ?Sized> Drop for Weak<T> { + /// Drops the `Weak<T>`. + /// + /// This will decrement the weak reference count. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// { + /// let five = Arc::new(5); + /// let weak_five = Arc::downgrade(&five); + /// + /// // stuff + /// + /// drop(weak_five); // explicit drop + /// } + /// { + /// let five = Arc::new(5); + /// let weak_five = Arc::downgrade(&five); + /// + /// // stuff + /// + /// } // implicit drop + /// ``` + fn drop(&mut self) { + let ptr = *self.ptr; + let thin = ptr as *const (); + + // see comments above for why this check is here + if thin as usize == mem::POST_DROP_USIZE { + return; + } + + // If we find out that we were the last weak pointer, then it's time to + // deallocate the data entirely. See the discussion in Arc::drop() about + // the memory orderings. + // + // It's not necessary to check for the locked state here, because the + // weak count can only be locked if there was precisely one weak ref, + // meaning that drop could only subsequently run *on* that remaining weak + // ref, which can only happen after the lock is released. + if self.inner().weak.fetch_sub(1, Release) == 1 { + atomic::fence(Acquire); + unsafe { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + PartialEq> PartialEq for Arc<T> { + /// Equality for two `Arc<T>`s. + /// + /// Two `Arc<T>`s are equal if their inner values are equal. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five == Arc::new(5); + /// ``` + fn eq(&self, other: &Arc<T>) -> bool { + *(*self) == *(*other) + } + + /// Inequality for two `Arc<T>`s. + /// + /// Two `Arc<T>`s are unequal if their inner values are unequal. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five != Arc::new(5); + /// ``` + fn ne(&self, other: &Arc<T>) -> bool { + *(*self) != *(*other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> { + /// Partial comparison for two `Arc<T>`s.
+ /// + /// The two are compared by calling `partial_cmp()` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five.partial_cmp(&Arc::new(5)); + /// ``` + fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { + (**self).partial_cmp(&**other) + } + + /// Less-than comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `<` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five < Arc::new(5); + /// ``` + fn lt(&self, other: &Arc<T>) -> bool { + *(*self) < *(*other) + } + + /// 'Less-than or equal to' comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `<=` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five <= Arc::new(5); + /// ``` + fn le(&self, other: &Arc<T>) -> bool { + *(*self) <= *(*other) + } + + /// Greater-than comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `>` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five > Arc::new(5); + /// ``` + fn gt(&self, other: &Arc<T>) -> bool { + *(*self) > *(*other) + } + + /// 'Greater-than or equal to' comparison for two `Arc<T>`s. + /// + /// The two are compared by calling `>=` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// five >= Arc::new(5); + /// ``` + fn ge(&self, other: &Arc<T>) -> bool { + *(*self) >= *(*other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + Ord> Ord for Arc<T> { + fn cmp(&self, other: &Arc<T>) -> Ordering { + (**self).cmp(&**other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + Eq> Eq for Arc<T> {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> fmt::Pointer for Arc<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&*self.ptr, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: Default> Default for Arc<T> { + fn default() -> Arc<T> { + Arc::new(Default::default()) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + Hash> Hash for Arc<T> { + fn hash<H: Hasher>(&self, state: &mut H) { + (**self).hash(state) + } +} + +#[stable(feature = "from_for_ptrs", since = "1.6.0")] +impl<T> From<T> for Arc<T> { + fn from(t: T) -> Self { + Arc::new(t) + } +} + +impl<T> Weak<T> { + /// Constructs a new `Weak<T>` without an accompanying instance of T. + /// + /// This allocates memory for T, but does not initialize it. Calling + /// Weak<T>::upgrade() on the return value always gives None. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(downgraded_weak)] + /// + /// use std::sync::Weak; + /// + /// let empty: Weak<i64> = Weak::new(); + /// ``` + #[unstable(feature = "downgraded_weak", + reason = "recently added", + issue = "30425")] + pub fn new() -> Weak<T> { + unsafe { + Weak { ptr: Shared::new(Box::into_raw(box ArcInner { + strong: atomic::AtomicUsize::new(0), + weak: atomic::AtomicUsize::new(1), + data: uninitialized(), + }))} + } + } +} + +#[cfg(test)] +mod tests { + use std::clone::Clone; + use std::sync::mpsc::channel; + use std::mem::drop; + use std::ops::Drop; + use std::option::Option; + use std::option::Option::{Some, None}; + use std::sync::atomic; + use std::sync::atomic::Ordering::{Acquire, SeqCst}; + use std::thread; + use std::vec::Vec; + use super::{Arc, Weak}; + use std::sync::Mutex; + use std::convert::From; + + struct Canary(*mut atomic::AtomicUsize); + + impl Drop for Canary { + fn drop(&mut self) { + unsafe { + match *self { + Canary(c) => { + (*c).fetch_add(1, SeqCst); + } + } + } + } + } + + #[test] + fn manually_share_arc() { + let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let arc_v = Arc::new(v); + + let (tx, rx) = channel(); + + let _t = thread::spawn(move || { + let arc_v: Arc<Vec<i32>> = rx.recv().unwrap(); + assert_eq!((*arc_v)[3], 4); + }); + + tx.send(arc_v.clone()).unwrap(); + + assert_eq!((*arc_v)[2], 3); + assert_eq!((*arc_v)[4], 5); + } + + #[test] + fn test_arc_get_mut() { + let mut x = Arc::new(3); + *Arc::get_mut(&mut x).unwrap() = 4; + assert_eq!(*x, 4); + let y = x.clone(); + assert!(Arc::get_mut(&mut x).is_none()); + drop(y); + assert!(Arc::get_mut(&mut x).is_some()); + let _w = Arc::downgrade(&x); + assert!(Arc::get_mut(&mut x).is_none()); + } + + #[test] + fn try_unwrap() { + let x = Arc::new(3); + assert_eq!(Arc::try_unwrap(x), Ok(3)); + let x = Arc::new(4); + let _y = x.clone(); + assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4))); + let x = Arc::new(5); + let _w = Arc::downgrade(&x); + assert_eq!(Arc::try_unwrap(x), Ok(5)); + } + + #[test] + fn test_cowarc_clone_make_mut() { + let mut cow0 = Arc::new(75); + let mut cow1 = cow0.clone(); + let mut cow2 = cow1.clone(); + + assert!(75 == *Arc::make_mut(&mut cow0)); + assert!(75 == *Arc::make_mut(&mut cow1)); + assert!(75 == *Arc::make_mut(&mut cow2)); + + *Arc::make_mut(&mut cow0) += 1; + *Arc::make_mut(&mut cow1) += 2; + *Arc::make_mut(&mut cow2) += 3; + + assert!(76 == *cow0); + assert!(77 == *cow1); + assert!(78 == *cow2); + + // none should point to the same backing memory + assert!(*cow0 != *cow1); + assert!(*cow0 != *cow2); + assert!(*cow1 != *cow2); + } + + #[test] + fn test_cowarc_clone_unique2() { + let mut cow0 = Arc::new(75); + let cow1 = cow0.clone(); + let cow2 = cow1.clone(); + + assert!(75 == *cow0); + assert!(75 == *cow1); + assert!(75 == *cow2); + + *Arc::make_mut(&mut cow0) += 1; + assert!(76 == *cow0); + assert!(75 == *cow1); + assert!(75 == *cow2); + + // cow1 and cow2 should share the same contents + // cow0 should have a unique reference + assert!(*cow0 != *cow1); + assert!(*cow0 != *cow2); + assert!(*cow1 == *cow2); + } + + #[test] + fn test_cowarc_clone_weak() { + let mut cow0 = Arc::new(75); + let cow1_weak = Arc::downgrade(&cow0); + + assert!(75 == *cow0); + assert!(75 == *cow1_weak.upgrade().unwrap()); + + *Arc::make_mut(&mut cow0) += 1; + + assert!(76 == *cow0); + assert!(cow1_weak.upgrade().is_none()); + } + + #[test] + fn test_live() { + let x = Arc::new(5); + let y = Arc::downgrade(&x); + assert!(y.upgrade().is_some()); + } + + #[test] + fn 
test_dead() { + let x = Arc::new(5); + let y = Arc::downgrade(&x); + drop(x); + assert!(y.upgrade().is_none()); + } + + #[test] + fn weak_self_cyclic() { + struct Cycle { + x: Mutex<Option<Weak<Cycle>>>, + } + + let a = Arc::new(Cycle { x: Mutex::new(None) }); + let b = Arc::downgrade(&a.clone()); + *a.x.lock().unwrap() = Some(b); + + // hopefully we don't double-free (or leak)... + } + + #[test] + fn drop_arc() { + let mut canary = atomic::AtomicUsize::new(0); + let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); + drop(x); + assert!(canary.load(Acquire) == 1); + } + + #[test] + fn drop_arc_weak() { + let mut canary = atomic::AtomicUsize::new(0); + let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); + let arc_weak = Arc::downgrade(&arc); + assert!(canary.load(Acquire) == 0); + drop(arc); + assert!(canary.load(Acquire) == 1); + drop(arc_weak); + } + + #[test] + fn test_strong_count() { + let a = Arc::new(0); + assert!(Arc::strong_count(&a) == 1); + let w = Arc::downgrade(&a); + assert!(Arc::strong_count(&a) == 1); + let b = w.upgrade().expect(""); + assert!(Arc::strong_count(&b) == 2); + assert!(Arc::strong_count(&a) == 2); + drop(w); + drop(a); + assert!(Arc::strong_count(&b) == 1); + let c = b.clone(); + assert!(Arc::strong_count(&b) == 2); + assert!(Arc::strong_count(&c) == 2); + } + + #[test] + fn test_weak_count() { + let a = Arc::new(0); + assert!(Arc::strong_count(&a) == 1); + assert!(Arc::weak_count(&a) == 0); + let w = Arc::downgrade(&a); + assert!(Arc::strong_count(&a) == 1); + assert!(Arc::weak_count(&a) == 1); + let x = w.clone(); + assert!(Arc::weak_count(&a) == 2); + drop(w); + drop(x); + assert!(Arc::strong_count(&a) == 1); + assert!(Arc::weak_count(&a) == 0); + let c = a.clone(); + assert!(Arc::strong_count(&a) == 2); + assert!(Arc::weak_count(&a) == 0); + let d = Arc::downgrade(&c); + assert!(Arc::weak_count(&c) == 1); + assert!(Arc::strong_count(&c) == 2); + + drop(a); + drop(c); + drop(d); + } + + #[test] + fn show_arc() { + let a = Arc::new(5); + assert_eq!(format!("{:?}", a), "5"); + } + + // Make sure deriving works with Arc<T> + #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)] + struct Foo { + inner: Arc<i32>, + } + + #[test] + fn test_unsized() { + let x: Arc<[i32]> = Arc::new([1, 2, 3]); + assert_eq!(format!("{:?}", x), "[1, 2, 3]"); + let y = Arc::downgrade(&x.clone()); + drop(x); + assert!(y.upgrade().is_none()); + } + + #[test] + fn test_from_owned() { + let foo = 123; + let foo_arc = Arc::from(foo); + assert!(123 == *foo_arc); + } + + #[test] + fn test_new_weak() { + let foo: Weak<usize> = Weak::new(); + assert!(foo.upgrade().is_none()); + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> borrow::Borrow<T> for Arc<T> { + fn borrow(&self) -> &T { + &**self + } +} + +#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] +impl<T: ?Sized> AsRef<T> for Arc<T> { + fn as_ref(&self) -> &T { + &**self + } +} diff --git a/liballoc/boxed.rs b/liballoc/boxed.rs new file mode 100644 index 0000000..7bdf9ea --- /dev/null +++ b/liballoc/boxed.rs @@ -0,0 +1,643 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! A pointer type for heap allocation. +//! +//! `Box<T>`, casually referred to as a 'box', provides the simplest form of +//! heap allocation in Rust. Boxes provide ownership for this allocation, and +//! drop their contents when they go out of scope. +//! +//! # Examples +//! +//! Creating a box: +//! +//! ``` +//! let x = Box::new(5); +//! ``` +//! +//! Creating a recursive data structure: +//! +//! ``` +//! #[derive(Debug)] +//! enum List<T> { +//! Cons(T, Box<List<T>>), +//! Nil, +//! } +//! +//! fn main() { +//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil)))); +//! println!("{:?}", list); +//! } +//! ``` +//! +//! This will print `Cons(1, Cons(2, Nil))`. +//! +//! Recursive structures must be boxed, because if the definition of `Cons` +//! looked like this: +//! +//! ```rust,ignore +//! Cons(T, List<T>), +//! ``` +//! +//! It wouldn't work. This is because the size of a `List` depends on how many +//! elements are in the list, and so we don't know how much memory to allocate +//! for a `Cons`. By introducing a `Box`, which has a defined size, we know how +//! big `Cons` needs to be. + +#![stable(feature = "rust1", since = "1.0.0")] + +use heap; +use raw_vec::RawVec; + +use core::any::Any; +use core::borrow; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{self, Hash}; +use core::marker::{self, Unsize}; +use core::mem; +use core::ops::{CoerceUnsized, Deref, DerefMut}; +use core::ops::{Placer, Boxed, Place, InPlace, BoxPlace}; +use core::ptr::{self, Unique}; +use core::raw::TraitObject; +use core::convert::From; + +/// A value that represents the heap. This is the default place that the `box` +/// keyword allocates into when no place is supplied. +/// +/// The following two examples are equivalent: +/// +/// ``` +/// #![feature(box_heap)] +/// +/// #![feature(box_syntax, placement_in_syntax)] +/// use std::boxed::HEAP; +/// +/// fn main() { +/// let foo: Box<i32> = in HEAP { 5 }; +/// let foo = box 5; +/// } +/// ``` +#[unstable(feature = "box_heap", + reason = "may be renamed; uncertain about custom allocator design", + issue = "27779")] +pub const HEAP: ExchangeHeapSingleton = ExchangeHeapSingleton { _force_singleton: () }; + +/// This is the singleton type used solely for `boxed::HEAP`. +#[unstable(feature = "box_heap", + reason = "may be renamed; uncertain about custom allocator design", + issue = "27779")] +#[derive(Copy, Clone)] +pub struct ExchangeHeapSingleton { + _force_singleton: (), +} + +/// A pointer type for heap allocation. +/// +/// See the [module-level documentation](../../std/boxed/index.html) for more. +#[lang = "owned_box"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Box<T: ?Sized>(Unique<T>); + +/// `IntermediateBox` represents uninitialized backing storage for `Box`. +/// +/// FIXME (pnkfelix): Ideally we would just reuse `Box<T>` instead of +/// introducing a separate `IntermediateBox<T>`; but then you hit +/// issues when you e.g. attempt to destructure an instance of `Box`, +/// since it is a lang item and so it gets special handling by the +/// compiler. Easier just to make this parallel type for now. +/// +/// FIXME (pnkfelix): Currently the `box` protocol only supports +/// creating instances of sized types.
This IntermediateBox is +/// designed to be forward-compatible with a future protocol that +/// supports creating instances of unsized types; that is why the type +/// parameter has the `?Sized` generalization marker, and is also why +/// this carries an explicit size. However, it probably does not need +/// to carry the explicit alignment; that is just a work-around for +/// the fact that the `align_of` intrinsic currently requires the +/// input type to be Sized (which I do not think is strictly +/// necessary). +#[unstable(feature = "placement_in", + reason = "placement box design is still being worked out.", + issue = "27779")] +pub struct IntermediateBox<T: ?Sized> { + ptr: *mut u8, + size: usize, + align: usize, + marker: marker::PhantomData<*mut T>, +} + +#[unstable(feature = "placement_in", + reason = "placement box design is still being worked out.", + issue = "27779")] +impl<T> Place<T> for IntermediateBox<T> { + fn pointer(&mut self) -> *mut T { + self.ptr as *mut T + } +} + +unsafe fn finalize<T>(b: IntermediateBox<T>) -> Box<T> { + let p = b.ptr as *mut T; + mem::forget(b); + mem::transmute(p) +} + +fn make_place<T>() -> IntermediateBox<T> { + let size = mem::size_of::<T>(); + let align = mem::align_of::<T>(); + + let p = if size == 0 { + heap::EMPTY as *mut u8 + } else { + let p = unsafe { heap::allocate(size, align) }; + if p.is_null() { + panic!("Box make_place allocation failure."); + } + p + }; + + IntermediateBox { + ptr: p, + size: size, + align: align, + marker: marker::PhantomData, + } +} + +#[unstable(feature = "placement_in", + reason = "placement box design is still being worked out.", + issue = "27779")] +impl<T> BoxPlace<T> for IntermediateBox<T> { + fn make_place() -> IntermediateBox<T> { + make_place() + } +} + +#[unstable(feature = "placement_in", + reason = "placement box design is still being worked out.", + issue = "27779")] +impl<T> InPlace<T> for IntermediateBox<T> { + type Owner = Box<T>; + unsafe fn finalize(self) -> Box<T> { + finalize(self) + } +} + +#[unstable(feature = "placement_new_protocol", issue = "27779")] +impl<T> Boxed for Box<T> { + type Data = T; + type Place = IntermediateBox<T>; + unsafe fn finalize(b: IntermediateBox<T>) -> Box<T> { + finalize(b) + } +} + +#[unstable(feature = "placement_in", + reason = "placement box design is still being worked out.", + issue = "27779")] +impl<T> Placer<T> for ExchangeHeapSingleton { + type Place = IntermediateBox<T>; + + fn make_place(self) -> IntermediateBox<T> { + make_place() + } +} + +#[unstable(feature = "placement_in", + reason = "placement box design is still being worked out.", + issue = "27779")] +impl<T: ?Sized> Drop for IntermediateBox<T> { + fn drop(&mut self) { + if self.size > 0 { + unsafe { heap::deallocate(self.ptr, self.size, self.align) } + } + } +} + +impl<T> Box<T> { + /// Allocates memory on the heap and then places `x` into it. + /// + /// # Examples + /// + /// ``` + /// let five = Box::new(5); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline(always)] + pub fn new(x: T) -> Box<T> { + box x + } +} + +impl<T: ?Sized> Box<T> { + /// Constructs a box from a raw pointer. + /// + /// After calling this function, the raw pointer is owned by the + /// resulting `Box`. Specifically, the `Box` destructor will call + /// the destructor of `T` and free the allocated memory. 
Since the + /// way `Box` allocates and releases memory is unspecified, the + /// only valid pointer to pass to this function is the one taken + /// from another `Box` via the `Box::into_raw` function. + /// + /// This function is unsafe because improper use may lead to + /// memory problems. For example, a double-free may occur if the + /// function is called twice on the same raw pointer. + #[stable(feature = "box_raw", since = "1.4.0")] + #[inline] + pub unsafe fn from_raw(raw: *mut T) -> Self { + mem::transmute(raw) + } + + /// Consumes the `Box`, returning the wrapped raw pointer. + /// + /// After calling this function, the caller is responsible for the + /// memory previously managed by the `Box`. In particular, the + /// caller should properly destroy `T` and release the memory. The + /// proper way to do so is to convert the raw pointer back into a + /// `Box` with the `Box::from_raw` function. + /// + /// # Examples + /// + /// ``` + /// let seventeen = Box::new(17); + /// let raw = Box::into_raw(seventeen); + /// let boxed_again = unsafe { Box::from_raw(raw) }; + /// ``` + #[stable(feature = "box_raw", since = "1.4.0")] + #[inline] + pub fn into_raw(b: Box<T>) -> *mut T { + unsafe { mem::transmute(b) } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: Default> Default for Box<T> { + fn default() -> Box<T> { + box Default::default() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T> Default for Box<[T]> { + fn default() -> Box<[T]> { + Box::<[T; 0]>::new([]) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: Clone> Clone for Box<T> { + /// Returns a new box with a `clone()` of this box's contents. + /// + /// # Examples + /// + /// ``` + /// let x = Box::new(5); + /// let y = x.clone(); + /// ``` + #[rustfmt_skip] + #[inline] + fn clone(&self) -> Box<T> { + box { (**self).clone() } + } + /// Copies `source`'s contents into `self` without creating a new allocation. 
+ /// + /// # Examples + /// + /// ``` + /// let x = Box::new(5); + /// let mut y = Box::new(10); + /// + /// y.clone_from(&x); + /// + /// assert_eq!(*y, 5); + /// ``` + #[inline] + fn clone_from(&mut self, source: &Box<T>) { + (**self).clone_from(&(**source)); + } +} + + +#[stable(feature = "box_slice_clone", since = "1.3.0")] +impl Clone for Box<str> { + fn clone(&self) -> Self { + let len = self.len(); + let buf = RawVec::with_capacity(len); + unsafe { + ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len); + mem::transmute(buf.into_box()) // bytes to str ~magic + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + PartialEq> PartialEq for Box<T> { + #[inline] + fn eq(&self, other: &Box<T>) -> bool { + PartialEq::eq(&**self, &**other) + } + #[inline] + fn ne(&self, other: &Box<T>) -> bool { + PartialEq::ne(&**self, &**other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> { + #[inline] + fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> { + PartialOrd::partial_cmp(&**self, &**other) + } + #[inline] + fn lt(&self, other: &Box<T>) -> bool { + PartialOrd::lt(&**self, &**other) + } + #[inline] + fn le(&self, other: &Box<T>) -> bool { + PartialOrd::le(&**self, &**other) + } + #[inline] + fn ge(&self, other: &Box<T>) -> bool { + PartialOrd::ge(&**self, &**other) + } + #[inline] + fn gt(&self, other: &Box<T>) -> bool { + PartialOrd::gt(&**self, &**other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + Ord> Ord for Box<T> { + #[inline] + fn cmp(&self, other: &Box<T>) -> Ordering { + Ord::cmp(&**self, &**other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + Eq> Eq for Box<T> {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized + Hash> Hash for Box<T> { + fn hash<H: hash::Hasher>(&self, state: &mut H) { + (**self).hash(state); + } +} + +#[stable(feature = "from_for_ptrs", since = "1.6.0")] +impl<T> From<T> for Box<T> { + fn from(t: T) -> Self { + Box::new(t) + } +} + +impl Box<Any> { + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + /// Attempt to downcast the box to a concrete type. + pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<Any>> { + if self.is::<T>() { + unsafe { + // Get the raw representation of the trait object + let raw = Box::into_raw(self); + let to: TraitObject = mem::transmute::<*mut Any, TraitObject>(raw); + + // Extract the data pointer + Ok(Box::from_raw(to.data as *mut T)) + } + } else { + Err(self) + } + } +} + +impl Box<Any + Send> { + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + /// Attempt to downcast the box to a concrete type. 
+ pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<Any + Send>> { + <Box<Any>>::downcast(self).map_err(|s| unsafe { + // reapply the Send marker + mem::transmute::<Box<Any>, Box<Any + Send>>(s) + }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: fmt::Display + ?Sized> fmt::Display for Box<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: fmt::Debug + ?Sized> fmt::Debug for Box<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> fmt::Pointer for Box<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // It's not possible to extract the inner Uniq directly from the Box, + // instead we cast it to a *const which aliases the Unique + let ptr: *const T = &**self; + fmt::Pointer::fmt(&ptr, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Deref for Box<T> { + type Target = T; + + fn deref(&self) -> &T { + &**self + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> DerefMut for Box<T> { + fn deref_mut(&mut self) -> &mut T { + &mut **self + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<I: Iterator + ?Sized> Iterator for Box<I> { + type Item = I::Item; + fn next(&mut self) -> Option<I::Item> { + (**self).next() + } + fn size_hint(&self) -> (usize, Option<usize>) { + (**self).size_hint() + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<I> { + fn next_back(&mut self) -> Option<I::Item> { + (**self).next_back() + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<I> {} + + +/// `FnBox` is a version of the `FnOnce` intended for use with boxed +/// closure objects. The idea is that where one would normally store a +/// `Box<FnOnce()>` in a data structure, you should use +/// `Box<FnBox()>`. The two traits behave essentially the same, except +/// that a `FnBox` closure can only be called if it is boxed. (Note +/// that `FnBox` may be deprecated in the future if `Box<FnOnce()>` +/// closures become directly usable.) +/// +/// ### Example +/// +/// Here is a snippet of code which creates a hashmap full of boxed +/// once closures and then removes them one by one, calling each +/// closure as it is removed. Note that the type of the closures +/// stored in the map is `Box<FnBox() -> i32>` and not `Box<FnOnce() +/// -> i32>`. 
+/// +/// ``` +/// #![feature(fnbox)] +/// +/// use std::boxed::FnBox; +/// use std::collections::HashMap; +/// +/// fn make_map() -> HashMap<i32, Box<FnBox() -> i32>> { +/// let mut map: HashMap<i32, Box<FnBox() -> i32>> = HashMap::new(); +/// map.insert(1, Box::new(|| 22)); +/// map.insert(2, Box::new(|| 44)); +/// map +/// } +/// +/// fn main() { +/// let mut map = make_map(); +/// for i in &[1, 2] { +/// let f = map.remove(&i).unwrap(); +/// assert_eq!(f(), i * 22); +/// } +/// } +/// ``` +#[rustc_paren_sugar] +#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")] +pub trait FnBox<A> { + type Output; + + fn call_box(self: Box<Self>, args: A) -> Self::Output; +} + +#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")] +impl<A, F> FnBox<A> for F where F: FnOnce<A> +{ + type Output = F::Output; + + fn call_box(self: Box<F>, args: A) -> F::Output { + self.call_once(args) + } +} + +#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")] +impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + 'a> { + type Output = R; + + extern "rust-call" fn call_once(self, args: A) -> R { + self.call_box(args) + } +} + +#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "28796")] +impl<'a, A, R> FnOnce<A> for Box<FnBox<A, Output = R> + Send + 'a> { + type Output = R; + + extern "rust-call" fn call_once(self, args: A) -> R { + self.call_box(args) + } +} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {} + +#[stable(feature = "box_slice_clone", since = "1.3.0")] +impl<T: Clone> Clone for Box<[T]> { + fn clone(&self) -> Self { + let mut new = BoxBuilder { + data: RawVec::with_capacity(self.len()), + len: 0, + }; + + let mut target = new.data.ptr(); + + for item in self.iter() { + unsafe { + ptr::write(target, item.clone()); + target = target.offset(1); + }; + + new.len += 1; + } + + return unsafe { new.into_box() }; + + // Helper type for responding to panics correctly. + struct BoxBuilder<T> { + data: RawVec<T>, + len: usize, + } + + impl<T> BoxBuilder<T> { + unsafe fn into_box(self) -> Box<[T]> { + let raw = ptr::read(&self.data); + mem::forget(self); + raw.into_box() + } + } + + impl<T> Drop for BoxBuilder<T> { + fn drop(&mut self) { + let mut data = self.data.ptr(); + let max = unsafe { data.offset(self.len as isize) }; + + while data != max { + unsafe { + ptr::read(data); + data = data.offset(1); + } + } + } + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> borrow::Borrow<T> for Box<T> { + fn borrow(&self) -> &T { + &**self + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> borrow::BorrowMut<T> for Box<T> { + fn borrow_mut(&mut self) -> &mut T { + &mut **self + } +} + +#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] +impl<T: ?Sized> AsRef<T> for Box<T> { + fn as_ref(&self) -> &T { + &**self + } +} + +#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] +impl<T: ?Sized> AsMut<T> for Box<T> { + fn as_mut(&mut self) -> &mut T { + &mut **self + } +} diff --git a/liballoc/boxed_test.rs b/liballoc/boxed_test.rs new file mode 100644 index 0000000..120301a --- /dev/null +++ b/liballoc/boxed_test.rs @@ -0,0 +1,119 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Test for `boxed` mod. + +use core::any::Any; +use core::ops::Deref; +use core::result::Result::{Ok, Err}; +use core::clone::Clone; + +use std::boxed::Box; + +#[test] +fn test_owned_clone() { + let a = Box::new(5); + let b: Box<i32> = a.clone(); + assert!(a == b); +} + +#[derive(PartialEq, Eq)] +struct Test; + +#[test] +fn any_move() { + let a = Box::new(8) as Box<Any>; + let b = Box::new(Test) as Box<Any>; + + match a.downcast::<i32>() { + Ok(a) => { + assert!(a == Box::new(8)); + } + Err(..) => panic!(), + } + match b.downcast::<Test>() { + Ok(a) => { + assert!(a == Box::new(Test)); + } + Err(..) => panic!(), + } + + let a = Box::new(8) as Box<Any>; + let b = Box::new(Test) as Box<Any>; + + assert!(a.downcast::<Box<Test>>().is_err()); + assert!(b.downcast::<Box<i32>>().is_err()); +} + +#[test] +fn test_show() { + let a = Box::new(8) as Box<Any>; + let b = Box::new(Test) as Box<Any>; + let a_str = format!("{:?}", a); + let b_str = format!("{:?}", b); + assert_eq!(a_str, "Any"); + assert_eq!(b_str, "Any"); + + static EIGHT: usize = 8; + static TEST: Test = Test; + let a = &EIGHT as &Any; + let b = &TEST as &Any; + let s = format!("{:?}", a); + assert_eq!(s, "Any"); + let s = format!("{:?}", b); + assert_eq!(s, "Any"); +} + +#[test] +fn deref() { + fn homura<T: Deref<Target = i32>>(_: T) {} + homura(Box::new(765)); +} + +#[test] +fn raw_sized() { + let x = Box::new(17); + let p = Box::into_raw(x); + unsafe { + assert_eq!(17, *p); + *p = 19; + let y = Box::from_raw(p); + assert_eq!(19, *y); + } +} + +#[test] +fn raw_trait() { + trait Foo { + fn get(&self) -> u32; + fn set(&mut self, value: u32); + } + + struct Bar(u32); + + impl Foo for Bar { + fn get(&self) -> u32 { + self.0 + } + + fn set(&mut self, value: u32) { + self.0 = value; + } + } + + let x: Box<Foo> = Box::new(Bar(17)); + let p = Box::into_raw(x); + unsafe { + assert_eq!(17, (*p).get()); + (*p).set(19); + let y: Box<Foo> = Box::from_raw(p); + assert_eq!(19, y.get()); + } +} diff --git a/liballoc/heap.rs b/liballoc/heap.rs new file mode 100644 index 0000000..08b403a --- /dev/null +++ b/liballoc/heap.rs @@ -0,0 +1,190 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![unstable(feature = "heap_api", + reason = "the precise API and guarantees it provides may be tweaked \ + slightly, especially to possibly take into account the \ + types being stored to make room for a future \ + tracing garbage collector", + issue = "27700")] + +use core::{isize, usize}; +#[cfg(not(test))] +use core::intrinsics::{size_of, min_align_of}; + +#[allow(improper_ctypes)] +extern "C" { + #[allocator] + fn __rust_allocate(size: usize, align: usize) -> *mut u8; + fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize); + fn __rust_reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8; + fn __rust_reallocate_inplace(ptr: *mut u8, + old_size: usize, + size: usize, + align: usize) + -> usize; + fn __rust_usable_size(size: usize, align: usize) -> usize; +} + +#[inline(always)] +fn check_size_and_alignment(size: usize, align: usize) { + debug_assert!(size != 0); + debug_assert!(size <= isize::MAX as usize, + "Tried to allocate too much: {} bytes", + size); + debug_assert!(usize::is_power_of_two(align), + "Invalid alignment of allocation: {}", + align); +} + +// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias` + +/// Return a pointer to `size` bytes of memory aligned to `align`. +/// +/// On failure, return a null pointer. +/// +/// Behavior is undefined if the requested size is 0 or the alignment is not a +/// power of 2. The alignment must be no larger than the largest supported page +/// size on the platform. +#[inline] +pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 { + check_size_and_alignment(size, align); + __rust_allocate(size, align) +} + +/// Resize the allocation referenced by `ptr` to `size` bytes. +/// +/// On failure, return a null pointer and leave the original allocation intact. +/// +/// If the allocation was relocated, the memory at the passed-in pointer is +/// undefined after the call. +/// +/// Behavior is undefined if the requested size is 0 or the alignment is not a +/// power of 2. The alignment must be no larger than the largest supported page +/// size on the platform. +/// +/// The `old_size` and `align` parameters are the parameters that were used to +/// create the allocation referenced by `ptr`. The `old_size` parameter may be +/// any value in range_inclusive(requested_size, usable_size). +#[inline] +pub unsafe fn reallocate(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 { + check_size_and_alignment(size, align); + __rust_reallocate(ptr, old_size, size, align) +} + +/// Resize the allocation referenced by `ptr` to `size` bytes. +/// +/// If the operation succeeds, it returns `usable_size(size, align)` and if it +/// fails (or is a no-op) it returns `usable_size(old_size, align)`. +/// +/// Behavior is undefined if the requested size is 0 or the alignment is not a +/// power of 2. The alignment must be no larger than the largest supported page +/// size on the platform. +/// +/// The `old_size` and `align` parameters are the parameters that were used to +/// create the allocation referenced by `ptr`. The `old_size` parameter may be +/// any value in range_inclusive(requested_size, usable_size). +#[inline] +pub unsafe fn reallocate_inplace(ptr: *mut u8, + old_size: usize, + size: usize, + align: usize) + -> usize { + check_size_and_alignment(size, align); + __rust_reallocate_inplace(ptr, old_size, size, align) +} + +/// Deallocates the memory referenced by `ptr`. +/// +/// The `ptr` parameter must not be null. 
+///
+/// The `old_size` and `align` parameters are the parameters that were used to
+/// create the allocation referenced by `ptr`. The `old_size` parameter may be
+/// any value in range_inclusive(requested_size, usable_size).
+#[inline]
+pub unsafe fn deallocate(ptr: *mut u8, old_size: usize, align: usize) {
+    __rust_deallocate(ptr, old_size, align)
+}
+
+/// Returns the usable size of an allocation created with the specified
+/// `size` and `align`.
+#[inline]
+pub fn usable_size(size: usize, align: usize) -> usize {
+    unsafe { __rust_usable_size(size, align) }
+}
+
+/// An arbitrary non-null address to represent zero-size allocations.
+///
+/// This preserves the non-null invariant for types like `Box<T>`. The address
+/// may overlap with non-zero-size memory allocations.
+pub const EMPTY: *mut () = 0x1 as *mut ();
+
+/// The allocator for unique pointers.
+#[cfg(not(test))]
+#[lang = "exchange_malloc"]
+#[inline]
+unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
+    if size == 0 {
+        EMPTY as *mut u8
+    } else {
+        let ptr = allocate(size, align);
+        if ptr.is_null() {
+            ::oom()
+        }
+        ptr
+    }
+}
+
+#[cfg(not(test))]
+#[lang = "exchange_free"]
+#[inline]
+unsafe fn exchange_free(ptr: *mut u8, old_size: usize, align: usize) {
+    deallocate(ptr, old_size, align);
+}
+
+#[cfg(not(test))]
+#[lang = "box_free"]
+#[inline]
+unsafe fn box_free<T>(ptr: *mut T) {
+    let size = size_of::<T>();
+    // We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
+    if size != 0 {
+        deallocate(ptr as *mut u8, size, min_align_of::<T>());
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    extern crate test;
+    use self::test::Bencher;
+    use boxed::Box;
+    use heap;
+
+    #[test]
+    fn basic_reallocate_inplace_noop() {
+        unsafe {
+            let size = 4000;
+            let ptr = heap::allocate(size, 8);
+            if ptr.is_null() {
+                ::oom()
+            }
+            let ret = heap::reallocate_inplace(ptr, size, size, 8);
+            heap::deallocate(ptr, size, 8);
+            assert_eq!(ret, heap::usable_size(size, 8));
+        }
+    }
+
+    #[bench]
+    fn alloc_owned_small(b: &mut Bencher) {
+        b.iter(|| {
+            let _: Box<_> = box 10;
+        })
+    }
+}
diff --git a/liballoc/lib.rs b/liballoc/lib.rs
new file mode 100644
index 0000000..c2dad9a
--- /dev/null
+++ b/liballoc/lib.rs
@@ -0,0 +1,127 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # The Rust core allocation library
+//!
+//! This is the lowest level library through which allocation in Rust can be
+//! performed.
+//!
+//! This library, like libcore, is not intended for general usage, but rather as
+//! a building block of other libraries. The types and interfaces in this
+//! library are reexported through the [standard library](../std/index.html),
+//! and should not be used through this library.
+//!
+//! Currently, there are four major definitions in this library.
+//!
+//! ## Boxed values
+//!
+//! The [`Box`](boxed/index.html) type is a smart pointer type. There can
+//! only be one owner of a `Box`, and the owner can decide to mutate the
+//! contents, which live on the heap.
+//!
+//! This type can be sent among threads efficiently as the size of a `Box` value
+//! is the same as that of a pointer. Tree-like data structures are often built
+//! with boxes because each node often has only one owner, the parent.
+//!
+//! ## Reference counted pointers
+//!
+//! The [`Rc`](rc/index.html) type is a non-threadsafe reference-counted pointer
+//! type intended for sharing memory within a thread. An `Rc` pointer wraps a
+//! type, `T`, and only allows access to `&T`, a shared reference.
+//!
+//! This type is useful when inherited mutability (such as using `Box`) is too
+//! constraining for an application, and is often paired with the `Cell` or
+//! `RefCell` types in order to allow mutation.
+//!
+//! ## Atomically reference counted pointers
+//!
+//! The [`Arc`](arc/index.html) type is the threadsafe equivalent of the `Rc`
+//! type. It provides all the same functionality of `Rc`, except it requires
+//! that the contained type `T` is shareable. Additionally, `Arc<T>` is itself
+//! sendable while `Rc<T>` is not.
+//!
+//! This type allows for shared access to the contained data, and is often
+//! paired with synchronization primitives such as mutexes to allow mutation of
+//! shared resources.
+//!
+//! ## Heap interfaces
+//!
+//! The [`heap`](heap/index.html) module defines the low-level interface to the
+//! default global allocator. It is not compatible with the libc allocator API.
+
+#![crate_name = "alloc"]
+#![crate_type = "rlib"]
+#![allow(unused_attributes)]
+#![unstable(feature = "alloc",
+            reason = "this library is unlikely to be stabilized in its current \
+                      form or name",
+            issue = "27783")]
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
+       html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+       html_root_url = "https://doc.rust-lang.org/nightly/",
+       issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
+       test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))]
+#![no_std]
+#![needs_allocator]
+#![cfg_attr(not(stage0), deny(warnings))]
+
+#![feature(allocator)]
+#![feature(box_syntax)]
+#![feature(coerce_unsized)]
+#![feature(const_fn)]
+#![feature(core_intrinsics)]
+#![feature(custom_attribute)]
+#![feature(dropck_parametricity)]
+#![feature(fundamental)]
+#![feature(lang_items)]
+#![feature(needs_allocator)]
+#![feature(optin_builtin_traits)]
+#![feature(placement_in_syntax)]
+#![feature(shared)]
+#![feature(staged_api)]
+#![feature(unboxed_closures)]
+#![feature(unique)]
+#![feature(unsafe_no_drop_flag, filling_drop)]
+#![feature(unsize)]
+#![feature(extended_compare_and_swap)]
+
+#![cfg_attr(not(test), feature(raw, fn_traits, placement_new_protocol))]
+#![cfg_attr(test, feature(test, box_heap))]
+
+// Allow testing this library
+
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+
+// Heaps provided for low-level allocation strategies
+
+pub mod heap;
+
+// Primitive types using the heaps above
+
+// Need to conditionally define the mod from `boxed.rs` to avoid
+// duplicating the lang-items when building in test cfg; but also need
+// to allow code to have `use boxed::HEAP;`
+// and `use boxed::Box;` declarations.
+#[cfg(not(test))]
+pub mod boxed;
+#[cfg(test)]
+mod boxed {
+    pub use std::boxed::{Box, HEAP};
+}
+#[cfg(test)]
+mod boxed_test;
+pub mod arc;
+pub mod rc;
+pub mod raw_vec;
+pub mod oom;
+
+pub use oom::oom;
diff --git a/liballoc/oom.rs b/liballoc/oom.rs
new file mode 100644
index 0000000..d355d59
--- /dev/null
+++ b/liballoc/oom.rs
@@ -0,0 +1,42 @@
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::sync::atomic::{AtomicPtr, Ordering};
+use core::mem;
+use core::intrinsics;
+
+static OOM_HANDLER: AtomicPtr<()> = AtomicPtr::new(default_oom_handler as *mut ());
+
+fn default_oom_handler() -> ! {
+    // The default handler can't do much more since we can't assume the presence
+    // of libc or any way of printing an error message.
+    unsafe { intrinsics::abort() }
+}
+
+/// Common out-of-memory routine
+#[cold]
+#[inline(never)]
+#[unstable(feature = "oom", reason = "not a scrutinized interface",
+           issue = "27700")]
+pub fn oom() -> ! {
+    let value = OOM_HANDLER.load(Ordering::SeqCst);
+    let handler: fn() -> ! = unsafe { mem::transmute(value) };
+    handler();
+}
+
+/// Set a custom handler for out-of-memory conditions
+///
+/// To avoid recursive OOM failures, it is critical that the OOM handler does
+/// not allocate any memory itself.
+#[unstable(feature = "oom", reason = "not a scrutinized interface",
+           issue = "27700")]
+pub fn set_oom_handler(handler: fn() -> !) {
+    OOM_HANDLER.store(handler as *mut (), Ordering::SeqCst);
+}
diff --git a/liballoc/raw_vec.rs b/liballoc/raw_vec.rs
new file mode 100644
index 0000000..c407cef
--- /dev/null
+++ b/liballoc/raw_vec.rs
@@ -0,0 +1,629 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::ptr::Unique;
+use core::mem;
+use core::slice;
+use heap;
+use super::oom;
+use super::boxed::Box;
+use core::ops::Drop;
+use core::cmp;
+
+/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
+/// a buffer of memory on the heap without having to worry about all the corner cases
+/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
+/// In particular:
+///
+/// * Produces heap::EMPTY on zero-sized types
+/// * Produces heap::EMPTY on zero-length allocations
+/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
+/// * Guards against 32-bit systems allocating more than isize::MAX bytes
+/// * Guards against overflowing your length
+/// * Aborts on OOM
+/// * Avoids freeing heap::EMPTY
+/// * Contains a ptr::Unique and thus endows the user with all related benefits
+///
+/// This type does not in any way inspect the memory that it manages. When dropped it *will*
+/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
+/// to handle the actual things *stored* inside of a RawVec.
+///
+/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
+/// This enables you to use capacity-growing logic to catch the overflows in your length
+/// that might occur with zero-sized types.
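+///
+/// As an illustration of that zero-sized-type behavior (an editor's sketch,
+/// not part of the original commit):
+///
+/// ```ignore
+/// // A RawVec of a zero-sized type never allocates, and reports
+/// // `usize::MAX` capacity so that length-overflow checks still fire.
+/// let v: RawVec<()> = RawVec::new();
+/// assert_eq!(v.cap(), usize::MAX);
+/// ```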
+///
+/// However this means that you need to be careful when roundtripping this type
+/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
+/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
+/// field. This allows zero-sized types to not be special-cased by consumers of
+/// this type.
+#[unsafe_no_drop_flag]
+pub struct RawVec<T> {
+    ptr: Unique<T>,
+    cap: usize,
+}
+
+impl<T> RawVec<T> {
+    /// Creates the biggest possible RawVec without allocating. If T has positive
+    /// size, then this makes a RawVec with capacity 0. If T has 0 size, then
+    /// it makes a RawVec with capacity `usize::MAX`. Useful for implementing
+    /// delayed allocation.
+    pub fn new() -> Self {
+        unsafe {
+            // !0 is usize::MAX. This branch should be stripped at compile time.
+            let cap = if mem::size_of::<T>() == 0 {
+                !0
+            } else {
+                0
+            };
+
+            // heap::EMPTY doubles as "unallocated" and "zero-sized allocation"
+            RawVec {
+                ptr: Unique::new(heap::EMPTY as *mut T),
+                cap: cap,
+            }
+        }
+    }
+
+    /// Creates a RawVec with exactly the capacity and alignment requirements
+    /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
+    /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
+    /// get a RawVec with the requested capacity!
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM
+    pub fn with_capacity(cap: usize) -> Self {
+        unsafe {
+            let elem_size = mem::size_of::<T>();
+
+            let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
+            alloc_guard(alloc_size);
+
+            // handles ZSTs and `cap = 0` alike
+            let ptr = if alloc_size == 0 {
+                heap::EMPTY as *mut u8
+            } else {
+                let align = mem::align_of::<T>();
+                let ptr = heap::allocate(alloc_size, align);
+                if ptr.is_null() {
+                    oom()
+                }
+                ptr
+            };
+
+            RawVec {
+                ptr: Unique::new(ptr as *mut _),
+                cap: cap,
+            }
+        }
+    }
+
+    /// Reconstitutes a RawVec from a pointer and capacity.
+    ///
+    /// # Undefined Behavior
+    ///
+    /// The ptr must be allocated, and with the given capacity. The
+    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
+    /// If the ptr and capacity come from a RawVec, then this is guaranteed.
+    pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
+        RawVec {
+            ptr: Unique::new(ptr),
+            cap: cap,
+        }
+    }
+
+    /// Converts a `Box<[T]>` into a `RawVec<T>`.
+    pub fn from_box(mut slice: Box<[T]>) -> Self {
+        unsafe {
+            let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
+            mem::forget(slice);
+            result
+        }
+    }
+}
+
+impl<T> RawVec<T> {
+    /// Gets a raw pointer to the start of the allocation. Note that this is
+    /// heap::EMPTY if `cap = 0` or T is zero-sized. In the former case, you must
+    /// be careful.
+    pub fn ptr(&self) -> *mut T {
+        *self.ptr
+    }
+
+    /// Gets the capacity of the allocation.
+    ///
+    /// This will always be `usize::MAX` if `T` is zero-sized.
+    pub fn cap(&self) -> usize {
+        if mem::size_of::<T>() == 0 {
+            !0
+        } else {
+            self.cap
+        }
+    }
+
+    /// Doubles the size of the type's backing allocation. This is common enough
+    /// to want to do that it's easiest to just have a dedicated method. Slightly
+    /// more efficient logic can be provided for this than the general case.
+ /// + /// This function is ideal for when pushing elements one-at-a-time because + /// you don't need to incur the costs of the more general computations + /// reserve needs to do to guard against overflow. You do however need to + /// manually check if your `len == cap`. + /// + /// # Panics + /// + /// * Panics if T is zero-sized on the assumption that you managed to exhaust + /// all `usize::MAX` slots in your imaginary buffer. + /// * Panics on 32-bit platforms if the requested capacity exceeds + /// `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM + /// + /// # Examples + /// + /// ```ignore + /// struct MyVec<T> { + /// buf: RawVec<T>, + /// len: usize, + /// } + /// + /// impl<T> MyVec<T> { + /// pub fn push(&mut self, elem: T) { + /// if self.len == self.buf.cap() { self.buf.double(); } + /// // double would have aborted or panicked if the len exceeded + /// // `isize::MAX` so this is safe to do unchecked now. + /// unsafe { + /// ptr::write(self.buf.ptr().offset(self.len as isize), elem); + /// } + /// self.len += 1; + /// } + /// } + /// ``` + #[inline(never)] + #[cold] + pub fn double(&mut self) { + unsafe { + let elem_size = mem::size_of::<T>(); + + // since we set the capacity to usize::MAX when elem_size is + // 0, getting to here necessarily means the RawVec is overfull. + assert!(elem_size != 0, "capacity overflow"); + + let align = mem::align_of::<T>(); + + let (new_cap, ptr) = if self.cap == 0 { + // skip to 4 because tiny Vec's are dumb; but not if that would cause overflow + let new_cap = if elem_size > (!0) / 8 { + 1 + } else { + 4 + }; + let ptr = heap::allocate(new_cap * elem_size, align); + (new_cap, ptr) + } else { + // Since we guarantee that we never allocate more than isize::MAX bytes, + // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow + let new_cap = 2 * self.cap; + let new_alloc_size = new_cap * elem_size; + alloc_guard(new_alloc_size); + let ptr = heap::reallocate(self.ptr() as *mut _, + self.cap * elem_size, + new_alloc_size, + align); + (new_cap, ptr) + }; + + // If allocate or reallocate fail, we'll get `null` back + if ptr.is_null() { + oom() + } + + self.ptr = Unique::new(ptr as *mut _); + self.cap = new_cap; + } + } + + /// Attempts to double the size of the type's backing allocation in place. This is common + /// enough to want to do that it's easiest to just have a dedicated method. Slightly + /// more efficient logic can be provided for this than the general case. + /// + /// Returns true if the reallocation attempt has succeeded, or false otherwise. + /// + /// # Panics + /// + /// * Panics if T is zero-sized on the assumption that you managed to exhaust + /// all `usize::MAX` slots in your imaginary buffer. + /// * Panics on 32-bit platforms if the requested capacity exceeds + /// `isize::MAX` bytes. + #[inline(never)] + #[cold] + pub fn double_in_place(&mut self) -> bool { + unsafe { + let elem_size = mem::size_of::<T>(); + let align = mem::align_of::<T>(); + + // since we set the capacity to usize::MAX when elem_size is + // 0, getting to here necessarily means the RawVec is overfull. 
+ assert!(elem_size != 0, "capacity overflow"); + + // Since we guarantee that we never allocate more than isize::MAX bytes, + // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow + let new_cap = 2 * self.cap; + let new_alloc_size = new_cap * elem_size; + + alloc_guard(new_alloc_size); + let size = heap::reallocate_inplace(self.ptr() as *mut _, + self.cap * elem_size, + new_alloc_size, + align); + if size >= new_alloc_size { + // We can't directly divide `size`. + self.cap = new_cap; + } + size >= new_alloc_size + } + } + + /// Ensures that the buffer contains at least enough space to hold + /// `used_cap + needed_extra_cap` elements. If it doesn't already, + /// will reallocate the minimum possible amount of memory necessary. + /// Generally this will be exactly the amount of memory necessary, + /// but in principle the allocator is free to give back more than + /// we asked for. + /// + /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behavior of this function may break. + /// + /// # Panics + /// + /// * Panics if the requested capacity exceeds `usize::MAX` bytes. + /// * Panics on 32-bit platforms if the requested capacity exceeds + /// `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM + pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) { + unsafe { + let elem_size = mem::size_of::<T>(); + let align = mem::align_of::<T>(); + + // NOTE: we don't early branch on ZSTs here because we want this + // to actually catch "asking for more than usize::MAX" in that case. + // If we make it past the first branch then we are guaranteed to + // panic. + + // Don't actually need any more capacity. + // Wrapping in case they gave a bad `used_cap`. + if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { + return; + } + + // Nothing we can really do about these checks :( + let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow"); + let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow"); + alloc_guard(new_alloc_size); + + let ptr = if self.cap == 0 { + heap::allocate(new_alloc_size, align) + } else { + heap::reallocate(self.ptr() as *mut _, + self.cap * elem_size, + new_alloc_size, + align) + }; + + // If allocate or reallocate fail, we'll get `null` back + if ptr.is_null() { + oom() + } + + self.ptr = Unique::new(ptr as *mut _); + self.cap = new_cap; + } + } + + /// Calculates the buffer's new size given that it'll hold `used_cap + + /// needed_extra_cap` elements. This logic is used in amortized reserve methods. + /// Returns `(new_capacity, new_alloc_size)`. + fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> (usize, usize) { + let elem_size = mem::size_of::<T>(); + // Nothing we can really do about these checks :( + let required_cap = used_cap.checked_add(needed_extra_cap) + .expect("capacity overflow"); + // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. + let double_cap = self.cap * 2; + // `double_cap` guarantees exponential growth. + let new_cap = cmp::max(double_cap, required_cap); + let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow"); + (new_cap, new_alloc_size) + } + + /// Ensures that the buffer contains at least enough space to hold + /// `used_cap + needed_extra_cap` elements. 
If it doesn't already have + /// enough capacity, will reallocate enough space plus comfortable slack + /// space to get amortized `O(1)` behavior. Will limit this behavior + /// if it would needlessly cause itself to panic. + /// + /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behavior of this function may break. + /// + /// This is ideal for implementing a bulk-push operation like `extend`. + /// + /// # Panics + /// + /// * Panics if the requested capacity exceeds `usize::MAX` bytes. + /// * Panics on 32-bit platforms if the requested capacity exceeds + /// `isize::MAX` bytes. + /// + /// # Aborts + /// + /// Aborts on OOM + /// + /// # Examples + /// + /// ```ignore + /// struct MyVec<T> { + /// buf: RawVec<T>, + /// len: usize, + /// } + /// + /// impl<T> MyVec<T> { + /// pub fn push_all(&mut self, elems: &[T]) { + /// self.buf.reserve(self.len, elems.len()); + /// // reserve would have aborted or panicked if the len exceeded + /// // `isize::MAX` so this is safe to do unchecked now. + /// for x in elems { + /// unsafe { + /// ptr::write(self.buf.ptr().offset(self.len as isize), x.clone()); + /// } + /// self.len += 1; + /// } + /// } + /// } + /// ``` + pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { + unsafe { + let elem_size = mem::size_of::<T>(); + let align = mem::align_of::<T>(); + + // NOTE: we don't early branch on ZSTs here because we want this + // to actually catch "asking for more than usize::MAX" in that case. + // If we make it past the first branch then we are guaranteed to + // panic. + + // Don't actually need any more capacity. + // Wrapping in case they give a bad `used_cap` + if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { + return; + } + + let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap); + // FIXME: may crash and burn on over-reserve + alloc_guard(new_alloc_size); + + let ptr = if self.cap == 0 { + heap::allocate(new_alloc_size, align) + } else { + heap::reallocate(self.ptr() as *mut _, + self.cap * elem_size, + new_alloc_size, + align) + }; + + // If allocate or reallocate fail, we'll get `null` back + if ptr.is_null() { + oom() + } + + self.ptr = Unique::new(ptr as *mut _); + self.cap = new_cap; + } + } + + /// Attempts to ensure that the buffer contains at least enough space to hold + /// `used_cap + needed_extra_cap` elements. If it doesn't already have + /// enough capacity, will reallocate in place enough space plus comfortable slack + /// space to get amortized `O(1)` behaviour. Will limit this behaviour + /// if it would needlessly cause itself to panic. + /// + /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate + /// the requested space. This is not really unsafe, but the unsafe + /// code *you* write that relies on the behaviour of this function may break. + /// + /// Returns true if the reallocation attempt has succeeded, or false otherwise. + /// + /// # Panics + /// + /// * Panics if the requested capacity exceeds `usize::MAX` bytes. + /// * Panics on 32-bit platforms if the requested capacity exceeds + /// `isize::MAX` bytes. 
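+    ///
+    /// A sketch of the intended calling pattern (an editor's illustration,
+    /// not part of the original commit; `buf`, `len`, and `extra` are
+    /// hypothetical names):
+    ///
+    /// ```ignore
+    /// // Try to grow without moving the buffer; fall back to a moving
+    /// // `reserve` if the allocator could not extend the block in place.
+    /// if !buf.reserve_in_place(len, extra) {
+    ///     buf.reserve(len, extra);
+    /// }
+    /// ```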
+    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
+        unsafe {
+            let elem_size = mem::size_of::<T>();
+            let align = mem::align_of::<T>();
+
+            // NOTE: we don't early branch on ZSTs here because we want this
+            // to actually catch "asking for more than usize::MAX" in that case.
+            // If we make it past the first branch then we are guaranteed to
+            // panic.
+
+            // Don't actually need any more capacity. If the current `cap` is 0, we can't
+            // reallocate in place.
+            // Wrapping in case they give a bad `used_cap`
+            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap || self.cap == 0 {
+                return false;
+            }
+
+            let (_, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
+            // FIXME: may crash and burn on over-reserve
+            alloc_guard(new_alloc_size);
+
+            let size = heap::reallocate_inplace(self.ptr() as *mut _,
+                                                self.cap * elem_size,
+                                                new_alloc_size,
+                                                align);
+            if size >= new_alloc_size {
+                self.cap = new_alloc_size / elem_size;
+            }
+            size >= new_alloc_size
+        }
+    }
+
+    /// Shrinks the allocation down to the specified amount. If the given amount
+    /// is 0, actually completely deallocates.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the given amount is *larger* than the current capacity.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM.
+    pub fn shrink_to_fit(&mut self, amount: usize) {
+        let elem_size = mem::size_of::<T>();
+        let align = mem::align_of::<T>();
+
+        // Set the `cap` because they might be about to promote to a `Box<[T]>`
+        if elem_size == 0 {
+            self.cap = amount;
+            return;
+        }
+
+        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
+        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
+
+        if amount == 0 {
+            mem::replace(self, RawVec::new());
+        } else if self.cap != amount {
+            unsafe {
+                // Overflow check is unnecessary as the vector is already at
+                // least this large.
+                let ptr = heap::reallocate(self.ptr() as *mut _,
+                                           self.cap * elem_size,
+                                           amount * elem_size,
+                                           align);
+                if ptr.is_null() {
+                    oom()
+                }
+                self.ptr = Unique::new(ptr as *mut _);
+            }
+            self.cap = amount;
+        }
+    }
+
+    /// Converts the entire buffer into `Box<[T]>`.
+    ///
+    /// While it is not *strictly* Undefined Behavior to call
+    /// this procedure while some of the RawVec is uninitialized,
+    /// it certainly makes it trivial to trigger it.
+    ///
+    /// Note that this will correctly reconstitute any `cap` changes
+    /// that may have been performed. (see description of type for details)
+    pub unsafe fn into_box(self) -> Box<[T]> {
+        // NOTE: not calling `cap()` here, actually using the real `cap` field!
+        let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
+        let output: Box<[T]> = Box::from_raw(slice);
+        mem::forget(self);
+        output
+    }
+
+    /// This is a stupid name in the hopes that someone will find this in the
+    /// not too distant future and remove it with the rest of
+    /// #[unsafe_no_drop_flag]
+    pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
+        self.cap != mem::POST_DROP_USIZE
+    }
+}
+
+impl<T> Drop for RawVec<T> {
+    #[unsafe_destructor_blind_to_params]
+    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
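+    ///
+    /// An editor's sketch (not part of the original source) of the caller's
+    /// side of that contract: the stored values must be dropped first.
+    ///
+    /// ```ignore
+    /// unsafe {
+    ///     // Assuming `len` initialized elements were written into `buf`:
+    ///     for i in 0..len {
+    ///         ptr::drop_in_place(buf.ptr().offset(i as isize));
+    ///     }
+    /// } // dropping `buf` now frees the backing memory only
+    /// ```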
+    fn drop(&mut self) {
+        let elem_size = mem::size_of::<T>();
+        if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
+            let align = mem::align_of::<T>();
+
+            let num_bytes = elem_size * self.cap;
+            unsafe {
+                heap::deallocate(*self.ptr as *mut _, num_bytes, align);
+            }
+        }
+    }
+}
+
+
+
+// We need to guarantee the following:
+// * We don't ever allocate `> isize::MAX` byte-size objects
+// * We don't overflow `usize::MAX` and actually allocate too little
+//
+// On 64-bit we just need to check for overflow since trying to allocate
+// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra
+// guard for this in case we're running on a platform which can use all 4GB in
+// user-space. e.g. PAE or x32
+
+#[inline]
+fn alloc_guard(alloc_size: usize) {
+    if mem::size_of::<usize>() < 8 {
+        assert!(alloc_size <= ::core::isize::MAX as usize,
+                "capacity overflow");
+    }
+}
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn reserve_does_not_overallocate() {
+        {
+            let mut v: RawVec<u32> = RawVec::new();
+            // First `reserve` allocates like `reserve_exact`
+            v.reserve(0, 9);
+            assert_eq!(9, v.cap());
+        }
+
+        {
+            let mut v: RawVec<u32> = RawVec::new();
+            v.reserve(0, 7);
+            assert_eq!(7, v.cap());
+            // 97 is more than double of 7, so `reserve` should work
+            // like `reserve_exact`.
+            v.reserve(7, 90);
+            assert_eq!(97, v.cap());
+        }
+
+        {
+            let mut v: RawVec<u32> = RawVec::new();
+            v.reserve(0, 12);
+            assert_eq!(12, v.cap());
+            v.reserve(12, 3);
+            // 3 is less than half of 12, so `reserve` must grow
+            // exponentially. At the time of writing this test grow
+            // factor is 2, so new capacity is 24, however, grow factor
+            // of 1.5 is OK too. Hence `>= 18` in assert.
+            assert!(v.cap() >= 12 + 12 / 2);
+        }
+    }
+
+}
diff --git a/liballoc/rc.rs b/liballoc/rc.rs
new file mode 100644
index 0000000..c2f0a96
--- /dev/null
+++ b/liballoc/rc.rs
@@ -0,0 +1,1169 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(deprecated)]
+
+//! Thread-local reference-counted boxes (the `Rc<T>` type).
+//!
+//! The `Rc<T>` type provides shared ownership of an immutable value.
+//! Destruction is deterministic, and will occur as soon as the last owner is
+//! gone. It is marked as non-sendable because it avoids the overhead of atomic
+//! reference counting.
+//!
+//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer
+//! to the box. A `Weak<T>` pointer can be upgraded to an `Rc<T>` pointer, but
+//! will return `None` if the value has already been dropped.
+//!
+//! For example, a tree with parent pointers can be represented by putting the
+//! nodes behind strong `Rc<T>` pointers, and then storing the parent pointers
+//! as `Weak<T>` pointers.
+//!
+//! # Examples
+//!
+//! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`.
+//! We want to have our `Gadget`s point to their `Owner`. We can't do this with
+//! unique ownership, because more than one gadget may belong to the same
+//! `Owner`. `Rc<T>` allows us to share an `Owner` between multiple `Gadget`s,
+//!
and have the `Owner` remain allocated as long as any `Gadget` points at it. +//! +//! ```rust +//! use std::rc::Rc; +//! +//! struct Owner { +//! name: String +//! // ...other fields +//! } +//! +//! struct Gadget { +//! id: i32, +//! owner: Rc<Owner> +//! // ...other fields +//! } +//! +//! fn main() { +//! // Create a reference counted Owner. +//! let gadget_owner : Rc<Owner> = Rc::new( +//! Owner { name: String::from("Gadget Man") } +//! ); +//! +//! // Create Gadgets belonging to gadget_owner. To increment the reference +//! // count we clone the `Rc<T>` object. +//! let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() }; +//! let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() }; +//! +//! drop(gadget_owner); +//! +//! // Despite dropping gadget_owner, we're still able to print out the name +//! // of the Owner of the Gadgets. This is because we've only dropped the +//! // reference count object, not the Owner it wraps. As long as there are +//! // other `Rc<T>` objects pointing at the same Owner, it will remain +//! // allocated. Notice that the `Rc<T>` wrapper around Gadget.owner gets +//! // automatically dereferenced for us. +//! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); +//! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); +//! +//! // At the end of the method, gadget1 and gadget2 get destroyed, and with +//! // them the last counted references to our Owner. Gadget Man now gets +//! // destroyed as well. +//! } +//! ``` +//! +//! If our requirements change, and we also need to be able to traverse from +//! Owner → Gadget, we will run into problems: an `Rc<T>` pointer from Owner +//! → Gadget introduces a cycle between the objects. This means that their +//! reference counts can never reach 0, and the objects will remain allocated: a +//! memory leak. In order to get around this, we can use `Weak<T>` pointers. +//! These pointers don't contribute to the total count. +//! +//! Rust actually makes it somewhat difficult to produce this loop in the first +//! place: in order to end up with two objects that point at each other, one of +//! them needs to be mutable. This is problematic because `Rc<T>` enforces +//! memory safety by only giving out shared references to the object it wraps, +//! and these don't allow direct mutation. We need to wrap the part of the +//! object we wish to mutate in a `RefCell`, which provides *interior +//! mutability*: a method to achieve mutability through a shared reference. +//! `RefCell` enforces Rust's borrowing rules at runtime. Read the `Cell` +//! documentation for more details on interior mutability. +//! +//! ```rust +//! use std::rc::Rc; +//! use std::rc::Weak; +//! use std::cell::RefCell; +//! +//! struct Owner { +//! name: String, +//! gadgets: RefCell<Vec<Weak<Gadget>>>, +//! // ...other fields +//! } +//! +//! struct Gadget { +//! id: i32, +//! owner: Rc<Owner>, +//! // ...other fields +//! } +//! +//! fn main() { +//! // Create a reference counted Owner. Note the fact that we've put the +//! // Owner's vector of Gadgets inside a RefCell so that we can mutate it +//! // through a shared reference. +//! let gadget_owner : Rc<Owner> = Rc::new( +//! Owner { +//! name: "Gadget Man".to_string(), +//! gadgets: RefCell::new(Vec::new()), +//! } +//! ); +//! +//! // Create Gadgets belonging to gadget_owner as before. +//! let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()}); +//! let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()}); +//! +//! 
// Add the Gadgets to their Owner. To do this we mutably borrow from +//! // the RefCell holding the Owner's Gadgets. +//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget1)); +//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget2)); +//! +//! // Iterate over our Gadgets, printing their details out +//! for gadget_opt in gadget_owner.gadgets.borrow().iter() { +//! +//! // gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee +//! // that their object is still allocated, we need to call upgrade() +//! // on them to turn them into a strong reference. This returns an +//! // Option, which contains a reference to our object if it still +//! // exists. +//! let gadget = gadget_opt.upgrade().unwrap(); +//! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); +//! } +//! +//! // At the end of the method, gadget_owner, gadget1 and gadget2 get +//! // destroyed. There are now no strong (`Rc<T>`) references to the gadgets. +//! // Once they get destroyed, the Gadgets get destroyed. This zeroes the +//! // reference count on Gadget Man, they get destroyed as well. +//! } +//! ``` + +#![stable(feature = "rust1", since = "1.0.0")] + +#[cfg(not(test))] +use boxed::Box; +#[cfg(test)] +use std::boxed::Box; + +use core::borrow; +use core::cell::Cell; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{Hasher, Hash}; +use core::intrinsics::{assume, abort}; +use core::marker; +use core::marker::Unsize; +use core::mem::{self, align_of_val, size_of_val, forget, uninitialized}; +use core::ops::Deref; +use core::ops::CoerceUnsized; +use core::ptr::{self, Shared}; +use core::convert::From; + +use heap::deallocate; + +struct RcBox<T: ?Sized> { + strong: Cell<usize>, + weak: Cell<usize>, + value: T, +} + + +/// A reference-counted pointer type over an immutable value. +/// +/// See the [module level documentation](./index.html) for more details. +#[unsafe_no_drop_flag] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Rc<T: ?Sized> { + ptr: Shared<RcBox<T>>, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> !marker::Send for Rc<T> {} +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> !marker::Sync for Rc<T> {} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {} + +impl<T> Rc<T> { + /// Constructs a new `Rc<T>`. + /// + /// # Examples + /// + /// ``` + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn new(value: T) -> Rc<T> { + unsafe { + Rc { + // there is an implicit weak pointer owned by all the strong + // pointers, which ensures that the weak destructor never frees + // the allocation while the strong destructor is running, even + // if the weak pointer is stored inside the strong one. + ptr: Shared::new(Box::into_raw(box RcBox { + strong: Cell::new(1), + weak: Cell::new(1), + value: value, + })), + } + } + } + + /// Unwraps the contained value if the `Rc<T>` has exactly one strong reference. + /// + /// Otherwise, an `Err` is returned with the same `Rc<T>`. + /// + /// This will succeed even if there are outstanding weak references. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let x = Rc::new(3);
+    /// assert_eq!(Rc::try_unwrap(x), Ok(3));
+    ///
+    /// let x = Rc::new(4);
+    /// let _y = x.clone();
+    /// assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
+    /// ```
+    #[inline]
+    #[stable(feature = "rc_unique", since = "1.4.0")]
+    pub fn try_unwrap(this: Self) -> Result<T, Self> {
+        if Rc::would_unwrap(&this) {
+            unsafe {
+                let val = ptr::read(&*this); // copy the contained object
+
+                // Indicate to Weaks that they can't be promoted by decrementing
+                // the strong count, and then remove the implicit "strong weak"
+                // pointer while also handling drop logic by just crafting a
+                // fake Weak.
+                this.dec_strong();
+                let _weak = Weak { ptr: this.ptr };
+                forget(this);
+                Ok(val)
+            }
+        } else {
+            Err(this)
+        }
+    }
+
+    /// Checks if `Rc::try_unwrap` would return `Ok`.
+    #[unstable(feature = "rc_would_unwrap",
+               reason = "just added for niche usecase",
+               issue = "28356")]
+    pub fn would_unwrap(this: &Self) -> bool {
+        Rc::strong_count(&this) == 1
+    }
+}
+
+impl<T: ?Sized> Rc<T> {
+    /// Downgrades the `Rc<T>` to a `Weak<T>` reference.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// let weak_five = Rc::downgrade(&five);
+    /// ```
+    #[stable(feature = "rc_weak", since = "1.4.0")]
+    pub fn downgrade(this: &Self) -> Weak<T> {
+        this.inc_weak();
+        Weak { ptr: this.ptr }
+    }
+
+    /// Get the number of weak references to this value.
+    #[inline]
+    #[unstable(feature = "rc_counts", reason = "not clearly useful",
+               issue = "28356")]
+    pub fn weak_count(this: &Self) -> usize {
+        this.weak() - 1
+    }
+
+    /// Get the number of strong references to this value.
+    #[inline]
+    #[unstable(feature = "rc_counts", reason = "not clearly useful",
+               issue = "28356")]
+    pub fn strong_count(this: &Self) -> usize {
+        this.strong()
+    }
+
+    /// Returns true if there are no other `Rc` or `Weak<T>` values that share
+    /// the same inner value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(rc_counts)]
+    ///
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// assert!(Rc::is_unique(&five));
+    /// ```
+    #[inline]
+    #[unstable(feature = "rc_counts", reason = "uniqueness has unclear meaning",
+               issue = "28356")]
+    pub fn is_unique(this: &Self) -> bool {
+        Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1
+    }
+
+    /// Returns a mutable reference to the contained value if the `Rc<T>` has
+    /// one strong reference and no weak references.
+    ///
+    /// Returns `None` if the `Rc<T>` is not unique.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let mut x = Rc::new(3);
+    /// *Rc::get_mut(&mut x).unwrap() = 4;
+    /// assert_eq!(*x, 4);
+    ///
+    /// let _y = x.clone();
+    /// assert!(Rc::get_mut(&mut x).is_none());
+    /// ```
+    #[inline]
+    #[stable(feature = "rc_unique", since = "1.4.0")]
+    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
+        if Rc::is_unique(this) {
+            let inner = unsafe { &mut **this.ptr };
+            Some(&mut inner.value)
+        } else {
+            None
+        }
+    }
+}
+
+impl<T: Clone> Rc<T> {
+    /// Makes a mutable reference into the given `Rc<T>` by cloning the inner
+    /// data if the `Rc<T>` is not unique (i.e. if it does not have exactly one
+    /// strong reference and no weak references).
+    ///
+    /// This is also referred to as a copy-on-write.
+ /// + /// # Examples + /// + /// ``` + /// use std::rc::Rc; + /// + /// let mut data = Rc::new(5); + /// + /// *Rc::make_mut(&mut data) += 1; // Won't clone anything + /// let mut other_data = data.clone(); // Won't clone inner data + /// *Rc::make_mut(&mut data) += 1; // Clones inner data + /// *Rc::make_mut(&mut data) += 1; // Won't clone anything + /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything + /// + /// // Note: data and other_data now point to different numbers + /// assert_eq!(*data, 8); + /// assert_eq!(*other_data, 12); + /// + /// ``` + #[inline] + #[stable(feature = "rc_unique", since = "1.4.0")] + pub fn make_mut(this: &mut Self) -> &mut T { + if Rc::strong_count(this) != 1 { + // Gotta clone the data, there are other Rcs + *this = Rc::new((**this).clone()) + } else if Rc::weak_count(this) != 0 { + // Can just steal the data, all that's left is Weaks + unsafe { + let mut swap = Rc::new(ptr::read(&(**this.ptr).value)); + mem::swap(this, &mut swap); + swap.dec_strong(); + // Remove implicit strong-weak ref (no need to craft a fake + // Weak here -- we know other Weaks can clean up for us) + swap.dec_weak(); + forget(swap); + } + } + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the `Rc<T>` itself to be `mut`, so we're returning the only possible + // reference to the inner value. + let inner = unsafe { &mut **this.ptr }; + &mut inner.value + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Deref for Rc<T> { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &T { + &self.inner().value + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Drop for Rc<T> { + /// Drops the `Rc<T>`. + /// + /// This will decrement the strong reference count. If the strong reference + /// count becomes zero and the only other references are `Weak<T>` ones, + /// `drop`s the inner value. + /// + /// # Examples + /// + /// ``` + /// use std::rc::Rc; + /// + /// { + /// let five = Rc::new(5); + /// + /// // stuff + /// + /// drop(five); // explicit drop + /// } + /// { + /// let five = Rc::new(5); + /// + /// // stuff + /// + /// } // implicit drop + /// ``` + #[unsafe_destructor_blind_to_params] + fn drop(&mut self) { + unsafe { + let ptr = *self.ptr; + let thin = ptr as *const (); + + if thin as usize != mem::POST_DROP_USIZE { + self.dec_strong(); + if self.strong() == 0 { + // destroy the contained object + ptr::drop_in_place(&mut (*ptr).value); + + // remove the implicit "strong weak" pointer now that we've + // destroyed the contents. + self.dec_weak(); + + if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) + } + } + } + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: ?Sized> Clone for Rc<T> { + /// Makes a clone of the `Rc<T>`. + /// + /// When you clone an `Rc<T>`, it will create another pointer to the data and + /// increase the strong reference counter. + /// + /// # Examples + /// + /// ``` + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// + /// five.clone(); + /// ``` + #[inline] + fn clone(&self) -> Rc<T> { + self.inc_strong(); + Rc { ptr: self.ptr } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: Default> Default for Rc<T> { + /// Creates a new `Rc<T>`, with the `Default` value for `T`. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let x: Rc<i32> = Default::default();
+    /// ```
+    #[inline]
+    fn default() -> Rc<T> {
+        Rc::new(Default::default())
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
+    /// Equality for two `Rc<T>`s.
+    ///
+    /// Two `Rc<T>`s are equal if their inner values are equal.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five == Rc::new(5);
+    /// ```
+    #[inline(always)]
+    fn eq(&self, other: &Rc<T>) -> bool {
+        **self == **other
+    }
+
+    /// Inequality for two `Rc<T>`s.
+    ///
+    /// Two `Rc<T>`s are unequal if their inner values are unequal.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five != Rc::new(5);
+    /// ```
+    #[inline(always)]
+    fn ne(&self, other: &Rc<T>) -> bool {
+        **self != **other
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq> Eq for Rc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
+    /// Partial comparison for two `Rc<T>`s.
+    ///
+    /// The two are compared by calling `partial_cmp()` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five.partial_cmp(&Rc::new(5));
+    /// ```
+    #[inline(always)]
+    fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
+        (**self).partial_cmp(&**other)
+    }
+
+    /// Less-than comparison for two `Rc<T>`s.
+    ///
+    /// The two are compared by calling `<` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five < Rc::new(5);
+    /// ```
+    #[inline(always)]
+    fn lt(&self, other: &Rc<T>) -> bool {
+        **self < **other
+    }
+
+    /// 'Less-than or equal to' comparison for two `Rc<T>`s.
+    ///
+    /// The two are compared by calling `<=` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five <= Rc::new(5);
+    /// ```
+    #[inline(always)]
+    fn le(&self, other: &Rc<T>) -> bool {
+        **self <= **other
+    }
+
+    /// Greater-than comparison for two `Rc<T>`s.
+    ///
+    /// The two are compared by calling `>` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five > Rc::new(5);
+    /// ```
+    #[inline(always)]
+    fn gt(&self, other: &Rc<T>) -> bool {
+        **self > **other
+    }
+
+    /// 'Greater-than or equal to' comparison for two `Rc<T>`s.
+    ///
+    /// The two are compared by calling `>=` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five >= Rc::new(5);
+    /// ```
+    #[inline(always)]
+    fn ge(&self, other: &Rc<T>) -> bool {
+        **self >= **other
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord> Ord for Rc<T> {
+    /// Comparison for two `Rc<T>`s.
+    ///
+    /// The two are compared by calling `cmp()` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// five.cmp(&Rc::new(5));
+    /// ```
+    #[inline]
+    fn cmp(&self, other: &Rc<T>) -> Ordering {
+        (**self).cmp(&**other)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash> Hash for Rc<T> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (**self).hash(state);
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Rc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Rc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> fmt::Pointer for Rc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Pointer::fmt(&*self.ptr, f)
+    }
+}
+
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Rc<T> {
+    fn from(t: T) -> Self {
+        Rc::new(t)
+    }
+}
+
+/// A weak version of `Rc<T>`.
+///
+/// Weak references do not count when determining if the inner value should be
+/// dropped.
+///
+/// See the [module level documentation](./index.html) for more.
+#[unsafe_no_drop_flag]
+#[stable(feature = "rc_weak", since = "1.4.0")]
+pub struct Weak<T: ?Sized> {
+    ptr: Shared<RcBox<T>>,
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> !marker::Send for Weak<T> {}
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> !marker::Sync for Weak<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+
+impl<T: ?Sized> Weak<T> {
+    /// Upgrades a weak reference to a strong reference.
+    ///
+    /// Upgrades the `Weak<T>` reference to an `Rc<T>`, if possible.
+    ///
+    /// Returns `None` if there were no strong references and the data was
+    /// destroyed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    ///
+    /// let weak_five = Rc::downgrade(&five);
+    ///
+    /// let strong_five: Option<Rc<_>> = weak_five.upgrade();
+    /// ```
+    #[stable(feature = "rc_weak", since = "1.4.0")]
+    pub fn upgrade(&self) -> Option<Rc<T>> {
+        if self.strong() == 0 {
+            None
+        } else {
+            self.inc_strong();
+            Some(Rc { ptr: self.ptr })
+        }
+    }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> Drop for Weak<T> {
+    /// Drops the `Weak<T>`.
+    ///
+    /// This will decrement the weak reference count.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// {
+    ///     let five = Rc::new(5);
+    ///     let weak_five = Rc::downgrade(&five);
+    ///
+    ///     // stuff
+    ///
+    ///     drop(weak_five); // explicit drop
+    /// }
+    /// {
+    ///     let five = Rc::new(5);
+    ///     let weak_five = Rc::downgrade(&five);
+    ///
+    ///     // stuff
+    ///
+    /// } // implicit drop
+    /// ```
+    fn drop(&mut self) {
+        unsafe {
+            let ptr = *self.ptr;
+            let thin = ptr as *const ();
+
+            if thin as usize != mem::POST_DROP_USIZE {
+                self.dec_weak();
+                // the weak count starts at 1, and will only go to zero if all
+                // the strong pointers have disappeared.
+                if self.weak() == 0 {
+                    deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr))
+                }
+            }
+        }
+    }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> Clone for Weak<T> {
+    /// Makes a clone of the `Weak<T>`.
+ /// + /// This increases the weak reference count. + /// + /// # Examples + /// + /// ``` + /// use std::rc::Rc; + /// + /// let weak_five = Rc::downgrade(&Rc::new(5)); + /// + /// weak_five.clone(); + /// ``` + #[inline] + fn clone(&self) -> Weak<T> { + self.inc_weak(); + Weak { ptr: self.ptr } + } +} + +#[stable(feature = "rc_weak", since = "1.4.0")] +impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "(Weak)") + } +} + +impl<T> Weak<T> { + /// Constructs a new `Weak<T>` without an accompanying instance of T. + /// + /// This allocates memory for T, but does not initialize it. Calling + /// Weak<T>::upgrade() on the return value always gives None. + /// + /// # Examples + /// + /// ``` + /// #![feature(downgraded_weak)] + /// + /// use std::rc::Weak; + /// + /// let empty: Weak<i64> = Weak::new(); + /// ``` + #[unstable(feature = "downgraded_weak", + reason = "recently added", + issue="30425")] + pub fn new() -> Weak<T> { + unsafe { + Weak { + ptr: Shared::new(Box::into_raw(box RcBox { + strong: Cell::new(0), + weak: Cell::new(1), + value: uninitialized(), + })), + } + } + } +} + +// NOTE: We checked_add here to deal with mem::forget safety. In particular +// if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then +// you can free the allocation while outstanding Rcs (or Weaks) exist. +// We abort because this is such a degenerate scenario that we don't care about +// what happens -- no real program should ever experience this. +// +// This should have negligible overhead since you don't actually need to +// clone these much in Rust thanks to ownership and move-semantics. + +#[doc(hidden)] +trait RcBoxPtr<T: ?Sized> { + fn inner(&self) -> &RcBox<T>; + + #[inline] + fn strong(&self) -> usize { + self.inner().strong.get() + } + + #[inline] + fn inc_strong(&self) { + self.inner().strong.set(self.strong().checked_add(1).unwrap_or_else(|| unsafe { abort() })); + } + + #[inline] + fn dec_strong(&self) { + self.inner().strong.set(self.strong() - 1); + } + + #[inline] + fn weak(&self) -> usize { + self.inner().weak.get() + } + + #[inline] + fn inc_weak(&self) { + self.inner().weak.set(self.weak().checked_add(1).unwrap_or_else(|| unsafe { abort() })); + } + + #[inline] + fn dec_weak(&self) { + self.inner().weak.set(self.weak() - 1); + } +} + +impl<T: ?Sized> RcBoxPtr<T> for Rc<T> { + #[inline(always)] + fn inner(&self) -> &RcBox<T> { + unsafe { + // Safe to assume this here, as if it weren't true, we'd be breaking + // the contract anyway. + // This allows the null check to be elided in the destructor if we + // manipulated the reference count in the same function. + assume(!(*(&self.ptr as *const _ as *const *const ())).is_null()); + &(**self.ptr) + } + } +} + +impl<T: ?Sized> RcBoxPtr<T> for Weak<T> { + #[inline(always)] + fn inner(&self) -> &RcBox<T> { + unsafe { + // Safe to assume this here, as if it weren't true, we'd be breaking + // the contract anyway. + // This allows the null check to be elided in the destructor if we + // manipulated the reference count in the same function. 
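+        // (Editor's note, for clarity: the cast below views `&self.ptr` as a
+        // `*const *const ()` so that the `assume` applies to the raw pointer
+        // value itself, i.e. it asserts that the `Shared` pointer is non-null.)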
+ assume(!(*(&self.ptr as *const _ as *const *const ())).is_null()); + &(**self.ptr) + } + } +} + +#[cfg(test)] +mod tests { + use super::{Rc, Weak}; + use std::boxed::Box; + use std::cell::RefCell; + use std::option::Option; + use std::option::Option::{Some, None}; + use std::result::Result::{Err, Ok}; + use std::mem::drop; + use std::clone::Clone; + use std::convert::From; + + #[test] + fn test_clone() { + let x = Rc::new(RefCell::new(5)); + let y = x.clone(); + *x.borrow_mut() = 20; + assert_eq!(*y.borrow(), 20); + } + + #[test] + fn test_simple() { + let x = Rc::new(5); + assert_eq!(*x, 5); + } + + #[test] + fn test_simple_clone() { + let x = Rc::new(5); + let y = x.clone(); + assert_eq!(*x, 5); + assert_eq!(*y, 5); + } + + #[test] + fn test_destructor() { + let x: Rc<Box<_>> = Rc::new(box 5); + assert_eq!(**x, 5); + } + + #[test] + fn test_live() { + let x = Rc::new(5); + let y = Rc::downgrade(&x); + assert!(y.upgrade().is_some()); + } + + #[test] + fn test_dead() { + let x = Rc::new(5); + let y = Rc::downgrade(&x); + drop(x); + assert!(y.upgrade().is_none()); + } + + #[test] + fn weak_self_cyclic() { + struct Cycle { + x: RefCell<Option<Weak<Cycle>>>, + } + + let a = Rc::new(Cycle { x: RefCell::new(None) }); + let b = Rc::downgrade(&a.clone()); + *a.x.borrow_mut() = Some(b); + + // hopefully we don't double-free (or leak)... + } + + #[test] + fn is_unique() { + let x = Rc::new(3); + assert!(Rc::is_unique(&x)); + let y = x.clone(); + assert!(!Rc::is_unique(&x)); + drop(y); + assert!(Rc::is_unique(&x)); + let w = Rc::downgrade(&x); + assert!(!Rc::is_unique(&x)); + drop(w); + assert!(Rc::is_unique(&x)); + } + + #[test] + fn test_strong_count() { + let a = Rc::new(0); + assert!(Rc::strong_count(&a) == 1); + let w = Rc::downgrade(&a); + assert!(Rc::strong_count(&a) == 1); + let b = w.upgrade().expect("upgrade of live rc failed"); + assert!(Rc::strong_count(&b) == 2); + assert!(Rc::strong_count(&a) == 2); + drop(w); + drop(a); + assert!(Rc::strong_count(&b) == 1); + let c = b.clone(); + assert!(Rc::strong_count(&b) == 2); + assert!(Rc::strong_count(&c) == 2); + } + + #[test] + fn test_weak_count() { + let a = Rc::new(0); + assert!(Rc::strong_count(&a) == 1); + assert!(Rc::weak_count(&a) == 0); + let w = Rc::downgrade(&a); + assert!(Rc::strong_count(&a) == 1); + assert!(Rc::weak_count(&a) == 1); + drop(w); + assert!(Rc::strong_count(&a) == 1); + assert!(Rc::weak_count(&a) == 0); + let c = a.clone(); + assert!(Rc::strong_count(&a) == 2); + assert!(Rc::weak_count(&a) == 0); + drop(c); + } + + #[test] + fn try_unwrap() { + let x = Rc::new(3); + assert_eq!(Rc::try_unwrap(x), Ok(3)); + let x = Rc::new(4); + let _y = x.clone(); + assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4))); + let x = Rc::new(5); + let _w = Rc::downgrade(&x); + assert_eq!(Rc::try_unwrap(x), Ok(5)); + } + + #[test] + fn get_mut() { + let mut x = Rc::new(3); + *Rc::get_mut(&mut x).unwrap() = 4; + assert_eq!(*x, 4); + let y = x.clone(); + assert!(Rc::get_mut(&mut x).is_none()); + drop(y); + assert!(Rc::get_mut(&mut x).is_some()); + let _w = Rc::downgrade(&x); + assert!(Rc::get_mut(&mut x).is_none()); + } + + #[test] + fn test_cowrc_clone_make_unique() { + let mut cow0 = Rc::new(75); + let mut cow1 = cow0.clone(); + let mut cow2 = cow1.clone(); + + assert!(75 == *Rc::make_mut(&mut cow0)); + assert!(75 == *Rc::make_mut(&mut cow1)); + assert!(75 == *Rc::make_mut(&mut cow2)); + + *Rc::make_mut(&mut cow0) += 1; + *Rc::make_mut(&mut cow1) += 2; + *Rc::make_mut(&mut cow2) += 3; + + assert!(76 == *cow0); + assert!(77 == *cow1); + 
assert!(78 == *cow2);
+
+        // none should point to the same backing memory
+        assert!(*cow0 != *cow1);
+        assert!(*cow0 != *cow2);
+        assert!(*cow1 != *cow2);
+    }
+
+    #[test]
+    fn test_cowrc_clone_unique2() {
+        let mut cow0 = Rc::new(75);
+        let cow1 = cow0.clone();
+        let cow2 = cow1.clone();
+
+        assert!(75 == *cow0);
+        assert!(75 == *cow1);
+        assert!(75 == *cow2);
+
+        *Rc::make_mut(&mut cow0) += 1;
+
+        assert!(76 == *cow0);
+        assert!(75 == *cow1);
+        assert!(75 == *cow2);
+
+        // cow1 and cow2 should share the same contents
+        // cow0 should have a unique reference
+        assert!(*cow0 != *cow1);
+        assert!(*cow0 != *cow2);
+        assert!(*cow1 == *cow2);
+    }
+
+    #[test]
+    fn test_cowrc_clone_weak() {
+        let mut cow0 = Rc::new(75);
+        let cow1_weak = Rc::downgrade(&cow0);
+
+        assert!(75 == *cow0);
+        assert!(75 == *cow1_weak.upgrade().unwrap());
+
+        *Rc::make_mut(&mut cow0) += 1;
+
+        assert!(76 == *cow0);
+        assert!(cow1_weak.upgrade().is_none());
+    }
+
+    #[test]
+    fn test_show() {
+        let foo = Rc::new(75);
+        assert_eq!(format!("{:?}", foo), "75");
+    }
+
+    #[test]
+    fn test_unsized() {
+        let foo: Rc<[i32]> = Rc::new([1, 2, 3]);
+        assert_eq!(foo, foo.clone());
+    }
+
+    #[test]
+    fn test_from_owned() {
+        let foo = 123;
+        let foo_rc = Rc::from(foo);
+        assert!(123 == *foo_rc);
+    }
+
+    #[test]
+    fn test_new_weak() {
+        let foo: Weak<usize> = Weak::new();
+        assert!(foo.upgrade().is_none());
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> borrow::Borrow<T> for Rc<T> {
+    fn borrow(&self) -> &T {
+        &**self
+    }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsRef<T> for Rc<T> {
+    fn as_ref(&self) -> &T {
+        &**self
+    }
+}
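+
+// Editor's sketch (not part of this commit): the `Borrow` and `AsRef` impls
+// above let generic code accept either a plain value or an `Rc` handle.
+//
+//     use std::borrow::Borrow;
+//     use std::rc::Rc;
+//
+//     fn print_value<B: Borrow<i32>>(b: B) {
+//         println!("{}", b.borrow());
+//     }
+//
+//     print_value(5);          // B = i32 (blanket `Borrow<T> for T`)
+//     print_value(Rc::new(5)); // B = Rc<i32>, via the impl above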