    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: atomic::AtomicUsize,

    data: T,
}
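Because both counters are plain AtomicUsize values, an Arc can be handed to as many threads as you like. Here is a quick usage sketch of my own (not code from std) showing several threads sharing one allocation:

use std::sync::Arc;
use std::thread;

fn main() {
    let shared = Arc::new(vec![1, 2, 3]);
    let handles: Vec<_> = (0..4)
        .map(|_| {
            // Each clone bumps `strong` with an atomic fetch_add, so the
            // races between threads are well-defined.
            let shared = Arc::clone(&shared);
            thread::spawn(move || shared.iter().sum::<i32>())
        })
        .collect();
    for handle in handles {
        assert_eq!(handle.join().unwrap(), 6);
    }
}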
Comparing with std::rc::Rc
Interior mutability
You may recall that the Rc implementation uses a bunch of Cells for its counters (a small contrast sketch follows the struct below):
#[cfg_attr(not(test), lang = "rc")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
    ptr: NonNull<RcBox<T>>,
    phantom: PhantomData<T>,
}
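To make the difference concrete, here is a minimal sketch of my own (not code from std) contrasting the two styles of counter bump: Rc does a plain, non-atomic read-modify-write through a Cell, while Arc performs the same update as a single atomic operation:

use std::cell::Cell;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // Rc-style: single-threaded interior mutability; get/set is not atomic.
    let strong = Cell::new(1usize);
    strong.set(strong.get() + 1);
    assert_eq!(strong.get(), 2);

    // Arc-style: the same bump as one atomic read-modify-write, safe to
    // perform from many threads at once.
    let strong = AtomicUsize::new(1);
    strong.fetch_add(1, Ordering::Relaxed);
    assert_eq!(strong.load(Ordering::Relaxed), 2);
}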
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(data: T) -> Arc<T> {
    // Start the weak pointer count as 1 which is the weak pointer that's
    // held by all the strong pointers (kinda), see std/rc.rs for more info
    let x: Box<_> = box ArcInner {
        strong: atomic::AtomicUsize::new(1),
        weak: atomic::AtomicUsize::new(1),
        data,
    };
    Self::from_inner(Box::into_raw_non_null(x))
}
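So the weak field starts at 1 even though no Weak pointer exists yet; that implicit weak is pure bookkeeping and never shows up in the public counters. A small usage sketch of my own:

use std::sync::Arc;

fn main() {
    let a = Arc::new([0u8; 16]);
    assert_eq!(Arc::strong_count(&a), 1); // `strong` starts at 1
    assert_eq!(Arc::weak_count(&a), 0);   // the implicit weak is not reported

    let b = Arc::clone(&a);
    assert_eq!(Arc::strong_count(&a), 2);
    drop(b);
    assert_eq!(Arc::strong_count(&a), 1);
}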
#[inline]
fn clone(&self) -> Arc<T> {
    // Using a relaxed ordering is alright here, as knowledge of the
    // original reference prevents other threads from erroneously deleting
    // the object.
    //
    // As explained in the [Boost documentation][1], Increasing the
    // reference counter can always be done with memory_order_relaxed: New
    // references to an object can only be formed from an existing
    // reference, and passing an existing reference from one thread to
    // another must already provide any required synchronization.
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    let old_size = self.inner().strong.fetch_add(1, Relaxed);

    // However we need to guard against massive refcounts in case someone
    // is `mem::forget`ing Arcs. If we don't do this the count can overflow
    // and users will use-after free. We racily saturate to `isize::MAX` on
    // the assumption that there aren't ~2 billion threads incrementing
    // the reference count at once. This branch will never be taken in
    // any realistic program.
    //
    // We abort because such a program is incredibly degenerate, and we
    // don't care to support it.
    if old_size > MAX_REFCOUNT {
        unsafe {
            abort();
        }
    }
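The only way to hit that branch is to leak clones faster than they are dropped, for example with mem::forget. A tiny sketch of my own showing what such leaked increments look like from the outside:

use std::mem;
use std::sync::Arc;

fn main() {
    let a = Arc::new(0u8);
    mem::forget(Arc::clone(&a)); // the increment from `clone` is never undone
    assert_eq!(Arc::strong_count(&a), 2);
    // Repeating this on the order of isize::MAX times would overflow the
    // counter; the guard above aborts the process long before that point.
}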
#[inline]
fn drop(&mut self) {
    // Because `fetch_sub` is already atomic, we do not need to synchronize
    // with other threads unless we are going to delete the object. This
    // same logic applies to the below `fetch_sub` to the `weak` count.
    if self.inner().strong.fetch_sub(1, Release) != 1 {
        return;
    }

    // This fence is needed to prevent reordering of use of the data and
    // deletion of the data. Because it is marked `Release`, the decreasing
    // of the reference count synchronizes with this `Acquire` fence. This
    // means that use of the data happens before decreasing the reference
    // count, which happens before this fence, which happens before the
    // deletion of the data.
    //
    // As explained in the [Boost documentation][1],
    //
    // > It is important to enforce any possible access to the object in one
    // > thread (through an existing reference) to *happen before* deleting
    // > the object in a different thread. This is achieved by a "release"
    // > operation after dropping a reference (any access to the object
    // > through this reference must obviously happened before), and an
    // > "acquire" operation before deleting the object.
    //
    // In particular, while the contents of an Arc are usually immutable, it's
    // possible to have interior writes to something like a Mutex<T>. Since a
    // Mutex is not acquired when it is deleted, we can't rely on its
    // synchronization logic to make writes in thread A visible to a destructor
    // running in thread B.
    //
    // Also note that the Acquire fence here could probably be replaced with an
    // Acquire load, which could improve performance in highly-contended
    // situations. See [2].
    //
    // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
    // [2]: (https://github.com/rust-lang/rust/pull/41714)
    atomic::fence(Acquire);

    unsafe {
        self.drop_slow();
    }
}
Consider this comment from the drop implementation above:
This fence is needed to prevent reordering of use of the data and deletion of the data. Because it is marked Release, the decreasing of the reference count synchronizes with this Acquire fence. This means that use of the data happens before decreasing the reference count, which happens before this fence, which happens before the deletion of the data.
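To see that protocol in isolation, here is a minimal hand-rolled sketch of my own (not the std code): every owner decrements with Release, and only the thread that observes the count reach zero issues the Acquire fence before destroying the data, so all earlier uses happen-before the destruction:

use std::sync::atomic::{fence, AtomicUsize, Ordering};

// Returns true if the caller was the last owner and may now free the data.
fn release_ref(strong: &AtomicUsize) -> bool {
    if strong.fetch_sub(1, Ordering::Release) != 1 {
        return false; // other owners remain; do nothing
    }
    // Pairs with the Release decrements performed by the other owners.
    fence(Ordering::Acquire);
    true
}

fn main() {
    let strong = AtomicUsize::new(2);
    assert!(!release_ref(&strong)); // first owner drops, data stays alive
    assert!(release_ref(&strong));  // last owner drops, safe to destroy
}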
// Non-inlined part of `drop`.
#[inline(never)]
unsafe fn drop_slow(&mut self) {
    // Destroy the data at this time, even though we may not free the box
    // allocation itself (there may still be weak pointers lying around).
    ptr::drop_in_place(&mut self.ptr.as_mut().data);
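Splitting destruction of the value from deallocation of the box matters because a Weak can outlive every Arc. A short usage sketch of my own:

use std::sync::Arc;

fn main() {
    let a = Arc::new(String::from("hi"));
    let w = Arc::downgrade(&a);
    drop(a); // the String is dropped here (the `drop_in_place` above)...
    assert!(w.upgrade().is_none());
    drop(w); // ...but the ArcInner allocation is only freed once the last Weak is gone
}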
#[inline] #[stable(feature = "arc_counts", since = "1.15.0")] pubfnweak_count(this: &Self) ->usize { letcnt = this.inner().weak.load(SeqCst); // If the weak count is currently locked, the value of the // count was 0 just before taking the lock. if cnt == usize::MAX { 0 } else { cnt - 1 } } #[inline] #[stable(feature = "arc_counts", since = "1.15.0")] pubfnstrong_count(this: &Self) ->usize { this.inner().strong.load(SeqCst) }
#[unstable(feature = "weak_counts", issue = "57977")] pubfnweak_count(&self) ->Option<usize> { // Due to the implicit weak pointer added when any strong pointers are // around, we cannot implement `weak_count` correctly since it // necessarily requires accessing the strong count and weak count in an // unsynchronized fashion. So this version is a bit racy. self.inner().map(|inner| { letstrong = inner.strong.load(SeqCst); letweak = inner.weak.load(SeqCst); if strong == 0 { // If the last `Arc` has *just* been dropped, it might not yet // have removed the implicit weak count, so the value we get // here might be 1 too high. weak } else { // As long as there's still at least 1 `Arc` around, subtract // the implicit weak pointer. // Note that the last `Arc` might get dropped between the 2 // loads we do above, removing the implicit weak pointer. This // means that the value might be 1 too low here. In order to not // return 0 here (which would happen if we're the only weak // pointer), we guard against that specifically. cmp::max(1, weak - 1) } }) }
/// Determine whether this is the unique reference (including weak refs) to
/// the underlying data.
///
/// Note that this requires locking the weak ref count.
fn is_unique(&mut self) -> bool {
    // lock the weak pointer count if we appear to be the sole weak pointer
    // holder.
    //
    // The acquire label here ensures a happens-before relationship with any
    // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
    // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
    // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
    if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
        // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
        // counter in `drop` -- the only access that happens when any but the last reference
        // is being dropped.
        let unique = self.inner().strong.load(Acquire) == 1;

        // The release write here synchronizes with a read in `downgrade`,
        // effectively preventing the above read of `strong` from happening
        // after the write.
        self.inner().weak.store(1, Release); // release the lock
        unique
    } else {
        false
    }
}
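is_unique is what backs Arc::get_mut (and, through the same weak-count lock, Arc::make_mut). A short usage sketch of my own showing the observable behaviour:

use std::sync::Arc;

fn main() {
    let mut a = Arc::new(String::from("hello"));
    // Sole strong reference, no Weak: the weak-count CAS (1 -> usize::MAX)
    // succeeds and `strong` is 1, so we get mutable access in place.
    assert!(Arc::get_mut(&mut a).is_some());

    let w = Arc::downgrade(&a);
    // With a live Weak the lock CAS fails, so `get_mut` refuses.
    assert!(Arc::get_mut(&mut a).is_none());

    // `make_mut` still hands back a unique handle by moving the value into a
    // fresh allocation, leaving the old Weak disassociated.
    Arc::make_mut(&mut a).push_str(", world");
    assert_eq!(a.as_str(), "hello, world");
    assert!(w.upgrade().is_none());
}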