alloc/sync.rs
#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.

use core::any::Any;
use core::cell::CloneFromCell;
#[cfg(not(no_global_oom_handling))]
use core::clone::CloneToUninit;
use core::clone::UseCloned;
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unsize};
use core::mem::{self, ManuallyDrop, align_of_val_raw};
use core::num::NonZeroUsize;
use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
#[cfg(not(no_global_oom_handling))]
use core::ops::{Residual, Try};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references).
/// Trying to go above it might trigger a `panic` even without actually exceeding it.
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false-positive
// reports in the Arc / Weak implementation, use atomic loads for
// synchronization instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
///
/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
///    [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
///
/// 2. Use clone-on-write semantics with [`Arc::make_mut`] which provides efficient mutation
///    without requiring interior mutability. This approach clones the data only when
///    needed (when there are multiple references) and can be more efficient when mutations
///    are infrequent.
///
/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
///    which provides direct mutable access to the inner value without any cloning.
///
/// ```
/// use std::sync::Arc;
///
/// let mut data = Arc::new(vec![1, 2, 3]);
///
/// // This will clone the vector only if there are other references to it
/// Arc::make_mut(&mut data).push(4);
///
/// assert_eq!(*data, vec![1, 2, 3, 4]);
/// ```
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
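///
/// For example, a shared counter can pair `Arc` with a [`Mutex`][mutex]; the
/// following is a minimal sketch of that pattern:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         // Each thread locks the mutex before mutating the shared value.
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```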
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
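///
/// A minimal sketch of that parent/child pattern:
///
/// ```
/// use std::sync::{Arc, Weak};
///
/// struct Node {
///     parent: Weak<Node>,
/// }
///
/// let parent = Arc::new(Node { parent: Weak::new() });
/// let child = Arc::new(Node { parent: Arc::downgrade(&parent) });
///
/// // The child's back-reference does not keep the parent alive.
/// assert!(child.parent.upgrade().is_some());
/// drop(parent);
/// assert!(child.parent.upgrade().is_none());
/// ```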
///
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::Relaxed);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[doc(search_unbox)]
#[rustc_diagnostic_item = "Arc"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct Arc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

// SAFETY: `Arc::clone` doesn't access any `Cell`s which could contain the `Arc` being cloned.
#[unstable(feature = "cell_get_cloned", issue = "145329")]
unsafe impl<T: ?Sized> CloneFromCell for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    // Constructs an `Arc` from a pointer to an `ArcInner` that was allocated
    // in the global allocator; the caller must guarantee that is where it came from.
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    // Decomposes the `Arc` into its inner pointer and allocator without
    // running `Drop`, transferring ownership of both to the caller.
    #[inline]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, phantom: PhantomData, alloc }
    }

    #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation.
///
/// The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
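///
/// # Examples
///
/// A minimal round trip through [`Arc::downgrade`] and [`upgrade`]:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(42);
/// let weak = Arc::downgrade(&strong);
///
/// // The value is still alive, so upgrading succeeds.
/// assert_eq!(weak.upgrade().map(|v| *v), Some(42));
///
/// drop(strong);
/// // The last strong reference is gone; upgrading now fails.
/// assert!(weak.upgrade().is_none());
/// ```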
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
#[rustc_diagnostic_item = "ArcWeak"]
pub struct Weak<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because ArcInner has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

// SAFETY: `Weak::clone` doesn't access any `Cell`s which could contain the `Weak` being cloned.
#[unstable(feature = "cell_get_cloned", issue = "145329")]
unsafe impl<T: ?Sized> CloneFromCell for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
// Unlike RcInner, repr(align(2)) is not strictly required because atomic types
// have an alignment equal to their size, but we use it for consistency and clarity.
#[repr(C, align(2))]
struct ArcInner<T: ?Sized> {
    strong: Atomic<usize>,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: Atomic<usize>,

    data: T,
}

/// Calculates the layout for `ArcInner<T>` using the inner value's layout.
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
    // Calculate layout using the given value layout.
    // Previously, layout was calculated on the expression
    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
    // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        });
        unsafe { Self::from_inner(Box::leak(x).into()) }
    }

    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
    /// then calls your closure, giving it a `Weak<T>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// ```
    /// # #![allow(dead_code)]
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Gadget {
    ///     me: Weak<Gadget>,
    /// }
    ///
    /// impl Gadget {
    ///     /// Constructs a reference counted Gadget.
    ///     fn new() -> Arc<Self> {
    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
    ///         // `Arc` we're constructing.
    ///         Arc::new_cyclic(|me| {
    ///             // Create the actual struct here.
    ///             Gadget { me: me.clone() }
    ///         })
    ///     }
    ///
    ///     /// Returns a reference counted pointer to Self.
    ///     fn me(&self) -> Arc<Self> {
    ///         self.me.upgrade().unwrap()
    ///     }
    /// }
    /// ```
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
    where
        F: FnOnce(&Weak<T>) -> T,
    {
        Self::new_cyclic_in(data_fn, Global)
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "1.92.0")]
    #[must_use]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
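    ///
    /// # Examples
    ///
    /// A minimal example (here the value is `Unpin`, so pinning adds no
    /// restrictions in practice):
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// // `Pin<Arc<T>>` still dereferences to `T`.
    /// assert_eq!(*pinned, 5);
    /// ```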
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Maps the value in an `Arc`, reusing the allocation if possible.
    ///
    /// `f` is called on a reference to the value in the `Arc`, and the result is returned, also in
    /// an `Arc`.
    ///
662 /// to call it as `Arc::map(a, f)` instead of `r.map(a)`. This
663 /// is so that there is no conflict with a method on the inner type.
664 ///
665 /// # Examples
666 ///
667 /// ```
668 /// #![feature(smart_pointer_try_map)]
669 ///
670 /// use std::sync::Arc;
671 ///
672 /// let r = Arc::new(7);
673 /// let new = Arc::map(r, |i| i + 7);
674 /// assert_eq!(*new, 14);
675 /// ```
676 #[cfg(not(no_global_oom_handling))]
677 #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
678 pub fn map<U>(this: Self, f: impl FnOnce(&T) -> U) -> Arc<U> {
679 if size_of::<T>() == size_of::<U>()
680 && align_of::<T>() == align_of::<U>()
681 && Arc::is_unique(&this)
682 {
683 unsafe {
684 let ptr = Arc::into_raw(this);
685 let value = ptr.read();
686 let mut allocation = Arc::from_raw(ptr.cast::<mem::MaybeUninit<U>>());
687
688 Arc::get_mut_unchecked(&mut allocation).write(f(&value));
689 allocation.assume_init()
690 }
691 } else {
692 Arc::new(f(&*this))
693 }
694 }
695
696 /// Attempts to map the value in an `Arc`, reusing the allocation if possible.
697 ///
698 /// `f` is called on a reference to the value in the `Arc`, and if the operation succeeds, the
699 /// result is returned, also in an `Arc`.
700 ///
701 /// Note: this is an associated function, which means that you have
702 /// to call it as `Arc::try_map(a, f)` instead of `a.try_map(f)`. This
703 /// is so that there is no conflict with a method on the inner type.
704 ///
705 /// # Examples
706 ///
707 /// ```
708 /// #![feature(smart_pointer_try_map)]
709 ///
710 /// use std::sync::Arc;
711 ///
712 /// let b = Arc::new(7);
713 /// let new = Arc::try_map(b, |&i| u32::try_from(i)).unwrap();
714 /// assert_eq!(*new, 7);
715 /// ```
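    ///
    /// A sketch of the same call with an `Option`-returning closure, which
    /// yields an `Option<Arc<_>>` instead of a `Result`:
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    ///
    /// use std::sync::Arc;
    ///
    /// let text = Arc::new(String::from("42"));
    /// // Parsing can fail, so the result is `Option<Arc<u32>>`.
    /// let parsed = Arc::try_map(text, |s| s.parse::<u32>().ok());
    /// assert_eq!(parsed.map(|n| *n), Some(42));
    /// ```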
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn try_map<R>(
        this: Self,
        f: impl FnOnce(&T) -> R,
    ) -> <R::Residual as Residual<Arc<R::Output>>>::TryType
    where
        R: Try,
        R::Residual: Residual<Arc<R::Output>>,
    {
        if size_of::<T>() == size_of::<R::Output>()
            && align_of::<T>() == align_of::<R::Output>()
            && Arc::is_unique(&this)
        {
            unsafe {
                let ptr = Arc::into_raw(this);
                let value = ptr.read();
                let mut allocation = Arc::from_raw(ptr.cast::<mem::MaybeUninit<R::Output>>());

                Arc::get_mut_unchecked(&mut allocation).write(f(&value)?);
                try { allocation.assume_init() }
            }
        } else {
            try { Arc::new(f(&*this)?) }
        }
    }
}
743
744impl<T, A: Allocator> Arc<T, A> {
745 /// Constructs a new `Arc<T>` in the provided allocator.
746 ///
747 /// # Examples
748 ///
749 /// ```
750 /// #![feature(allocator_api)]
751 ///
752 /// use std::sync::Arc;
753 /// use std::alloc::System;
754 ///
755 /// let five = Arc::new_in(5, System);
756 /// ```
757 #[inline]
758 #[cfg(not(no_global_oom_handling))]
759 #[unstable(feature = "allocator_api", issue = "32838")]
760 pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
761 // Start the weak pointer count as 1 which is the weak pointer that's
762 // held by all the strong pointers (kinda), see std/rc.rs for more info
763 let x = Box::new_in(
764 ArcInner {
765 strong: atomic::AtomicUsize::new(1),
766 weak: atomic::AtomicUsize::new(1),
767 data,
768 },
769 alloc,
770 );
771 let (ptr, alloc) = Box::into_unique(x);
772 unsafe { Self::from_inner_in(ptr.into(), alloc) }
773 }
774
775 /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
776 ///
777 /// # Examples
778 ///
779 /// ```
780 /// #![feature(get_mut_unchecked)]
781 /// #![feature(allocator_api)]
782 ///
783 /// use std::sync::Arc;
784 /// use std::alloc::System;
785 ///
786 /// let mut five = Arc::<u32, _>::new_uninit_in(System);
787 ///
788 /// let five = unsafe {
789 /// // Deferred initialization:
790 /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
791 ///
792 /// five.assume_init()
793 /// };
794 ///
795 /// assert_eq!(*five, 5)
796 /// ```
797 #[cfg(not(no_global_oom_handling))]
798 #[unstable(feature = "allocator_api", issue = "32838")]
799 #[inline]
800 pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
801 unsafe {
802 Arc::from_ptr_in(
803 Arc::allocate_for_layout(
804 Layout::new::<T>(),
805 |layout| alloc.allocate(layout),
806 <*mut u8>::cast,
807 ),
808 alloc,
809 )
810 }
811 }
812
813 /// Constructs a new `Arc` with uninitialized contents, with the memory
814 /// being filled with `0` bytes, in the provided allocator.
815 ///
816 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
817 /// of this method.
818 ///
819 /// # Examples
820 ///
821 /// ```
822 /// #![feature(allocator_api)]
823 ///
824 /// use std::sync::Arc;
825 /// use std::alloc::System;
826 ///
827 /// let zero = Arc::<u32, _>::new_zeroed_in(System);
828 /// let zero = unsafe { zero.assume_init() };
829 ///
830 /// assert_eq!(*zero, 0)
831 /// ```
832 ///
833 /// [zeroed]: mem::MaybeUninit::zeroed
834 #[cfg(not(no_global_oom_handling))]
835 #[unstable(feature = "allocator_api", issue = "32838")]
836 #[inline]
837 pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
838 unsafe {
839 Arc::from_ptr_in(
840 Arc::allocate_for_layout(
841 Layout::new::<T>(),
842 |layout| alloc.allocate_zeroed(layout),
843 <*mut u8>::cast,
844 ),
845 alloc,
846 )
847 }
848 }
849
850 /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
851 /// to allow you to construct a `T` which holds a weak pointer to itself.
852 ///
853 /// Generally, a structure circularly referencing itself, either directly or
854 /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
855 /// Using this function, you get access to the weak pointer during the
856 /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
857 /// clone and store it inside the `T`.
858 ///
859 /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
860 /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
861 /// and only afterwards completes the construction of the `Arc<T, A>` by placing
862 /// the `T` returned from your closure into the allocation.
863 ///
864 /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
865 /// returns, calling [`upgrade`] on the weak reference inside your closure will
866 /// fail and result in a `None` value.
867 ///
868 /// # Panics
869 ///
870 /// If `data_fn` panics, the panic is propagated to the caller, and the
871 /// temporary [`Weak<T>`] is dropped normally.
872 ///
873 /// # Example
874 ///
875 /// See [`new_cyclic`]
876 ///
877 /// [`new_cyclic`]: Arc::new_cyclic
878 /// [`upgrade`]: Weak::upgrade
879 #[cfg(not(no_global_oom_handling))]
880 #[inline]
881 #[unstable(feature = "allocator_api", issue = "32838")]
882 pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
883 where
884 F: FnOnce(&Weak<T, A>) -> T,
885 {
886 // Construct the inner in the "uninitialized" state with a single
887 // weak reference.
888 let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
889 ArcInner {
890 strong: atomic::AtomicUsize::new(0),
891 weak: atomic::AtomicUsize::new(1),
892 data: mem::MaybeUninit::<T>::uninit(),
893 },
894 alloc,
895 ));
896 let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
897 let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
898
899 let weak = Weak { ptr: init_ptr, alloc };
900
901 // It's important we don't give up ownership of the weak pointer, or
902 // else the memory might be freed by the time `data_fn` returns. If
903 // we really wanted to pass ownership, we could create an additional
904 // weak pointer for ourselves, but this would result in additional
905 // updates to the weak reference count which might not be necessary
906 // otherwise.
907 let data = data_fn(&weak);
908
909 // Now we can properly initialize the inner value and turn our weak
910 // reference into a strong reference.
911 let strong = unsafe {
912 let inner = init_ptr.as_ptr();
913 ptr::write(&raw mut (*inner).data, data);
914
915 // The above write to the data field must be visible to any threads which
916 // observe a non-zero strong count. Therefore we need at least "Release" ordering
917 // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
918 //
919 // "Acquire" ordering is not required. When considering the possible behaviors
920 // of `data_fn` we only need to look at what it could do with a reference to a
921 // non-upgradeable `Weak`:
922 // - It can *clone* the `Weak`, increasing the weak reference count.
923 // - It can drop those clones, decreasing the weak reference count (but never to zero).
924 //
925 // These side effects do not impact us in any way, and no other side effects are
926 // possible with safe code alone.
927 let prev_value = (*inner).strong.fetch_add(1, Release);
928 debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
929
930 // Strong references should collectively own a shared weak reference,
931 // so don't run the destructor for our old weak reference.
932 // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
933 // and forgetting the weak reference.
934 let alloc = weak.into_raw_with_allocator().1;
935
936 Arc::from_inner_in(init_ptr, alloc)
937 };
938
939 strong
940 }
941
942 /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
943 /// then `data` will be pinned in memory and unable to be moved.
944 #[cfg(not(no_global_oom_handling))]
945 #[unstable(feature = "allocator_api", issue = "32838")]
946 #[inline]
947 pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
948 where
949 A: 'static,
950 {
951 unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
952 }
953
    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
    where
        A: 'static,
    {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
    }

    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::try_new_in(5, System)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::try_new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        )?;
        let (ptr, alloc) = Box::into_unique(x);
        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
    }

    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
    /// error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
    /// fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`]-value, as the expression
    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped.
    /// For instance, if two threads execute such an expression in parallel,
    /// there is a race condition without the possibility of unsafety:
    /// The threads could first both check whether they own the last instance
    /// in `Arc::try_unwrap`, determine that they both do not, and then both
    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
    /// In this scenario, the value inside the `Arc` is safely destroyed
    /// by exactly one of the threads, but neither thread will ever be able
    /// to use the value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        let this = ManuallyDrop::new(this);
        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator

        // Make a weak pointer to clean up the implicit strong-weak reference
        let _weak = Weak { ptr: this.ptr, alloc };

        Ok(elem)
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
    /// is meant for different use-cases. If used as a direct replacement
    /// for `Arc::into_inner` anyway, such as with the expression
    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
    /// **not** give the same guarantee as described in the previous paragraph.
    /// For more information, see the examples below and read the documentation
    /// of [`Arc::try_unwrap`].
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    ///     # fn new() -> Self {
    ///     #     LinkedList(None)
    ///     # }
    ///     # fn push(&mut self, x: T) {
    ///     #     self.0 = Some(Arc::new(Node(x, self.0.take())));
    ///     # }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// let size = 100000;
    /// # let size = if cfg!(miri) { 100 } else { size };
    /// for i in 0..size {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "arc_into_inner", since = "1.70.0")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn’t called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.

        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
        let alloc = unsafe { ptr::read(&this.alloc) };

        drop(Weak { ptr: this.ptr, alloc });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "1.92.0")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }

    /// Converts the reference-counted slice into a reference-counted array.
    ///
    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
    ///
    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
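    ///
    /// # Examples
    ///
    /// A minimal sketch, converting a slice of length 3 (the target length `N`
    /// is inferred from the binding's type):
    ///
    /// ```
    /// #![feature(slice_as_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// let array: Arc<[u32; 3]> = slice.into_array().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```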
    #[unstable(feature = "slice_as_array", issue = "133508")]
    #[inline]
    #[must_use]
    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
        if self.len() == N {
            let ptr = Self::into_raw(self) as *const [T; N];

            // SAFETY: The underlying array of a slice has the exact same layout as an actual array `[T; N]` if `N` is equal to the slice's length.
            let me = unsafe { Arc::from_raw(ptr) };
            Some(me)
        } else {
            None
        }
    }
}

impl<T, A: Allocator> Arc<[T], A> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
    /// provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::array::<T>(len).unwrap(),
                    |layout| alloc.allocate_zeroed(layout),
                    |mem| {
                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
                            as *mut ArcInner<[mem::MaybeUninit<T>]>
                    },
                ),
                alloc,
            )
        }
    }
}

impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T, A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
    }
}

impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T], A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Constructs an `Arc<T>` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
    ///
    /// * If `U` is sized, it must have the same size and alignment as `T`. This
    ///   is trivially true if `U` is `T`.
    /// * If `U` is unsized, its data pointer must have the same size and
    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
    ///   coercion].
    ///
    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
    /// and alignment, this is basically like transmuting references of
    /// different types. See [`mem::transmute`][transmute] for more information
    /// on what restrictions apply in this case.
    ///
    /// The raw pointer must point to a block of memory allocated by the global allocator.
    ///
    /// The user of `from_raw` has to make sure a specific value of `T` is only
    /// dropped once.
    ///
    /// This function is unsafe because improper use may lead to memory unsafety,
    /// even if the returned `Arc<T>` is never accessed.
    ///
    /// [into_raw]: Arc::into_raw
    /// [transmute]: core::mem::transmute
    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    ///
    /// Convert a slice back into its original array:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// let x_ptr: *const [u32] = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
    ///     assert_eq!(&*x, &[1, 2, 3]);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        unsafe { Arc::from_raw_in(ptr, Global) }
    }

    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// # // Prevent leaks for Miri.
    /// # drop(unsafe { Arc::from_raw(x_ptr) });
    /// ```
    #[must_use = "losing the pointer will leak memory"]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    #[rustc_never_returns_null_ptr]
    pub fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        Self::as_ptr(&*this)
    }

    /// Increments the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
    /// allocated by the global allocator.
    ///
    /// [from_raw_in]: Arc::from_raw_in
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // This assertion is deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    ///     # // Prevent leaks for Miri.
    ///     # Arc::decrement_strong_count(ptr);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn increment_strong_count(ptr: *const T) {
        unsafe { Arc::increment_strong_count_in(ptr, Global) }
    }

    /// Decrements the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) when invoking this method, and `ptr` must point to a block of memory
    /// allocated by the global allocator. This method can be used to release the final
    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
    /// released.
    ///
    /// [from_raw_in]: Arc::from_raw_in
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // Those assertions are deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    ///     Arc::decrement_strong_count(ptr);
    ///     assert_eq!(1, Arc::strong_count(&five));
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn decrement_strong_count(ptr: *const T) {
        unsafe { Arc::decrement_strong_count_in(ptr, Global) }
    }
}
1652
1653impl<T: ?Sized, A: Allocator> Arc<T, A> {
1654 /// Returns a reference to the underlying allocator.
1655 ///
1656 /// Note: this is an associated function, which means that you have
1657 /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
1658 /// is so that there is no conflict with a method on the inner type.
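    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let a = Arc::new_in(5, System);
    /// let _alloc: &System = Arc::allocator(&a);
    /// ```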
1659 #[inline]
1660 #[unstable(feature = "allocator_api", issue = "32838")]
1661 pub fn allocator(this: &Self) -> &A {
1662 &this.alloc
1663 }
1664
1665 /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1666 ///
1667 /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1668 /// [`Arc::from_raw_in`].
1669 ///
1670 /// # Examples
1671 ///
1672 /// ```
1673 /// #![feature(allocator_api)]
1674 /// use std::sync::Arc;
1675 /// use std::alloc::System;
1676 ///
1677 /// let x = Arc::new_in("hello".to_owned(), System);
1678 /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1679 /// assert_eq!(unsafe { &*ptr }, "hello");
1680 /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1681 /// assert_eq!(&*x, "hello");
1682 /// ```
1683 #[must_use = "losing the pointer will leak memory"]
1684 #[unstable(feature = "allocator_api", issue = "32838")]
1685 pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1686 let this = mem::ManuallyDrop::new(this);
1687 let ptr = Self::as_ptr(&this);
1688 // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1689 let alloc = unsafe { ptr::read(&this.alloc) };
1690 (ptr, alloc)
1691 }
1692
1693 /// Provides a raw pointer to the data.
1694 ///
    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
    /// as long as there are strong references to the allocation.
1697 ///
1698 /// # Examples
1699 ///
1700 /// ```
1701 /// use std::sync::Arc;
1702 ///
1703 /// let x = Arc::new("hello".to_owned());
1704 /// let y = Arc::clone(&x);
1705 /// let x_ptr = Arc::as_ptr(&x);
1706 /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1707 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1708 /// ```
1709 #[must_use]
1710 #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1711 #[rustc_never_returns_null_ptr]
1712 pub fn as_ptr(this: &Self) -> *const T {
1713 let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1714
1715 // SAFETY: This cannot go through Deref::deref or ArcInnerPtr::inner because
1716 // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1717 // write through the pointer after the Arc is recovered through `from_raw`.
1718 unsafe { &raw mut (*ptr).data }
1719 }
1720
1721 /// Constructs an `Arc<T, A>` from a raw pointer.
1722 ///
1723 /// The raw pointer must have been previously returned by a call to [`Arc<U,
1724 /// A>::into_raw`][into_raw] with the following requirements:
1725 ///
1726 /// * If `U` is sized, it must have the same size and alignment as `T`. This
1727 /// is trivially true if `U` is `T`.
1728 /// * If `U` is unsized, its data pointer must have the same size and
1729 /// alignment as `T`. This is trivially true if `Arc<U>` was constructed
1730 /// through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1731 /// coercion].
1732 ///
1733 /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1734 /// and alignment, this is basically like transmuting references of
1735 /// different types. See [`mem::transmute`][transmute] for more information
1736 /// on what restrictions apply in this case.
1737 ///
    /// The raw pointer must point to a block of memory allocated by `alloc`.
1739 ///
1740 /// The user of `from_raw` has to make sure a specific value of `T` is only
1741 /// dropped once.
1742 ///
1743 /// This function is unsafe because improper use may lead to memory unsafety,
1744 /// even if the returned `Arc<T>` is never accessed.
1745 ///
1746 /// [into_raw]: Arc::into_raw
1747 /// [transmute]: core::mem::transmute
1748 /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1749 ///
1750 /// # Examples
1751 ///
1752 /// ```
1753 /// #![feature(allocator_api)]
1754 ///
1755 /// use std::sync::Arc;
1756 /// use std::alloc::System;
1757 ///
1758 /// let x = Arc::new_in("hello".to_owned(), System);
1759 /// let (x_ptr, alloc) = Arc::into_raw_with_allocator(x);
1760 ///
1761 /// unsafe {
    ///     // Convert back to an `Arc` to prevent a leak.
    ///     let x = Arc::from_raw_in(x_ptr, alloc);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw_in(x_ptr, System)` would be memory-unsafe.
1767 /// }
1768 ///
1769 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1770 /// ```
1771 ///
1772 /// Convert a slice back into its original array:
1773 ///
1774 /// ```
1775 /// #![feature(allocator_api)]
1776 ///
1777 /// use std::sync::Arc;
1778 /// use std::alloc::System;
1779 ///
1780 /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1781 /// let x_ptr: *const [u32] = Arc::into_raw_with_allocator(x).0;
1782 ///
1783 /// unsafe {
1784 /// let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1785 /// assert_eq!(&*x, &[1, 2, 3]);
1786 /// }
1787 /// ```
1788 #[inline]
1789 #[unstable(feature = "allocator_api", issue = "32838")]
1790 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1791 unsafe {
1792 let offset = data_offset(ptr);
1793
1794 // Reverse the offset to find the original ArcInner.
1795 let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1796
1797 Self::from_ptr_in(arc_ptr, alloc)
1798 }
1799 }
1800
1801 /// Creates a new [`Weak`] pointer to this allocation.
1802 ///
1803 /// # Examples
1804 ///
1805 /// ```
1806 /// use std::sync::Arc;
1807 ///
1808 /// let five = Arc::new(5);
1809 ///
1810 /// let weak_five = Arc::downgrade(&five);
1811 /// ```
1812 #[must_use = "this returns a new `Weak` pointer, \
1813 without modifying the original `Arc`"]
1814 #[stable(feature = "arc_weak", since = "1.4.0")]
1815 pub fn downgrade(this: &Self) -> Weak<T, A>
1816 where
1817 A: Clone,
1818 {
1819 // This Relaxed is OK because we're checking the value in the CAS
1820 // below.
1821 let mut cur = this.inner().weak.load(Relaxed);
1822
1823 loop {
1824 // check if the weak counter is currently "locked"; if so, spin.
1825 if cur == usize::MAX {
1826 hint::spin_loop();
1827 cur = this.inner().weak.load(Relaxed);
1828 continue;
1829 }
1830
1831 // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1832 assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1833
1834 // NOTE: this code currently ignores the possibility of overflow
1835 // into usize::MAX; in general both Rc and Arc need to be adjusted
1836 // to deal with overflow.
1837
1838 // Unlike with Clone(), we need this to be an Acquire read to
1839 // synchronize with the write coming from `is_unique`, so that the
1840 // events prior to that write happen before this read.
1841 match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1842 Ok(_) => {
1843 // Make sure we do not create a dangling Weak
1844 debug_assert!(!is_dangling(this.ptr.as_ptr()));
1845 return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1846 }
1847 Err(old) => cur = old,
1848 }
1849 }
1850 }
1851
1852 /// Gets the number of [`Weak`] pointers to this allocation.
1853 ///
1854 /// # Safety
1855 ///
1856 /// This method by itself is safe, but using it correctly requires extra care.
1857 /// Another thread can change the weak count at any time,
1858 /// including potentially between calling this method and acting on the result.
1859 ///
1860 /// # Examples
1861 ///
1862 /// ```
1863 /// use std::sync::Arc;
1864 ///
1865 /// let five = Arc::new(5);
1866 /// let _weak_five = Arc::downgrade(&five);
1867 ///
1868 /// // This assertion is deterministic because we haven't shared
1869 /// // the `Arc` or `Weak` between threads.
1870 /// assert_eq!(1, Arc::weak_count(&five));
1871 /// ```
1872 #[inline]
1873 #[must_use]
1874 #[stable(feature = "arc_counts", since = "1.15.0")]
1875 pub fn weak_count(this: &Self) -> usize {
1876 let cnt = this.inner().weak.load(Relaxed);
1877 // If the weak count is currently locked, the value of the
1878 // count was 0 just before taking the lock.
1879 if cnt == usize::MAX { 0 } else { cnt - 1 }
1880 }
1881
1882 /// Gets the number of strong (`Arc`) pointers to this allocation.
1883 ///
1884 /// # Safety
1885 ///
1886 /// This method by itself is safe, but using it correctly requires extra care.
1887 /// Another thread can change the strong count at any time,
1888 /// including potentially between calling this method and acting on the result.
1889 ///
1890 /// # Examples
1891 ///
1892 /// ```
1893 /// use std::sync::Arc;
1894 ///
1895 /// let five = Arc::new(5);
1896 /// let _also_five = Arc::clone(&five);
1897 ///
1898 /// // This assertion is deterministic because we haven't shared
1899 /// // the `Arc` between threads.
1900 /// assert_eq!(2, Arc::strong_count(&five));
1901 /// ```
1902 #[inline]
1903 #[must_use]
1904 #[stable(feature = "arc_counts", since = "1.15.0")]
1905 pub fn strong_count(this: &Self) -> usize {
1906 this.inner().strong.load(Relaxed)
1907 }
1908
1909 /// Increments the strong reference count on the `Arc<T>` associated with the
1910 /// provided pointer by one.
1911 ///
1912 /// # Safety
1913 ///
1914 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1915 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1916 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1917 /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1918 /// allocated by `alloc`.
1919 ///
1920 /// [from_raw_in]: Arc::from_raw_in
1921 ///
1922 /// # Examples
1923 ///
1924 /// ```
1925 /// #![feature(allocator_api)]
1926 ///
1927 /// use std::sync::Arc;
1928 /// use std::alloc::System;
1929 ///
1930 /// let five = Arc::new_in(5, System);
1931 ///
1932 /// unsafe {
1933 /// let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1934 /// Arc::increment_strong_count_in(ptr, System);
1935 ///
1936 /// // This assertion is deterministic because we haven't shared
1937 /// // the `Arc` between threads.
1938 /// let five = Arc::from_raw_in(ptr, System);
1939 /// assert_eq!(2, Arc::strong_count(&five));
1940 /// # // Prevent leaks for Miri.
1941 /// # Arc::decrement_strong_count_in(ptr, System);
1942 /// }
1943 /// ```
1944 #[inline]
1945 #[unstable(feature = "allocator_api", issue = "32838")]
1946 pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1947 where
1948 A: Clone,
1949 {
1950 // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1951 let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1952 // Now increase refcount, but don't drop new refcount either
1953 let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1954 }
1955
1956 /// Decrements the strong reference count on the `Arc<T>` associated with the
1957 /// provided pointer by one.
1958 ///
1959 /// # Safety
1960 ///
1961 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1962 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1963 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1964 /// least 1) when invoking this method, and `ptr` must point to a block of memory
1965 /// allocated by `alloc`. This method can be used to release the final
1966 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1967 /// released.
1968 ///
1969 /// [from_raw_in]: Arc::from_raw_in
1970 ///
1971 /// # Examples
1972 ///
1973 /// ```
1974 /// #![feature(allocator_api)]
1975 ///
1976 /// use std::sync::Arc;
1977 /// use std::alloc::System;
1978 ///
1979 /// let five = Arc::new_in(5, System);
1980 ///
1981 /// unsafe {
1982 /// let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1983 /// Arc::increment_strong_count_in(ptr, System);
1984 ///
1985 /// // Those assertions are deterministic because we haven't shared
1986 /// // the `Arc` between threads.
1987 /// let five = Arc::from_raw_in(ptr, System);
1988 /// assert_eq!(2, Arc::strong_count(&five));
1989 /// Arc::decrement_strong_count_in(ptr, System);
1990 /// assert_eq!(1, Arc::strong_count(&five));
1991 /// }
1992 /// ```
1993 #[inline]
1994 #[unstable(feature = "allocator_api", issue = "32838")]
1995 pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1996 unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1997 }
1998
1999 #[inline]
2000 fn inner(&self) -> &ArcInner<T> {
2001 // This unsafety is ok because while this arc is alive we're guaranteed
2002 // that the inner pointer is valid. Furthermore, we know that the
2003 // `ArcInner` structure itself is `Sync` because the inner data is
2004 // `Sync` as well, so we're ok loaning out an immutable pointer to these
2005 // contents.
2006 unsafe { self.ptr.as_ref() }
2007 }
2008
2009 // Non-inlined part of `drop`.
2010 #[inline(never)]
2011 unsafe fn drop_slow(&mut self) {
2012 // Drop the weak ref collectively held by all strong references when this
2013 // variable goes out of scope. This ensures that the memory is deallocated
2014 // even if the destructor of `T` panics.
2015 // Take a reference to `self.alloc` instead of cloning because 1. it'll last long
2016 // enough, and 2. you should be able to drop `Arc`s with unclonable allocators
2017 let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
2018
2019 // Destroy the data at this time, even though we must not free the box
2020 // allocation itself (there might still be weak pointers lying around).
2021 // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
2022 unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
2023 }
2024
2025 /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
2026 /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
2027 ///
2028 /// # Examples
2029 ///
2030 /// ```
2031 /// use std::sync::Arc;
2032 ///
2033 /// let five = Arc::new(5);
2034 /// let same_five = Arc::clone(&five);
2035 /// let other_five = Arc::new(5);
2036 ///
2037 /// assert!(Arc::ptr_eq(&five, &same_five));
2038 /// assert!(!Arc::ptr_eq(&five, &other_five));
2039 /// ```
2040 ///
2041 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
2042 #[inline]
2043 #[must_use]
2044 #[stable(feature = "ptr_eq", since = "1.17.0")]
2045 pub fn ptr_eq(this: &Self, other: &Self) -> bool {
2046 ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
2047 }
2048}
2049
2050impl<T: ?Sized> Arc<T> {
2051 /// Allocates an `ArcInner<T>` with sufficient space for
2052 /// a possibly-unsized inner value where the value has the layout provided.
2053 ///
2054 /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
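    ///
    /// For instance, the slice allocation path below passes
    /// `|mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>`
    /// as `mem_to_arcinner` to attach the length metadata to the returned pointer.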
2056 #[cfg(not(no_global_oom_handling))]
2057 unsafe fn allocate_for_layout(
2058 value_layout: Layout,
2059 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
2060 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2061 ) -> *mut ArcInner<T> {
2062 let layout = arcinner_layout_for_value_layout(value_layout);
2063
2064 let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
2065
2066 unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
2067 }
2068
2069 /// Allocates an `ArcInner<T>` with sufficient space for
2070 /// a possibly-unsized inner value where the value has the layout provided,
2071 /// returning an error if allocation fails.
2072 ///
2073 /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
2075 unsafe fn try_allocate_for_layout(
2076 value_layout: Layout,
2077 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
2078 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2079 ) -> Result<*mut ArcInner<T>, AllocError> {
2080 let layout = arcinner_layout_for_value_layout(value_layout);
2081
2082 let ptr = allocate(layout)?;
2083
2084 let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
2085
2086 Ok(inner)
2087 }
2088
2089 unsafe fn initialize_arcinner(
2090 ptr: NonNull<[u8]>,
2091 layout: Layout,
2092 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2093 ) -> *mut ArcInner<T> {
2094 let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
2095 debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
2096
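        // Both counts start at 1: the strong count for the `Arc` being created,
        // and the weak count for the implicit weak reference collectively held
        // by all strong references (see `drop_slow`).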
2097 unsafe {
2098 (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2099 (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2100 }
2101
2102 inner
2103 }
2104}
2105
2106impl<T: ?Sized, A: Allocator> Arc<T, A> {
2107 /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2108 #[inline]
2109 #[cfg(not(no_global_oom_handling))]
2110 unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2111 // Allocate for the `ArcInner<T>` using the given value.
2112 unsafe {
2113 Arc::allocate_for_layout(
2114 Layout::for_value_raw(ptr),
2115 |layout| alloc.allocate(layout),
2116 |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2117 )
2118 }
2119 }
2120
2121 #[cfg(not(no_global_oom_handling))]
2122 fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2123 unsafe {
2124 let value_size = size_of_val(&*src);
2125 let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2126
2127 // Copy value as bytes
2128 ptr::copy_nonoverlapping(
2129 (&raw const *src) as *const u8,
2130 (&raw mut (*ptr).data) as *mut u8,
2131 value_size,
2132 );
2133
2134 // Free the allocation without dropping its contents
2135 let (bptr, alloc) = Box::into_raw_with_allocator(src);
2136 let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
2137 drop(src);
2138
2139 Self::from_ptr_in(ptr, alloc)
2140 }
2141 }
2142}
2143
2144impl<T> Arc<[T]> {
2145 /// Allocates an `ArcInner<[T]>` with the given length.
2146 #[cfg(not(no_global_oom_handling))]
2147 unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2148 unsafe {
2149 Self::allocate_for_layout(
2150 Layout::array::<T>(len).unwrap(),
2151 |layout| Global.allocate(layout),
2152 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2153 )
2154 }
2155 }
2156
2157 /// Copy elements from slice into newly allocated `Arc<[T]>`
2158 ///
2159 /// Unsafe because the caller must either take ownership or bind `T: Copy`.
2160 #[cfg(not(no_global_oom_handling))]
2161 unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2162 unsafe {
2163 let ptr = Self::allocate_for_slice(v.len());
2164
2165 ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2166
2167 Self::from_ptr(ptr)
2168 }
2169 }
2170
2171 /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2172 ///
2173 /// Behavior is undefined should the size be wrong.
2174 #[cfg(not(no_global_oom_handling))]
2175 unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2176 // Panic guard while cloning T elements.
2177 // In the event of a panic, elements that have been written
2178 // into the new ArcInner will be dropped, then the memory freed.
2179 struct Guard<T> {
2180 mem: NonNull<u8>,
2181 elems: *mut T,
2182 layout: Layout,
2183 n_elems: usize,
2184 }
2185
2186 impl<T> Drop for Guard<T> {
2187 fn drop(&mut self) {
2188 unsafe {
2189 let slice = from_raw_parts_mut(self.elems, self.n_elems);
2190 ptr::drop_in_place(slice);
2191
2192 Global.deallocate(self.mem, self.layout);
2193 }
2194 }
2195 }
2196
2197 unsafe {
2198 let ptr = Self::allocate_for_slice(len);
2199
2200 let mem = ptr as *mut _ as *mut u8;
2201 let layout = Layout::for_value_raw(ptr);
2202
2203 // Pointer to first element
2204 let elems = (&raw mut (*ptr).data) as *mut T;
2205
2206 let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2207
2208 for (i, item) in iter.enumerate() {
2209 ptr::write(elems.add(i), item);
2210 guard.n_elems += 1;
2211 }
2212
2213 // All clear. Forget the guard so it doesn't free the new ArcInner.
2214 mem::forget(guard);
2215
2216 Self::from_ptr(ptr)
2217 }
2218 }
2219}
2220
2221impl<T, A: Allocator> Arc<[T], A> {
2222 /// Allocates an `ArcInner<[T]>` with the given length.
2223 #[inline]
2224 #[cfg(not(no_global_oom_handling))]
2225 unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2226 unsafe {
2227 Arc::allocate_for_layout(
2228 Layout::array::<T>(len).unwrap(),
2229 |layout| alloc.allocate(layout),
2230 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2231 )
2232 }
2233 }
2234}
2235
2236/// Specialization trait used for `From<&[T]>`.
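///
/// The default impl below clones the elements one at a time through
/// `from_iter_exact`, while the `T: Copy` specialization copies the whole
/// slice at once with `copy_from_slice`.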
2237#[cfg(not(no_global_oom_handling))]
2238trait ArcFromSlice<T> {
2239 fn from_slice(slice: &[T]) -> Self;
2240}
2241
2242#[cfg(not(no_global_oom_handling))]
2243impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2244 #[inline]
2245 default fn from_slice(v: &[T]) -> Self {
2246 unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2247 }
2248}
2249
2250#[cfg(not(no_global_oom_handling))]
2251impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
2252 #[inline]
2253 fn from_slice(v: &[T]) -> Self {
2254 unsafe { Arc::copy_from_slice(v) }
2255 }
2256}
2257
2258#[stable(feature = "rust1", since = "1.0.0")]
2259impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2260 /// Makes a clone of the `Arc` pointer.
2261 ///
2262 /// This creates another pointer to the same allocation, increasing the
2263 /// strong reference count.
2264 ///
2265 /// # Examples
2266 ///
2267 /// ```
2268 /// use std::sync::Arc;
2269 ///
2270 /// let five = Arc::new(5);
2271 ///
2272 /// let _ = Arc::clone(&five);
2273 /// ```
2274 #[inline]
2275 fn clone(&self) -> Arc<T, A> {
2276 // Using a relaxed ordering is alright here, as knowledge of the
2277 // original reference prevents other threads from erroneously deleting
2278 // the object.
2279 //
        // As explained in the [Boost documentation][1], increasing the
2281 // reference counter can always be done with memory_order_relaxed: New
2282 // references to an object can only be formed from an existing
2283 // reference, and passing an existing reference from one thread to
2284 // another must already provide any required synchronization.
2285 //
2286 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2287 let old_size = self.inner().strong.fetch_add(1, Relaxed);
2288
2289 // However we need to guard against massive refcounts in case someone is `mem::forget`ing
        // Arcs. If we don't do this the count can overflow and users will use-after-free. This
2291 // branch will never be taken in any realistic program. We abort because such a program is
2292 // incredibly degenerate, and we don't care to support it.
2293 //
2294 // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2295 // But we do that check *after* having done the increment, so there is a chance here that
2296 // the worst already happened and we actually do overflow the `usize` counter. However, that
2297 // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2298 // above and the `abort` below, which seems exceedingly unlikely.
2299 //
2300 // This is a global invariant, and also applies when using a compare-exchange loop to increment
2301 // counters in other methods.
2302 // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2303 // and then overflow using a few `fetch_add`s.
2304 if old_size > MAX_REFCOUNT {
2305 abort();
2306 }
2307
2308 unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2309 }
2310}
2311
2312#[unstable(feature = "ergonomic_clones", issue = "132290")]
2313impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2314
2315#[stable(feature = "rust1", since = "1.0.0")]
2316impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2317 type Target = T;
2318
2319 #[inline]
2320 fn deref(&self) -> &T {
2321 &self.inner().data
2322 }
2323}
2324
2325#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2326unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2327
2328#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2329unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2330
2331#[unstable(feature = "deref_pure_trait", issue = "87121")]
2332unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2333
2334#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2335impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2336
2337#[cfg(not(no_global_oom_handling))]
2338impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2339 /// Makes a mutable reference into the given `Arc`.
2340 ///
2341 /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2342 /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
2343 /// referred to as clone-on-write.
2344 ///
2345 /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2346 /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2347 /// be cloned.
2348 ///
2349 /// See also [`get_mut`], which will fail rather than cloning the inner value
2350 /// or dissociating [`Weak`] pointers.
2351 ///
2352 /// [`clone`]: Clone::clone
2353 /// [`get_mut`]: Arc::get_mut
2354 ///
2355 /// # Examples
2356 ///
2357 /// ```
2358 /// use std::sync::Arc;
2359 ///
2360 /// let mut data = Arc::new(5);
2361 ///
2362 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2363 /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2364 /// *Arc::make_mut(&mut data) += 1; // Clones inner data
2365 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2366 /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
2367 ///
2368 /// // Now `data` and `other_data` point to different allocations.
2369 /// assert_eq!(*data, 8);
2370 /// assert_eq!(*other_data, 12);
2371 /// ```
2372 ///
2373 /// [`Weak`] pointers will be dissociated:
2374 ///
2375 /// ```
2376 /// use std::sync::Arc;
2377 ///
2378 /// let mut data = Arc::new(75);
2379 /// let weak = Arc::downgrade(&data);
2380 ///
2381 /// assert!(75 == *data);
2382 /// assert!(75 == *weak.upgrade().unwrap());
2383 ///
2384 /// *Arc::make_mut(&mut data) += 1;
2385 ///
2386 /// assert!(76 == *data);
2387 /// assert!(weak.upgrade().is_none());
2388 /// ```
2389 #[inline]
2390 #[stable(feature = "arc_unique", since = "1.4.0")]
2391 pub fn make_mut(this: &mut Self) -> &mut T {
2392 let size_of_val = size_of_val::<T>(&**this);
2393
2394 // Note that we hold both a strong reference and a weak reference.
2395 // Thus, releasing our strong reference only will not, by itself, cause
2396 // the memory to be deallocated.
2397 //
2398 // Use Acquire to ensure that we see any writes to `weak` that happen
2399 // before release writes (i.e., decrements) to `strong`. Since we hold a
2400 // weak count, there's no chance the ArcInner itself could be
2401 // deallocated.
2402 if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2403 // Another strong pointer exists, so we must clone.
2404
2405 let this_data_ref: &T = &**this;
2406 // `in_progress` drops the allocation if we panic before finishing initializing it.
2407 let mut in_progress: UniqueArcUninit<T, A> =
2408 UniqueArcUninit::new(this_data_ref, this.alloc.clone());
2409
2410 let initialized_clone = unsafe {
2411 // Clone. If the clone panics, `in_progress` will be dropped and clean up.
2412 this_data_ref.clone_to_uninit(in_progress.data_ptr().cast());
2413 // Cast type of pointer, now that it is initialized.
2414 in_progress.into_arc()
2415 };
2416 *this = initialized_clone;
2417 } else if this.inner().weak.load(Relaxed) != 1 {
2418 // Relaxed suffices in the above because this is fundamentally an
2419 // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2421
2422 // We removed the last strong ref, but there are additional weak
2423 // refs remaining. We'll move the contents to a new Arc, and
2424 // invalidate the other weak refs.
2425
2426 // Note that it is not possible for the read of `weak` to yield
2427 // usize::MAX (i.e., locked), since the weak count can only be
2428 // locked by a thread with a strong reference.
2429
2430 // Materialize our own implicit weak pointer, so that it can clean
2431 // up the ArcInner as needed.
2432 let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2433
2434 // Can just steal the data, all that's left is Weaks
2435 //
2436 // We don't need panic-protection like the above branch does, but we might as well
2437 // use the same mechanism.
2438 let mut in_progress: UniqueArcUninit<T, A> =
2439 UniqueArcUninit::new(&**this, this.alloc.clone());
2440 unsafe {
2441 // Initialize `in_progress` with move of **this.
2442 // We have to express this in terms of bytes because `T: ?Sized`; there is no
2443 // operation that just copies a value based on its `size_of_val()`.
2444 ptr::copy_nonoverlapping(
2445 ptr::from_ref(&**this).cast::<u8>(),
2446 in_progress.data_ptr().cast::<u8>(),
2447 size_of_val,
2448 );
2449
2450 ptr::write(this, in_progress.into_arc());
2451 }
2452 } else {
2453 // We were the sole reference of either kind; bump back up the
2454 // strong ref count.
2455 this.inner().strong.store(1, Release);
2456 }
2457
2458 // As with `get_mut()`, the unsafety is ok because our reference was
2459 // either unique to begin with, or became one upon cloning the contents.
2460 unsafe { Self::get_mut_unchecked(this) }
2461 }
2462}
2463
2464impl<T: Clone, A: Allocator> Arc<T, A> {
2465 /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2466 /// clone.
2467 ///
2468 /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2469 /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2470 ///
2471 /// # Examples
2472 ///
2473 /// ```
2474 /// # use std::{ptr, sync::Arc};
2475 /// let inner = String::from("test");
2476 /// let ptr = inner.as_ptr();
2477 ///
2478 /// let arc = Arc::new(inner);
2479 /// let inner = Arc::unwrap_or_clone(arc);
2480 /// // The inner value was not cloned
2481 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2482 ///
2483 /// let arc = Arc::new(inner);
2484 /// let arc2 = arc.clone();
2485 /// let inner = Arc::unwrap_or_clone(arc);
2486 /// // Because there were 2 references, we had to clone the inner value.
2487 /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2488 /// // `arc2` is the last reference, so when we unwrap it we get back
2489 /// // the original `String`.
2490 /// let inner = Arc::unwrap_or_clone(arc2);
2491 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2492 /// ```
2493 #[inline]
2494 #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2495 pub fn unwrap_or_clone(this: Self) -> T {
2496 Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2497 }
2498}
2499
2500impl<T: ?Sized, A: Allocator> Arc<T, A> {
2501 /// Returns a mutable reference into the given `Arc`, if there are
2502 /// no other `Arc` or [`Weak`] pointers to the same allocation.
2503 ///
2504 /// Returns [`None`] otherwise, because it is not safe to
2505 /// mutate a shared value.
2506 ///
2507 /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2508 /// the inner value when there are other `Arc` pointers.
2509 ///
2510 /// [make_mut]: Arc::make_mut
2511 /// [clone]: Clone::clone
2512 ///
2513 /// # Examples
2514 ///
2515 /// ```
2516 /// use std::sync::Arc;
2517 ///
2518 /// let mut x = Arc::new(3);
2519 /// *Arc::get_mut(&mut x).unwrap() = 4;
2520 /// assert_eq!(*x, 4);
2521 ///
2522 /// let _y = Arc::clone(&x);
2523 /// assert!(Arc::get_mut(&mut x).is_none());
2524 /// ```
2525 #[inline]
2526 #[stable(feature = "arc_unique", since = "1.4.0")]
2527 pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2528 if Self::is_unique(this) {
2529 // This unsafety is ok because we're guaranteed that the pointer
2530 // returned is the *only* pointer that will ever be returned to T. Our
2531 // reference count is guaranteed to be 1 at this point, and we required
2532 // the Arc itself to be `mut`, so we're returning the only possible
2533 // reference to the inner data.
2534 unsafe { Some(Arc::get_mut_unchecked(this)) }
2535 } else {
2536 None
2537 }
2538 }
2539
2540 /// Returns a mutable reference into the given `Arc`,
2541 /// without any check.
2542 ///
2543 /// See also [`get_mut`], which is safe and does appropriate checks.
2544 ///
2545 /// [`get_mut`]: Arc::get_mut
2546 ///
2547 /// # Safety
2548 ///
2549 /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2550 /// they must not be dereferenced or have active borrows for the duration
2551 /// of the returned borrow, and their inner type must be exactly the same as the
2552 /// inner type of this Arc (including lifetimes). This is trivially the case if no
2553 /// such pointers exist, for example immediately after `Arc::new`.
2554 ///
2555 /// # Examples
2556 ///
2557 /// ```
2558 /// #![feature(get_mut_unchecked)]
2559 ///
2560 /// use std::sync::Arc;
2561 ///
2562 /// let mut x = Arc::new(String::new());
2563 /// unsafe {
2564 /// Arc::get_mut_unchecked(&mut x).push_str("foo")
2565 /// }
2566 /// assert_eq!(*x, "foo");
2567 /// ```
2568 /// Other `Arc` pointers to the same allocation must be to the same type.
2569 /// ```no_run
2570 /// #![feature(get_mut_unchecked)]
2571 ///
2572 /// use std::sync::Arc;
2573 ///
2574 /// let x: Arc<str> = Arc::from("Hello, world!");
2575 /// let mut y: Arc<[u8]> = x.clone().into();
2576 /// unsafe {
2577 /// // this is Undefined Behavior, because x's inner type is str, not [u8]
2578 /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2579 /// }
2580 /// println!("{}", &*x); // Invalid UTF-8 in a str
2581 /// ```
2582 /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2583 /// ```no_run
2584 /// #![feature(get_mut_unchecked)]
2585 ///
2586 /// use std::sync::Arc;
2587 ///
2588 /// let x: Arc<&str> = Arc::new("Hello, world!");
2589 /// {
2590 /// let s = String::from("Oh, no!");
2591 /// let mut y: Arc<&str> = x.clone();
2592 /// unsafe {
2593 /// // this is Undefined Behavior, because x's inner type
2594 /// // is &'long str, not &'short str
2595 /// *Arc::get_mut_unchecked(&mut y) = &s;
2596 /// }
2597 /// }
2598 /// println!("{}", &*x); // Use-after-free
2599 /// ```
2600 #[inline]
2601 #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2602 pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2603 // We are careful to *not* create a reference covering the "count" fields, as
2604 // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2605 unsafe { &mut (*this.ptr.as_ptr()).data }
2606 }
2607
2608 /// Determine whether this is the unique reference to the underlying data.
2609 ///
2610 /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2611 /// returns `false` otherwise.
2612 ///
    /// If this function returns `true`, then it is guaranteed to be safe to call [`get_mut_unchecked`]
2614 /// on this `Arc`, so long as no clones occur in between.
2615 ///
2616 /// # Examples
2617 ///
2618 /// ```
2619 /// #![feature(arc_is_unique)]
2620 ///
2621 /// use std::sync::Arc;
2622 ///
2623 /// let x = Arc::new(3);
2624 /// assert!(Arc::is_unique(&x));
2625 ///
2626 /// let y = Arc::clone(&x);
2627 /// assert!(!Arc::is_unique(&x));
2628 /// drop(y);
2629 ///
2630 /// // Weak references also count, because they could be upgraded at any time.
2631 /// let z = Arc::downgrade(&x);
2632 /// assert!(!Arc::is_unique(&x));
2633 /// ```
2634 ///
2635 /// # Pointer invalidation
2636 ///
2637 /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2638 /// unlike that operation it does not produce any mutable references to the underlying data,
2639 /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2640 /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2641 ///
2642 /// ```
2643 /// #![feature(arc_is_unique)]
2644 ///
2645 /// use std::sync::Arc;
2646 ///
2647 /// let arc = Arc::new(5);
2648 /// let pointer: *const i32 = &*arc;
2649 /// assert!(Arc::is_unique(&arc));
2650 /// assert_eq!(unsafe { *pointer }, 5);
2651 /// ```
2652 ///
2653 /// # Atomic orderings
2654 ///
    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
2656 /// call - that is, this call performs an `Acquire` operation on the underlying strong and weak
2657 /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2658 ///
2659 /// Note that this operation requires locking the weak ref count, so concurrent calls to
2660 /// `downgrade` may spin-loop for a short period of time.
2661 ///
2662 /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2663 #[inline]
2664 #[unstable(feature = "arc_is_unique", issue = "138938")]
2665 pub fn is_unique(this: &Self) -> bool {
2666 // lock the weak pointer count if we appear to be the sole weak pointer
2667 // holder.
2668 //
2669 // The acquire label here ensures a happens-before relationship with any
2670 // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2671 // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2672 // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2673 if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2674 // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2675 // counter in `drop` -- the only access that happens when any but the last reference
2676 // is being dropped.
2677 let unique = this.inner().strong.load(Acquire) == 1;
2678
2679 // The release write here synchronizes with a read in `downgrade`,
2680 // effectively preventing the above read of `strong` from happening
2681 // after the write.
2682 this.inner().weak.store(1, Release); // release the lock
2683 unique
2684 } else {
2685 false
2686 }
2687 }
2688}
2689
2690#[stable(feature = "rust1", since = "1.0.0")]
2691unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2692 /// Drops the `Arc`.
2693 ///
2694 /// This will decrement the strong reference count. If the strong reference
2695 /// count reaches zero then the only other references (if any) are
2696 /// [`Weak`], so we `drop` the inner value.
2697 ///
2698 /// # Examples
2699 ///
2700 /// ```
2701 /// use std::sync::Arc;
2702 ///
2703 /// struct Foo;
2704 ///
2705 /// impl Drop for Foo {
2706 /// fn drop(&mut self) {
2707 /// println!("dropped!");
2708 /// }
2709 /// }
2710 ///
2711 /// let foo = Arc::new(Foo);
2712 /// let foo2 = Arc::clone(&foo);
2713 ///
2714 /// drop(foo); // Doesn't print anything
2715 /// drop(foo2); // Prints "dropped!"
2716 /// ```
2717 #[inline]
2718 fn drop(&mut self) {
2719 // Because `fetch_sub` is already atomic, we do not need to synchronize
2720 // with other threads unless we are going to delete the object. This
2721 // same logic applies to the below `fetch_sub` to the `weak` count.
2722 if self.inner().strong.fetch_sub(1, Release) != 1 {
2723 return;
2724 }
2725
2726 // This fence is needed to prevent reordering of use of the data and
2727 // deletion of the data. Because it is marked `Release`, the decreasing
2728 // of the reference count synchronizes with this `Acquire` fence. This
2729 // means that use of the data happens before decreasing the reference
2730 // count, which happens before this fence, which happens before the
2731 // deletion of the data.
2732 //
2733 // As explained in the [Boost documentation][1],
2734 //
2735 // > It is important to enforce any possible access to the object in one
2736 // > thread (through an existing reference) to *happen before* deleting
2737 // > the object in a different thread. This is achieved by a "release"
2738 // > operation after dropping a reference (any access to the object
2739 // > through this reference must obviously happened before), and an
2740 // > "acquire" operation before deleting the object.
2741 //
2742 // In particular, while the contents of an Arc are usually immutable, it's
2743 // possible to have interior writes to something like a Mutex<T>. Since a
2744 // Mutex is not acquired when it is deleted, we can't rely on its
2745 // synchronization logic to make writes in thread A visible to a destructor
2746 // running in thread B.
2747 //
2748 // Also note that the Acquire fence here could probably be replaced with an
2749 // Acquire load, which could improve performance in highly-contended
2750 // situations. See [2].
2751 //
2752 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2753 // [2]: (https://github.com/rust-lang/rust/pull/41714)
2754 acquire!(self.inner().strong);
2755
2756 // Make sure we aren't trying to "drop" the shared static for empty slices
2757 // used by Default::default.
2758 debug_assert!(
2759 !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2760 "Arcs backed by a static should never reach a strong count of 0. \
2761 Likely decrement_strong_count or from_raw were called too many times.",
2762 );
2763
2764 unsafe {
2765 self.drop_slow();
2766 }
2767 }
2768}
2769
2770impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2771 /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2772 ///
2773 /// # Examples
2774 ///
2775 /// ```
2776 /// use std::any::Any;
2777 /// use std::sync::Arc;
2778 ///
2779 /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2780 /// if let Ok(string) = value.downcast::<String>() {
2781 /// println!("String ({}): {}", string.len(), string);
2782 /// }
2783 /// }
2784 ///
2785 /// let my_string = "Hello World".to_string();
2786 /// print_if_string(Arc::new(my_string));
2787 /// print_if_string(Arc::new(0i8));
2788 /// ```
2789 #[inline]
2790 #[stable(feature = "rc_downcast", since = "1.29.0")]
2791 pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2792 where
2793 T: Any + Send + Sync,
2794 {
2795 if (*self).is::<T>() {
2796 unsafe {
2797 let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2798 Ok(Arc::from_inner_in(ptr.cast(), alloc))
2799 }
2800 } else {
2801 Err(self)
2802 }
2803 }
2804
2805 /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2806 ///
2807 /// For a safe alternative see [`downcast`].
2808 ///
2809 /// # Examples
2810 ///
2811 /// ```
2812 /// #![feature(downcast_unchecked)]
2813 ///
2814 /// use std::any::Any;
2815 /// use std::sync::Arc;
2816 ///
2817 /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2818 ///
2819 /// unsafe {
2820 /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2821 /// }
2822 /// ```
2823 ///
2824 /// # Safety
2825 ///
2826 /// The contained value must be of type `T`. Calling this method
2827 /// with the incorrect type is *undefined behavior*.
    ///
2830 /// [`downcast`]: Self::downcast
2831 #[inline]
2832 #[unstable(feature = "downcast_unchecked", issue = "90850")]
2833 pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2834 where
2835 T: Any + Send + Sync,
2836 {
2837 unsafe {
2838 let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2839 Arc::from_inner_in(ptr.cast(), alloc)
2840 }
2841 }
2842}
2843
2844impl<T> Weak<T> {
2845 /// Constructs a new `Weak<T>`, without allocating any memory.
2846 /// Calling [`upgrade`] on the return value always gives [`None`].
2847 ///
2848 /// [`upgrade`]: Weak::upgrade
2849 ///
2850 /// # Examples
2851 ///
2852 /// ```
2853 /// use std::sync::Weak;
2854 ///
2855 /// let empty: Weak<i64> = Weak::new();
2856 /// assert!(empty.upgrade().is_none());
2857 /// ```
2858 #[inline]
2859 #[stable(feature = "downgraded_weak", since = "1.10.0")]
2860 #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2861 #[must_use]
2862 pub const fn new() -> Weak<T> {
2863 Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2864 }
2865}
2866
2867impl<T, A: Allocator> Weak<T, A> {
    /// Constructs a new `Weak<T, A>` associated with the provided allocator, without
    /// allocating any memory.
2870 /// Calling [`upgrade`] on the return value always gives [`None`].
2871 ///
2872 /// [`upgrade`]: Weak::upgrade
2873 ///
2874 /// # Examples
2875 ///
2876 /// ```
2877 /// #![feature(allocator_api)]
2878 ///
2879 /// use std::sync::Weak;
2880 /// use std::alloc::System;
2881 ///
2882 /// let empty: Weak<i64, _> = Weak::new_in(System);
2883 /// assert!(empty.upgrade().is_none());
2884 /// ```
2885 #[inline]
2886 #[unstable(feature = "allocator_api", issue = "32838")]
2887 pub fn new_in(alloc: A) -> Weak<T, A> {
2888 Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2889 }
2890}
2891
2892/// Helper type to allow accessing the reference counts without
2893/// making any assertions about the data field.
2894struct WeakInner<'a> {
2895 weak: &'a Atomic<usize>,
2896 strong: &'a Atomic<usize>,
2897}
2898
2899impl<T: ?Sized> Weak<T> {
2900 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2901 ///
2902 /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to decrement the weak count by dropping the `Weak<T>`.
2904 ///
2905 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2906 /// as these don't own anything; the method still works on them).
2907 ///
2908 /// # Safety
2909 ///
    /// The pointer must have originated from [`into_raw`], must still own its potential
    /// weak reference, and must point to a block of memory allocated by the global allocator.
2912 ///
2913 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2914 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2915 /// count is not modified by this operation) and therefore it must be paired with a previous
2916 /// call to [`into_raw`].
    ///
    /// # Examples
2918 ///
2919 /// ```
2920 /// use std::sync::{Arc, Weak};
2921 ///
2922 /// let strong = Arc::new("hello".to_owned());
2923 ///
2924 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2925 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2926 ///
2927 /// assert_eq!(2, Arc::weak_count(&strong));
2928 ///
2929 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2930 /// assert_eq!(1, Arc::weak_count(&strong));
2931 ///
2932 /// drop(strong);
2933 ///
2934 /// // Decrement the last weak count.
2935 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2936 /// ```
2937 ///
2938 /// [`new`]: Weak::new
2939 /// [`into_raw`]: Weak::into_raw
2940 /// [`upgrade`]: Weak::upgrade
2941 #[inline]
2942 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2943 pub unsafe fn from_raw(ptr: *const T) -> Self {
2944 unsafe { Weak::from_raw_in(ptr, Global) }
2945 }
2946
2947 /// Consumes the `Weak<T>` and turns it into a raw pointer.
2948 ///
2949 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2950 /// one weak reference (the weak count is not modified by this operation). It can be turned
2951 /// back into the `Weak<T>` with [`from_raw`].
2952 ///
2953 /// The same restrictions of accessing the target of the pointer as with
2954 /// [`as_ptr`] apply.
2955 ///
2956 /// # Examples
2957 ///
2958 /// ```
2959 /// use std::sync::{Arc, Weak};
2960 ///
2961 /// let strong = Arc::new("hello".to_owned());
2962 /// let weak = Arc::downgrade(&strong);
2963 /// let raw = weak.into_raw();
2964 ///
2965 /// assert_eq!(1, Arc::weak_count(&strong));
2966 /// assert_eq!("hello", unsafe { &*raw });
2967 ///
2968 /// drop(unsafe { Weak::from_raw(raw) });
2969 /// assert_eq!(0, Arc::weak_count(&strong));
2970 /// ```
2971 ///
2972 /// [`from_raw`]: Weak::from_raw
2973 /// [`as_ptr`]: Weak::as_ptr
2974 #[must_use = "losing the pointer will leak memory"]
2975 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2976 pub fn into_raw(self) -> *const T {
2977 ManuallyDrop::new(self).as_ptr()
2978 }
2979}
2980
2981impl<T: ?Sized, A: Allocator> Weak<T, A> {
2982 /// Returns a reference to the underlying allocator.
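    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let strong = Arc::new_in(5, System);
    /// let weak = Arc::downgrade(&strong);
    /// let _alloc: &System = weak.allocator();
    /// ```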
2983 #[inline]
2984 #[unstable(feature = "allocator_api", issue = "32838")]
2985 pub fn allocator(&self) -> &A {
2986 &self.alloc
2987 }
2988
2989 /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2990 ///
2991 /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2992 /// unaligned or even [`null`] otherwise.
2993 ///
2994 /// # Examples
2995 ///
2996 /// ```
2997 /// use std::sync::Arc;
2998 /// use std::ptr;
2999 ///
3000 /// let strong = Arc::new("hello".to_owned());
3001 /// let weak = Arc::downgrade(&strong);
3002 /// // Both point to the same object
3003 /// assert!(ptr::eq(&*strong, weak.as_ptr()));
3004 /// // The strong here keeps it alive, so we can still access the object.
3005 /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
3006 ///
3007 /// drop(strong);
3008 /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
3009 /// // undefined behavior.
3010 /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
3011 /// ```
3012 ///
3013 /// [`null`]: core::ptr::null "ptr::null"
3014 #[must_use]
3015 #[stable(feature = "weak_into_raw", since = "1.45.0")]
3016 pub fn as_ptr(&self) -> *const T {
3017 let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
3018
3019 if is_dangling(ptr) {
3020 // If the pointer is dangling, we return the sentinel directly. This cannot be
3021 // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
3022 ptr as *const T
3023 } else {
3024 // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
3025 // The payload may be dropped at this point, and we have to maintain provenance,
3026 // so use raw pointer manipulation.
3027 unsafe { &raw mut (*ptr).data }
3028 }
3029 }
3030
3031 /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
3032 ///
3033 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
3034 /// one weak reference (the weak count is not modified by this operation). It can be turned
3035 /// back into the `Weak<T>` with [`from_raw_in`].
3036 ///
3037 /// The same restrictions of accessing the target of the pointer as with
3038 /// [`as_ptr`] apply.
3039 ///
3040 /// # Examples
3041 ///
3042 /// ```
3043 /// #![feature(allocator_api)]
3044 /// use std::sync::{Arc, Weak};
3045 /// use std::alloc::System;
3046 ///
3047 /// let strong = Arc::new_in("hello".to_owned(), System);
3048 /// let weak = Arc::downgrade(&strong);
3049 /// let (raw, alloc) = weak.into_raw_with_allocator();
3050 ///
3051 /// assert_eq!(1, Arc::weak_count(&strong));
3052 /// assert_eq!("hello", unsafe { &*raw });
3053 ///
3054 /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
3055 /// assert_eq!(0, Arc::weak_count(&strong));
3056 /// ```
3057 ///
3058 /// [`from_raw_in`]: Weak::from_raw_in
3059 /// [`as_ptr`]: Weak::as_ptr
3060 #[must_use = "losing the pointer will leak memory"]
3061 #[unstable(feature = "allocator_api", issue = "32838")]
3062 pub fn into_raw_with_allocator(self) -> (*const T, A) {
3063 let this = mem::ManuallyDrop::new(self);
3064 let result = this.as_ptr();
3065 // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
3066 let alloc = unsafe { ptr::read(&this.alloc) };
3067 (result, alloc)
3068 }
3069
3070 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
3071 /// allocator.
3072 ///
3073 /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to decrement the weak count by dropping the `Weak<T>`.
3075 ///
3076 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
3077 /// as these don't own anything; the method still works on them).
3078 ///
3079 /// # Safety
3080 ///
    /// The pointer must have originated from [`into_raw`], must still own its potential
    /// weak reference, and must point to a block of memory allocated by `alloc`.
3083 ///
3084 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
3085 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
3086 /// count is not modified by this operation) and therefore it must be paired with a previous
3087 /// call to [`into_raw`].
    ///
    /// # Examples
3089 ///
3090 /// ```
3091 /// use std::sync::{Arc, Weak};
3092 ///
3093 /// let strong = Arc::new("hello".to_owned());
3094 ///
3095 /// let raw_1 = Arc::downgrade(&strong).into_raw();
3096 /// let raw_2 = Arc::downgrade(&strong).into_raw();
3097 ///
3098 /// assert_eq!(2, Arc::weak_count(&strong));
3099 ///
3100 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
3101 /// assert_eq!(1, Arc::weak_count(&strong));
3102 ///
3103 /// drop(strong);
3104 ///
3105 /// // Decrement the last weak count.
3106 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
3107 /// ```
3108 ///
3109 /// [`new`]: Weak::new
3110 /// [`into_raw`]: Weak::into_raw
3111 /// [`upgrade`]: Weak::upgrade
3112 #[inline]
3113 #[unstable(feature = "allocator_api", issue = "32838")]
3114 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3115 // See Weak::as_ptr for context on how the input pointer is derived.
3116
3117 let ptr = if is_dangling(ptr) {
3118 // This is a dangling Weak.
3119 ptr as *mut ArcInner<T>
3120 } else {
3121 // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
3122 // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
3123 let offset = unsafe { data_offset(ptr) };
3124 // Thus, we reverse the offset to get the whole ArcInner.
3125 // SAFETY: the pointer originated from a Weak, so this offset is safe.
3126 unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
3127 };
3128
3129 // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
3130 Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
3131 }
3132}
3133
3134impl<T: ?Sized, A: Allocator> Weak<T, A> {
3135 /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
3136 /// dropping of the inner value if successful.
3137 ///
3138 /// Returns [`None`] if the inner value has since been dropped.
3139 ///
3140 /// # Examples
3141 ///
3142 /// ```
3143 /// use std::sync::Arc;
3144 ///
3145 /// let five = Arc::new(5);
3146 ///
3147 /// let weak_five = Arc::downgrade(&five);
3148 ///
3149 /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
3150 /// assert!(strong_five.is_some());
3151 ///
3152 /// // Destroy all strong pointers.
3153 /// drop(strong_five);
3154 /// drop(five);
3155 ///
3156 /// assert!(weak_five.upgrade().is_none());
3157 /// ```
3158 #[must_use = "this returns a new `Arc`, \
3159 without modifying the original weak pointer"]
3160 #[stable(feature = "arc_weak", since = "1.4.0")]
3161 pub fn upgrade(&self) -> Option<Arc<T, A>>
3162 where
3163 A: Clone,
3164 {
3165 #[inline]
3166 fn checked_increment(n: usize) -> Option<usize> {
3167 // Any write of 0 we can observe leaves the field in a permanently zero state.
3168 if n == 0 {
3169 return None;
3170 }
3171 // See comments in `Arc::clone` for why we do this (for `mem::forget`).
3172 assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
3173 Some(n + 1)
3174 }
3175
3176 // We use a CAS loop to increment the strong count instead of a
3177 // fetch_add as this function should never take the reference count
3178 // from zero to one.
3179 //
3180 // Relaxed is fine for the failure case because we don't have any expectations about the new state.
3181 // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
3182 // value can be initialized after `Weak` references have already been created. In that case, we
3183 // expect to observe the fully initialized value.
3184 if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
3185 // SAFETY: `checked_increment` only succeeds when the strong count is nonzero, so the inner value is still alive.
3186 unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
3187 } else {
3188 None
3189 }
3190 }
3191
3192 /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
3193 ///
3194 /// If `self` was created using [`Weak::new`], this will return 0.
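///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
/// let weak_five = Arc::downgrade(&five);
/// assert_eq!(1, weak_five.strong_count());
///
/// drop(five);
/// assert_eq!(0, weak_five.strong_count());
/// ```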
3195 #[must_use]
3196 #[stable(feature = "weak_counts", since = "1.41.0")]
3197 pub fn strong_count(&self) -> usize {
3198 if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
3199 }
3200
3201 /// Gets an approximation of the number of `Weak` pointers pointing to this
3202 /// allocation.
3203 ///
3204 /// If `self` was created using [`Weak::new`], or if there are no remaining
3205 /// strong pointers, this will return 0.
3206 ///
3207 /// # Accuracy
3208 ///
3209 /// Due to implementation details, the returned value can be off by 1 in
3210 /// either direction when other threads are manipulating any `Arc`s or
3211 /// `Weak`s pointing to the same allocation.
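///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
/// let weak_five = Arc::downgrade(&five);
/// assert_eq!(1, weak_five.weak_count());
///
/// // Once the last strong pointer is gone, the count reads as 0.
/// drop(five);
/// assert_eq!(0, weak_five.weak_count());
/// ```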
3212 #[must_use]
3213 #[stable(feature = "weak_counts", since = "1.41.0")]
3214 pub fn weak_count(&self) -> usize {
3215 if let Some(inner) = self.inner() {
3216 let weak = inner.weak.load(Acquire);
3217 let strong = inner.strong.load(Relaxed);
3218 if strong == 0 {
3219 0
3220 } else {
3221 // Since we observed that there was at least one strong pointer
3222 // after reading the weak count, we know that the implicit weak
3223 // reference (present whenever any strong references are alive)
3224 // was still around when we observed the weak count, and can
3225 // therefore safely subtract it.
3226 weak - 1
3227 }
3228 } else {
3229 0
3230 }
3231 }
3232
3233 /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
3234 /// (i.e., when this `Weak` was created by `Weak::new`).
3235 #[inline]
3236 fn inner(&self) -> Option<WeakInner<'_>> {
3237 let ptr = self.ptr.as_ptr();
3238 if is_dangling(ptr) {
3239 None
3240 } else {
3241 // We are careful to *not* create a reference covering the "data" field, as
3242 // the field may be mutated concurrently (for example, if the last `Arc`
3243 // is dropped, the data field will be dropped in-place).
3244 Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
3245 }
3246 }
3247
3248 /// Returns `true` if the two `Weak`s point to the same allocation (in a vein similar to
3249 /// [`ptr::eq`]), or if both don't point to any allocation (because they were created with
3250 /// `Weak::new()`). However, this function ignores the metadata of `dyn Trait` pointers.
3251 ///
3252 /// # Notes
3253 ///
3254 /// Since this compares pointers, any two `Weak`s created by `Weak::new()` will compare
3255 /// equal to each other, even though they don't point to any allocation.
3256 ///
3257 /// # Examples
3258 ///
3259 /// ```
3260 /// use std::sync::Arc;
3261 ///
3262 /// let first_rc = Arc::new(5);
3263 /// let first = Arc::downgrade(&first_rc);
3264 /// let second = Arc::downgrade(&first_rc);
3265 ///
3266 /// assert!(first.ptr_eq(&second));
3267 ///
3268 /// let third_rc = Arc::new(5);
3269 /// let third = Arc::downgrade(&third_rc);
3270 ///
3271 /// assert!(!first.ptr_eq(&third));
3272 /// ```
3273 ///
3274 /// Comparing `Weak::new`.
3275 ///
3276 /// ```
3277 /// use std::sync::{Arc, Weak};
3278 ///
3279 /// let first = Weak::new();
3280 /// let second = Weak::new();
3281 /// assert!(first.ptr_eq(&second));
3282 ///
3283 /// let third_rc = Arc::new(());
3284 /// let third = Arc::downgrade(&third_rc);
3285 /// assert!(!first.ptr_eq(&third));
3286 /// ```
3287 ///
3288 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
3289 #[inline]
3290 #[must_use]
3291 #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
3292 pub fn ptr_eq(&self, other: &Self) -> bool {
3293 ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
3294 }
3295}
3296
3297#[stable(feature = "arc_weak", since = "1.4.0")]
3298impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
3299 /// Makes a clone of the `Weak` pointer that points to the same allocation.
3300 ///
3301 /// # Examples
3302 ///
3303 /// ```
3304 /// use std::sync::{Arc, Weak};
3305 ///
3306 /// let weak_five = Arc::downgrade(&Arc::new(5));
3307 ///
3308 /// let _ = Weak::clone(&weak_five);
3309 /// ```
3310 #[inline]
3311 fn clone(&self) -> Weak<T, A> {
3312 if let Some(inner) = self.inner() {
3313 // See comments in Arc::clone() for why this is relaxed. This can use a
3314 // fetch_add (ignoring the lock) because the weak count is only locked
3315 // when there are *no other* weak pointers in existence. (So we can't be
3316 // running this code in that case.)
3317 let old_size = inner.weak.fetch_add(1, Relaxed);
3318
3319 // See comments in Arc::clone() for why we do this (for mem::forget).
3320 if old_size > MAX_REFCOUNT {
3321 abort();
3322 }
3323 }
3324
3325 Weak { ptr: self.ptr, alloc: self.alloc.clone() }
3326 }
3327}
3328
3329#[unstable(feature = "ergonomic_clones", issue = "132290")]
3330impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}
3331
3332#[stable(feature = "downgraded_weak", since = "1.10.0")]
3333impl<T> Default for Weak<T> {
3334 /// Constructs a new `Weak<T>`, without allocating memory.
3335 /// Calling [`upgrade`] on the return value always
3336 /// gives [`None`].
3337 ///
3338 /// [`upgrade`]: Weak::upgrade
3339 ///
3340 /// # Examples
3341 ///
3342 /// ```
3343 /// use std::sync::Weak;
3344 ///
3345 /// let empty: Weak<i64> = Default::default();
3346 /// assert!(empty.upgrade().is_none());
3347 /// ```
3348 fn default() -> Weak<T> {
3349 Weak::new()
3350 }
3351}
3352
3353#[stable(feature = "arc_weak", since = "1.4.0")]
3354unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3355 /// Drops the `Weak` pointer.
3356 ///
3357 /// # Examples
3358 ///
3359 /// ```
3360 /// use std::sync::{Arc, Weak};
3361 ///
3362 /// struct Foo;
3363 ///
3364 /// impl Drop for Foo {
3365 /// fn drop(&mut self) {
3366 /// println!("dropped!");
3367 /// }
3368 /// }
3369 ///
3370 /// let foo = Arc::new(Foo);
3371 /// let weak_foo = Arc::downgrade(&foo);
3372 /// let other_weak_foo = Weak::clone(&weak_foo);
3373 ///
3374 /// drop(weak_foo); // Doesn't print anything
3375 /// drop(foo); // Prints "dropped!"
3376 ///
3377 /// assert!(other_weak_foo.upgrade().is_none());
3378 /// ```
3379 fn drop(&mut self) {
3380 // If we find out that we were the last weak pointer, then it's time to
3381 // deallocate the data entirely. See the discussion in Arc::drop() about
3382 // the memory orderings.
3383 //
3384 // It's not necessary to check for the locked state here, because the
3385 // weak count can only be locked if there was precisely one weak ref,
3386 // meaning that drop could only subsequently run ON that remaining weak
3387 // ref, which can only happen after the lock is released.
3388 let inner = if let Some(inner) = self.inner() { inner } else { return };
3389
3390 if inner.weak.fetch_sub(1, Release) == 1 {
3391 acquire!(inner.weak);
3392
3393 // Make sure we aren't trying to "deallocate" the shared static for empty slices
3394 // used by Default::default.
3395 debug_assert!(
3396 !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
3397 "Arc/Weaks backed by a static should never be deallocated. \
3398 Likely decrement_strong_count or from_raw were called too many times.",
3399 );
3400
3401 unsafe {
3402 self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3403 }
3404 }
3405 }
3406}
3407
3408#[stable(feature = "rust1", since = "1.0.0")]
3409trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3410 fn eq(&self, other: &Arc<T, A>) -> bool;
3411 fn ne(&self, other: &Arc<T, A>) -> bool;
3412}
3413
3414#[stable(feature = "rust1", since = "1.0.0")]
3415impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3416 #[inline]
3417 default fn eq(&self, other: &Arc<T, A>) -> bool {
3418 **self == **other
3419 }
3420 #[inline]
3421 default fn ne(&self, other: &Arc<T, A>) -> bool {
3422 **self != **other
3423 }
3424}
3425
3426/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
3427/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
3428/// store large values that are slow to clone but also expensive to compare for equality, so this
3429/// cost pays off more easily. It's also more likely for two `Arc` clones to point to
3430/// the same value than for two `&T`s.
3431///
3432/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
3433#[stable(feature = "rust1", since = "1.0.0")]
3434impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3435 #[inline]
3436 fn eq(&self, other: &Arc<T, A>) -> bool {
3437 Arc::ptr_eq(self, other) || **self == **other
3438 }
3439
3440 #[inline]
3441 fn ne(&self, other: &Arc<T, A>) -> bool {
3442 !Arc::ptr_eq(self, other) && **self != **other
3443 }
3444}
3445
3446#[stable(feature = "rust1", since = "1.0.0")]
3447impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3448 /// Equality for two `Arc`s.
3449 ///
3450 /// Two `Arc`s are equal if their inner values are equal, even if they are
3451 /// stored in different allocations.
3452 ///
3453 /// If `T` also implements `Eq` (implying reflexivity of equality),
3454 /// two `Arc`s that point to the same allocation are always equal.
3455 ///
3456 /// # Examples
3457 ///
3458 /// ```
3459 /// use std::sync::Arc;
3460 ///
3461 /// let five = Arc::new(5);
3462 ///
3463 /// assert!(five == Arc::new(5));
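///
/// // With `T: Eq`, an `Arc` always equals a clone of itself; the comparison
/// // may short-circuit on pointer identity.
/// assert!(five == Arc::clone(&five));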
3464 /// ```
3465 #[inline]
3466 fn eq(&self, other: &Arc<T, A>) -> bool {
3467 ArcEqIdent::eq(self, other)
3468 }
3469
3470 /// Inequality for two `Arc`s.
3471 ///
3472 /// Two `Arc`s are not equal if their inner values are not equal.
3473 ///
3474 /// If `T` also implements `Eq` (implying reflexivity of equality),
3475 /// two `Arc`s that point to the same allocation are never unequal.
3476 ///
3477 /// # Examples
3478 ///
3479 /// ```
3480 /// use std::sync::Arc;
3481 ///
3482 /// let five = Arc::new(5);
3483 ///
3484 /// assert!(five != Arc::new(6));
3485 /// ```
3486 #[inline]
3487 fn ne(&self, other: &Arc<T, A>) -> bool {
3488 ArcEqIdent::ne(self, other)
3489 }
3490}
3491
3492#[stable(feature = "rust1", since = "1.0.0")]
3493impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3494 /// Partial comparison for two `Arc`s.
3495 ///
3496 /// The two are compared by calling `partial_cmp()` on their inner values.
3497 ///
3498 /// # Examples
3499 ///
3500 /// ```
3501 /// use std::sync::Arc;
3502 /// use std::cmp::Ordering;
3503 ///
3504 /// let five = Arc::new(5);
3505 ///
3506 /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3507 /// ```
3508 fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3509 (**self).partial_cmp(&**other)
3510 }
3511
3512 /// Less-than comparison for two `Arc`s.
3513 ///
3514 /// The two are compared by calling `<` on their inner values.
3515 ///
3516 /// # Examples
3517 ///
3518 /// ```
3519 /// use std::sync::Arc;
3520 ///
3521 /// let five = Arc::new(5);
3522 ///
3523 /// assert!(five < Arc::new(6));
3524 /// ```
3525 fn lt(&self, other: &Arc<T, A>) -> bool {
3526 *(*self) < *(*other)
3527 }
3528
3529 /// 'Less than or equal to' comparison for two `Arc`s.
3530 ///
3531 /// The two are compared by calling `<=` on their inner values.
3532 ///
3533 /// # Examples
3534 ///
3535 /// ```
3536 /// use std::sync::Arc;
3537 ///
3538 /// let five = Arc::new(5);
3539 ///
3540 /// assert!(five <= Arc::new(5));
3541 /// ```
3542 fn le(&self, other: &Arc<T, A>) -> bool {
3543 *(*self) <= *(*other)
3544 }
3545
3546 /// Greater-than comparison for two `Arc`s.
3547 ///
3548 /// The two are compared by calling `>` on their inner values.
3549 ///
3550 /// # Examples
3551 ///
3552 /// ```
3553 /// use std::sync::Arc;
3554 ///
3555 /// let five = Arc::new(5);
3556 ///
3557 /// assert!(five > Arc::new(4));
3558 /// ```
3559 fn gt(&self, other: &Arc<T, A>) -> bool {
3560 *(*self) > *(*other)
3561 }
3562
3563 /// 'Greater than or equal to' comparison for two `Arc`s.
3564 ///
3565 /// The two are compared by calling `>=` on their inner values.
3566 ///
3567 /// # Examples
3568 ///
3569 /// ```
3570 /// use std::sync::Arc;
3571 ///
3572 /// let five = Arc::new(5);
3573 ///
3574 /// assert!(five >= Arc::new(5));
3575 /// ```
3576 fn ge(&self, other: &Arc<T, A>) -> bool {
3577 *(*self) >= *(*other)
3578 }
3579}

3580#[stable(feature = "rust1", since = "1.0.0")]
3581impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3582 /// Comparison for two `Arc`s.
3583 ///
3584 /// The two are compared by calling `cmp()` on their inner values.
3585 ///
3586 /// # Examples
3587 ///
3588 /// ```
3589 /// use std::sync::Arc;
3590 /// use std::cmp::Ordering;
3591 ///
3592 /// let five = Arc::new(5);
3593 ///
3594 /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3595 /// ```
3596 fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3597 (**self).cmp(&**other)
3598 }
3599}

3600#[stable(feature = "rust1", since = "1.0.0")]
3601impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3602
3603#[stable(feature = "rust1", since = "1.0.0")]
3604impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3605 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3606 fmt::Display::fmt(&**self, f)
3607 }
3608}
3609
3610#[stable(feature = "rust1", since = "1.0.0")]
3611impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3612 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3613 fmt::Debug::fmt(&**self, f)
3614 }
3615}
3616
3617#[stable(feature = "rust1", since = "1.0.0")]
3618impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3619 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3620 fmt::Pointer::fmt(&(&raw const **self), f)
3621 }
3622}
3623
3624#[cfg(not(no_global_oom_handling))]
3625#[stable(feature = "rust1", since = "1.0.0")]
3626impl<T: Default> Default for Arc<T> {
3627 /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3628 ///
3629 /// # Examples
3630 ///
3631 /// ```
3632 /// use std::sync::Arc;
3633 ///
3634 /// let x: Arc<i32> = Default::default();
3635 /// assert_eq!(*x, 0);
3636 /// ```
3637 fn default() -> Arc<T> {
3638 unsafe {
3639 Self::from_inner(
3640 Box::leak(Box::write(
3641 Box::new_uninit(),
3642 ArcInner {
3643 strong: atomic::AtomicUsize::new(1),
3644 weak: atomic::AtomicUsize::new(1),
3645 data: T::default(),
3646 },
3647 ))
3648 .into(),
3649 )
3650 }
3651 }
3652}
3653
3654/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
3655/// returned by `Default::default`.
3656///
3657/// Layout notes:
3658/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
3659/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
3660/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
3661#[repr(C, align(16))]
3662struct SliceArcInnerForStatic {
3663 inner: ArcInner<[u8; 1]>,
3664}
3665#[cfg(not(no_global_oom_handling))]
3666const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;
3667
3668static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
3669 inner: ArcInner {
3670 strong: atomic::AtomicUsize::new(1),
3671 weak: atomic::AtomicUsize::new(1),
3672 data: [0],
3673 },
3674};
3675
3676#[cfg(not(no_global_oom_handling))]
3677#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3678impl Default for Arc<str> {
3679 /// Creates an empty `str` inside an `Arc`.
3680 ///
3681 /// This may or may not share an allocation with other Arcs.
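///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let empty: Arc<str> = Default::default();
/// assert_eq!("", &*empty);
/// ```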
3682 #[inline]
3683 fn default() -> Self {
3684 let arc: Arc<[u8]> = Default::default();
3685 debug_assert!(core::str::from_utf8(&*arc).is_ok());
3686 let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
3687 unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
3688 }
3689}
3690
3691#[cfg(not(no_global_oom_handling))]
3692#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3693impl Default for Arc<core::ffi::CStr> {
3694 /// Creates an empty `CStr` inside an `Arc`.
3695 ///
3696 /// This may or may not share an allocation with other Arcs.
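///
/// # Examples
///
/// ```
/// use std::ffi::CStr;
/// use std::sync::Arc;
///
/// let empty: Arc<CStr> = Default::default();
/// assert_eq!(c"", &*empty);
/// ```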
3697 #[inline]
3698 fn default() -> Self {
3699 use core::ffi::CStr;
3700 let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
3701 let inner: NonNull<ArcInner<CStr>> =
3702 NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
3703 // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3704 let this: mem::ManuallyDrop<Arc<CStr>> =
3705 unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3706 (*this).clone()
3707 }
3708}
3709
3710#[cfg(not(no_global_oom_handling))]
3711#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3712impl<T> Default for Arc<[T]> {
3713 /// Creates an empty `[T]` inside an `Arc`.
3714 ///
3715 /// This may or may not share an allocation with other Arcs.
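///
/// # Examples
///
/// ```
/// use std::sync::Arc;
///
/// let empty: Arc<[i32]> = Default::default();
/// assert!(empty.is_empty());
/// ```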
3716 #[inline]
3717 fn default() -> Self {
3718 if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
3719 // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
3720 // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
3721 // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
3722 // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
3723 let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
3724 let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
3725 // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3726 let this: mem::ManuallyDrop<Arc<[T; 0]>> =
3727 unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3728 return (*this).clone();
3729 }
3730
3731 // If T's alignment is too large for the static, make a new unique allocation.
3732 let arr: [T; 0] = [];
3733 Arc::from(arr)
3734 }
3735}
3736
3737#[cfg(not(no_global_oom_handling))]
3738#[stable(feature = "pin_default_impls", since = "1.91.0")]
3739impl<T> Default for Pin<Arc<T>>
3740where
3741 T: ?Sized,
3742 Arc<T>: Default,
3743{
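    /// Creates a pinned `Arc<T>` with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::sync::Arc;
    ///
    /// let x: Pin<Arc<i32>> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```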
3744 #[inline]
3745 fn default() -> Self {
3746 unsafe { Pin::new_unchecked(Arc::<T>::default()) }
3747 }
3748}
3749
3750#[stable(feature = "rust1", since = "1.0.0")]
3751impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
3752 fn hash<H: Hasher>(&self, state: &mut H) {
3753 (**self).hash(state)
3754 }
3755}
3756
3757#[cfg(not(no_global_oom_handling))]
3758#[stable(feature = "from_for_ptrs", since = "1.6.0")]
3759impl<T> From<T> for Arc<T> {
3760 /// Converts a `T` into an `Arc<T>`
3761 ///
3762 /// The conversion moves the value into a
3763 /// newly allocated `Arc`. It is equivalent to
3764 /// calling `Arc::new(t)`.
3765 ///
3766 /// # Example
3767 /// ```rust
3768 /// # use std::sync::Arc;
3769 /// let x = 5;
3770 /// let arc = Arc::new(5);
3771 ///
3772 /// assert_eq!(Arc::from(x), arc);
3773 /// ```
3774 fn from(t: T) -> Self {
3775 Arc::new(t)
3776 }
3777}
3778
3779#[cfg(not(no_global_oom_handling))]
3780#[stable(feature = "shared_from_array", since = "1.74.0")]
3781impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
3782 /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
3783 ///
3784 /// The conversion moves the array into a newly allocated `Arc`.
3785 ///
3786 /// # Example
3787 ///
3788 /// ```
3789 /// # use std::sync::Arc;
3790 /// let original: [i32; 3] = [1, 2, 3];
3791 /// let shared: Arc<[i32]> = Arc::from(original);
3792 /// assert_eq!(&[1, 2, 3], &shared[..]);
3793 /// ```
3794 #[inline]
3795 fn from(v: [T; N]) -> Arc<[T]> {
3796 Arc::<[T; N]>::from(v)
3797 }
3798}
3799
3800#[cfg(not(no_global_oom_handling))]
3801#[stable(feature = "shared_from_slice", since = "1.21.0")]
3802impl<T: Clone> From<&[T]> for Arc<[T]> {
3803 /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3804 ///
3805 /// # Example
3806 ///
3807 /// ```
3808 /// # use std::sync::Arc;
3809 /// let original: &[i32] = &[1, 2, 3];
3810 /// let shared: Arc<[i32]> = Arc::from(original);
3811 /// assert_eq!(&[1, 2, 3], &shared[..]);
3812 /// ```
3813 #[inline]
3814 fn from(v: &[T]) -> Arc<[T]> {
3815 <Self as ArcFromSlice<T>>::from_slice(v)
3816 }
3817}
3818
3819#[cfg(not(no_global_oom_handling))]
3820#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3821impl<T: Clone> From<&mut [T]> for Arc<[T]> {
3822 /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3823 ///
3824 /// # Example
3825 ///
3826 /// ```
3827 /// # use std::sync::Arc;
3828 /// let mut original = [1, 2, 3];
3829 /// let original: &mut [i32] = &mut original;
3830 /// let shared: Arc<[i32]> = Arc::from(original);
3831 /// assert_eq!(&[1, 2, 3], &shared[..]);
3832 /// ```
3833 #[inline]
3834 fn from(v: &mut [T]) -> Arc<[T]> {
3835 Arc::from(&*v)
3836 }
3837}
3838
3839#[cfg(not(no_global_oom_handling))]
3840#[stable(feature = "shared_from_slice", since = "1.21.0")]
3841impl From<&str> for Arc<str> {
3842 /// Allocates a reference-counted `str` and copies `v` into it.
3843 ///
3844 /// # Example
3845 ///
3846 /// ```
3847 /// # use std::sync::Arc;
3848 /// let shared: Arc<str> = Arc::from("eggplant");
3849 /// assert_eq!("eggplant", &shared[..]);
3850 /// ```
3851 #[inline]
3852 fn from(v: &str) -> Arc<str> {
3853 let arc = Arc::<[u8]>::from(v.as_bytes());
3854 unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
3855 }
3856}
3857
3858#[cfg(not(no_global_oom_handling))]
3859#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3860impl From<&mut str> for Arc<str> {
3861 /// Allocates a reference-counted `str` and copies `v` into it.
3862 ///
3863 /// # Example
3864 ///
3865 /// ```
3866 /// # use std::sync::Arc;
3867 /// let mut original = String::from("eggplant");
3868 /// let original: &mut str = &mut original;
3869 /// let shared: Arc<str> = Arc::from(original);
3870 /// assert_eq!("eggplant", &shared[..]);
3871 /// ```
3872 #[inline]
3873 fn from(v: &mut str) -> Arc<str> {
3874 Arc::from(&*v)
3875 }
3876}
3877
3878#[cfg(not(no_global_oom_handling))]
3879#[stable(feature = "shared_from_slice", since = "1.21.0")]
3880impl From<String> for Arc<str> {
3881 /// Allocates a reference-counted `str` and copies `v` into it.
3882 ///
3883 /// # Example
3884 ///
3885 /// ```
3886 /// # use std::sync::Arc;
3887 /// let unique: String = "eggplant".to_owned();
3888 /// let shared: Arc<str> = Arc::from(unique);
3889 /// assert_eq!("eggplant", &shared[..]);
3890 /// ```
3891 #[inline]
3892 fn from(v: String) -> Arc<str> {
3893 Arc::from(&v[..])
3894 }
3895}
3896
3897#[cfg(not(no_global_oom_handling))]
3898#[stable(feature = "shared_from_slice", since = "1.21.0")]
3899impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
3900 /// Move a boxed object to a new, reference-counted allocation.
3901 ///
3902 /// # Example
3903 ///
3904 /// ```
3905 /// # use std::sync::Arc;
3906 /// let unique: Box<str> = Box::from("eggplant");
3907 /// let shared: Arc<str> = Arc::from(unique);
3908 /// assert_eq!("eggplant", &shared[..]);
3909 /// ```
3910 #[inline]
3911 fn from(v: Box<T, A>) -> Arc<T, A> {
3912 Arc::from_box_in(v)
3913 }
3914}
3915
3916#[cfg(not(no_global_oom_handling))]
3917#[stable(feature = "shared_from_slice", since = "1.21.0")]
3918impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
3919 /// Allocates a reference-counted slice and moves `v`'s items into it.
3920 ///
3921 /// # Example
3922 ///
3923 /// ```
3924 /// # use std::sync::Arc;
3925 /// let unique: Vec<i32> = vec![1, 2, 3];
3926 /// let shared: Arc<[i32]> = Arc::from(unique);
3927 /// assert_eq!(&[1, 2, 3], &shared[..]);
3928 /// ```
3929 #[inline]
3930 fn from(v: Vec<T, A>) -> Arc<[T], A> {
3931 unsafe {
3932 let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
3933
3934 let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
3935 ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);
3936
3937 // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
3938 // without dropping its contents or the allocator
3939 let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
3940
3941 Self::from_ptr_in(rc_ptr, alloc)
3942 }
3943 }
3944}
3945
3946#[stable(feature = "shared_from_cow", since = "1.45.0")]
3947impl<'a, B> From<Cow<'a, B>> for Arc<B>
3948where
3949 B: ToOwned + ?Sized,
3950 Arc<B>: From<&'a B> + From<B::Owned>,
3951{
3952 /// Creates an atomically reference-counted pointer from a clone-on-write
3953 /// pointer by copying its content.
3954 ///
3955 /// # Example
3956 ///
3957 /// ```rust
3958 /// # use std::sync::Arc;
3959 /// # use std::borrow::Cow;
3960 /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
3961 /// let shared: Arc<str> = Arc::from(cow);
3962 /// assert_eq!("eggplant", &shared[..]);
3963 /// ```
3964 #[inline]
3965 fn from(cow: Cow<'a, B>) -> Arc<B> {
3966 match cow {
3967 Cow::Borrowed(s) => Arc::from(s),
3968 Cow::Owned(s) => Arc::from(s),
3969 }
3970 }
3971}
3972
3973#[stable(feature = "shared_from_str", since = "1.62.0")]
3974impl From<Arc<str>> for Arc<[u8]> {
3975 /// Converts an atomically reference-counted string slice into a byte slice.
3976 ///
3977 /// # Example
3978 ///
3979 /// ```
3980 /// # use std::sync::Arc;
3981 /// let string: Arc<str> = Arc::from("eggplant");
3982 /// let bytes: Arc<[u8]> = Arc::from(string);
3983 /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
3984 /// ```
3985 #[inline]
3986 fn from(rc: Arc<str>) -> Self {
3987 // SAFETY: `str` has the same layout as `[u8]`.
3988 unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
3989 }
3990}
3991
3992#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
3993impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
3994 type Error = Arc<[T], A>;
3995
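    /// Converts the `Arc<[T]>` into an `Arc<[T; N]>` if its length is exactly `N`,
    /// returning the original `Arc` otherwise.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = slice.try_into().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```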
3996 fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
3997 if boxed_slice.len() == N {
3998 let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
3999 Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
4000 } else {
4001 Err(boxed_slice)
4002 }
4003 }
4004}
4005
4006#[cfg(not(no_global_oom_handling))]
4007#[stable(feature = "shared_from_iter", since = "1.37.0")]
4008impl<T> FromIterator<T> for Arc<[T]> {
4009 /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
4010 ///
4011 /// # Performance characteristics
4012 ///
4013 /// ## The general case
4014 ///
4015 /// In the general case, collecting into `Arc<[T]>` is done by first
4016 /// collecting into a `Vec<T>`. That is, when writing the following:
4017 ///
4018 /// ```rust
4019 /// # use std::sync::Arc;
4020 /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
4021 /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
4022 /// ```
4023 ///
4024 /// this behaves as if we wrote:
4025 ///
4026 /// ```rust
4027 /// # use std::sync::Arc;
4028 /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
4029 /// .collect::<Vec<_>>() // The first set of allocations happens here.
4030 /// .into(); // A second allocation for `Arc<[T]>` happens here.
4031 /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
4032 /// ```
4033 ///
4034 /// This will allocate as many times as needed for constructing the `Vec<T>`
4035 /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
4036 ///
4037 /// ## Iterators of known length
4038 ///
4039 /// When your `Iterator` implements `TrustedLen` and is of an exact size,
4040 /// a single allocation will be made for the `Arc<[T]>`. For example:
4041 ///
4042 /// ```rust
4043 /// # use std::sync::Arc;
4044 /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
4045 /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
4046 /// ```
4047 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
4048 ToArcSlice::to_arc_slice(iter.into_iter())
4049 }
4050}
4051
4052#[cfg(not(no_global_oom_handling))]
4053/// Specialization trait used for collecting into `Arc<[T]>`.
4054trait ToArcSlice<T>: Iterator<Item = T> + Sized {
4055 fn to_arc_slice(self) -> Arc<[T]>;
4056}
4057
4058#[cfg(not(no_global_oom_handling))]
4059impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
4060 default fn to_arc_slice(self) -> Arc<[T]> {
4061 self.collect::<Vec<T>>().into()
4062 }
4063}
4064
4065#[cfg(not(no_global_oom_handling))]
4066impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
4067 fn to_arc_slice(self) -> Arc<[T]> {
4068 // This is the case for a `TrustedLen` iterator.
4069 let (low, high) = self.size_hint();
4070 if let Some(high) = high {
4071 debug_assert_eq!(
4072 low,
4073 high,
4074 "TrustedLen iterator's size hint is not exact: {:?}",
4075 (low, high)
4076 );
4077
4078 unsafe {
4079 // SAFETY: `from_iter_exact` requires an iterator of exactly `low` items, and the
 // `TrustedLen` contract guarantees `low` is exact when the upper bound is `Some`.
4080 Arc::from_iter_exact(self, low)
4081 }
4082 } else {
4083 // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
4084 // length exceeding `usize::MAX`.
4085 // The default implementation would collect into a vec which would panic.
4086 // Thus we panic here immediately without invoking `Vec` code.
4087 panic!("capacity overflow");
4088 }
4089 }
4090}
4091
4092#[stable(feature = "rust1", since = "1.0.0")]
4093impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
4094 fn borrow(&self) -> &T {
4095 &**self
4096 }
4097}
4098
4099#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
4100impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
4101 fn as_ref(&self) -> &T {
4102 &**self
4103 }
4104}
4105
4106#[stable(feature = "pin", since = "1.33.0")]
4107impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
4108
4109/// Gets the offset within an `ArcInner` for the payload behind a pointer.
4110///
4111/// # Safety
4112///
4113/// The pointer must point to (and have valid metadata for) a previously
4114/// valid instance of T, but the T is allowed to be dropped.
4115unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
4116 // Align the unsized value to the end of the ArcInner.
4117 // Because ArcInner is repr(C), it will always be the last field in memory.
4118 // SAFETY: since the only unsized types possible are slices, trait objects,
4119 // and extern types, the input safety requirement is currently enough to
4120 // satisfy the requirements of align_of_val_raw; this is an implementation
4121 // detail of the language that must not be relied upon outside of std.
4122 unsafe { data_offset_align(align_of_val_raw(ptr)) }
4123}
4124
4125#[inline]
4126fn data_offset_align(align: usize) -> usize {
4127 let layout = Layout::new::<ArcInner<()>>();
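 // A worked example, assuming a 64-bit target: the `ArcInner<()>` header is two
 // `AtomicUsize` counters, so `layout.size()` is 16; for a payload with
 // `align == 32`, `padding_needed_for(32)` adds 16, placing the data at offset 32.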
4128 layout.size() + layout.padding_needed_for(align)
4129}
4130
4131/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
4132/// but will deallocate it (without dropping the value) when dropped.
4133///
4134/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
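///
/// A minimal sketch of the intended flow (hypothetical caller, assuming a
/// `value: T` with `T: CloneToUninit` and the `Global` allocator):
///
/// ```ignore (internal helper, not part of the public API)
/// let mut in_progress = UniqueArcUninit::new(&value, Global);
/// unsafe {
///     // Initialize the payload; if this panics, `Drop` deallocates the
///     // `ArcInner` without attempting to drop the uninitialized value.
///     value.clone_to_uninit(in_progress.data_ptr().cast());
///     let arc: Arc<T> = in_progress.into_arc();
/// }
/// ```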
4135#[cfg(not(no_global_oom_handling))]
4136struct UniqueArcUninit<T: ?Sized, A: Allocator> {
4137 ptr: NonNull<ArcInner<T>>,
4138 layout_for_value: Layout,
4139 alloc: Option<A>,
4140}
4141
4142#[cfg(not(no_global_oom_handling))]
4143impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
4144 /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
4145 fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
4146 let layout = Layout::for_value(for_value);
4147 let ptr = unsafe {
4148 Arc::allocate_for_layout(
4149 layout,
4150 |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4151 |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4152 )
4153 };
4154 Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
4155 }
4156
4157 /// Returns the pointer to be written into to initialize the [`Arc`].
4158 fn data_ptr(&mut self) -> *mut T {
4159 let offset = data_offset_align(self.layout_for_value.align());
4160 unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
4161 }
4162
4163 /// Upgrade this into a normal [`Arc`].
4164 ///
4165 /// # Safety
4166 ///
4167 /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
4168 unsafe fn into_arc(self) -> Arc<T, A> {
4169 let mut this = ManuallyDrop::new(self);
4170 let ptr = this.ptr.as_ptr();
4171 let alloc = this.alloc.take().unwrap();
4172
4173 // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
4174 // for having initialized the data.
4175 unsafe { Arc::from_ptr_in(ptr, alloc) }
4176 }
4177}
4178
4179#[cfg(not(no_global_oom_handling))]
4180impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
4181 fn drop(&mut self) {
4182 // SAFETY:
4183 // * new() produced a pointer safe to deallocate.
4184 // * We own the pointer unless into_arc() was called, which forgets us.
4185 unsafe {
4186 self.alloc.take().unwrap().deallocate(
4187 self.ptr.cast(),
4188 arcinner_layout_for_value_layout(self.layout_for_value),
4189 );
4190 }
4191 }
4192}
4193
4194#[stable(feature = "arc_error", since = "1.52.0")]
4195impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
4196 #[allow(deprecated)]
4197 fn cause(&self) -> Option<&dyn core::error::Error> {
4198 core::error::Error::cause(&**self)
4199 }
4200
4201 fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
4202 core::error::Error::source(&**self)
4203 }
4204
4205 fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
4206 core::error::Error::provide(&**self, req);
4207 }
4208}
4209
4210/// A uniquely owned [`Arc`].
4211///
4212/// This represents an `Arc` that is known to be uniquely owned -- that is, have exactly one strong
4213/// reference. Multiple weak pointers can be created, but attempts to upgrade those to strong
4214/// references will fail unless the `UniqueArc` they point to has been converted into a regular `Arc`.
4215///
4216/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
4217/// use case is to have an object be mutable during its initialization phase but then have it become
4218/// immutable and converted to a normal `Arc`.
4219///
4220/// This can be used as a flexible way to create cyclic data structures, as in the example below.
4221///
4222/// ```
4223/// #![feature(unique_rc_arc)]
4224/// use std::sync::{Arc, Weak, UniqueArc};
4225///
4226/// struct Gadget {
4227/// me: Weak<Gadget>,
4228/// }
4229///
4230/// fn create_gadget() -> Option<Arc<Gadget>> {
4231/// let mut rc = UniqueArc::new(Gadget {
4232/// me: Weak::new(),
4233/// });
4234/// rc.me = UniqueArc::downgrade(&rc);
4235/// Some(UniqueArc::into_arc(rc))
4236/// }
4237///
4238/// create_gadget().unwrap();
4239/// ```
4240///
4241/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is that
4242/// [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown in the
4243/// previous example, `UniqueArc` allows for more flexibility in the construction of cyclic data,
4244/// including fallible or async constructors.
4245#[unstable(feature = "unique_rc_arc", issue = "112566")]
4246pub struct UniqueArc<
4247 T: ?Sized,
4248 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
4249> {
4250 ptr: NonNull<ArcInner<T>>,
4251 // Define the ownership of `ArcInner<T>` for drop-check
4252 _marker: PhantomData<ArcInner<T>>,
4253 // Invariance is necessary for soundness: once other `Weak`
4254 // references exist, we already have a form of shared mutability!
4255 _marker2: PhantomData<*mut T>,
4256 alloc: A,
4257}
4258
4259#[unstable(feature = "unique_rc_arc", issue = "112566")]
4260unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}
4261
4262#[unstable(feature = "unique_rc_arc", issue = "112566")]
4263unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}
4264
4265#[unstable(feature = "unique_rc_arc", issue = "112566")]
4266// #[unstable(feature = "coerce_unsized", issue = "18598")]
4267impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
4268 for UniqueArc<T, A>
4269{
4270}
4271
4272//#[unstable(feature = "unique_rc_arc", issue = "112566")]
4273#[unstable(feature = "dispatch_from_dyn", issue = "none")]
4274impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}
4275
4276#[unstable(feature = "unique_rc_arc", issue = "112566")]
4277impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
4278 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4279 fmt::Display::fmt(&**self, f)
4280 }
4281}
4282
4283#[unstable(feature = "unique_rc_arc", issue = "112566")]
4284impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
4285 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4286 fmt::Debug::fmt(&**self, f)
4287 }
4288}
4289
4290#[unstable(feature = "unique_rc_arc", issue = "112566")]
4291impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
4292 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4293 fmt::Pointer::fmt(&(&raw const **self), f)
4294 }
4295}
4296
4297#[unstable(feature = "unique_rc_arc", issue = "112566")]
4298impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
4299 fn borrow(&self) -> &T {
4300 &**self
4301 }
4302}
4303
4304#[unstable(feature = "unique_rc_arc", issue = "112566")]
4305impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
4306 fn borrow_mut(&mut self) -> &mut T {
4307 &mut **self
4308 }
4309}
4310
4311#[unstable(feature = "unique_rc_arc", issue = "112566")]
4312impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
4313 fn as_ref(&self) -> &T {
4314 &**self
4315 }
4316}
4317
4318#[unstable(feature = "unique_rc_arc", issue = "112566")]
4319impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
4320 fn as_mut(&mut self) -> &mut T {
4321 &mut **self
4322 }
4323}
4324
4325#[unstable(feature = "unique_rc_arc", issue = "112566")]
4326impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}
4327
4328#[unstable(feature = "unique_rc_arc", issue = "112566")]
4329impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
4330 /// Equality for two `UniqueArc`s.
4331 ///
4332 /// Two `UniqueArc`s are equal if their inner values are equal.
4333 ///
4334 /// # Examples
4335 ///
4336 /// ```
4337 /// #![feature(unique_rc_arc)]
4338 /// use std::sync::UniqueArc;
4339 ///
4340 /// let five = UniqueArc::new(5);
4341 ///
4342 /// assert!(five == UniqueArc::new(5));
4343 /// ```
4344 #[inline]
4345 fn eq(&self, other: &Self) -> bool {
4346 PartialEq::eq(&**self, &**other)
4347 }
4348}
4349
4350#[unstable(feature = "unique_rc_arc", issue = "112566")]
4351impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
4352 /// Partial comparison for two `UniqueArc`s.
4353 ///
4354 /// The two are compared by calling `partial_cmp()` on their inner values.
4355 ///
4356 /// # Examples
4357 ///
4358 /// ```
4359 /// #![feature(unique_rc_arc)]
4360 /// use std::sync::UniqueArc;
4361 /// use std::cmp::Ordering;
4362 ///
4363 /// let five = UniqueArc::new(5);
4364 ///
4365 /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
4366 /// ```
4367 #[inline(always)]
4368 fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
4369 (**self).partial_cmp(&**other)
4370 }
4371
4372 /// Less-than comparison for two `UniqueArc`s.
4373 ///
4374 /// The two are compared by calling `<` on their inner values.
4375 ///
4376 /// # Examples
4377 ///
4378 /// ```
4379 /// #![feature(unique_rc_arc)]
4380 /// use std::sync::UniqueArc;
4381 ///
4382 /// let five = UniqueArc::new(5);
4383 ///
4384 /// assert!(five < UniqueArc::new(6));
4385 /// ```
4386 #[inline(always)]
4387 fn lt(&self, other: &UniqueArc<T, A>) -> bool {
4388 **self < **other
4389 }
4390
4391 /// 'Less than or equal to' comparison for two `UniqueArc`s.
4392 ///
4393 /// The two are compared by calling `<=` on their inner values.
4394 ///
4395 /// # Examples
4396 ///
4397 /// ```
4398 /// #![feature(unique_rc_arc)]
4399 /// use std::sync::UniqueArc;
4400 ///
4401 /// let five = UniqueArc::new(5);
4402 ///
4403 /// assert!(five <= UniqueArc::new(5));
4404 /// ```
4405 #[inline(always)]
4406 fn le(&self, other: &UniqueArc<T, A>) -> bool {
4407 **self <= **other
4408 }
4409
4410 /// Greater-than comparison for two `UniqueArc`s.
4411 ///
4412 /// The two are compared by calling `>` on their inner values.
4413 ///
4414 /// # Examples
4415 ///
4416 /// ```
4417 /// #![feature(unique_rc_arc)]
4418 /// use std::sync::UniqueArc;
4419 ///
4420 /// let five = UniqueArc::new(5);
4421 ///
4422 /// assert!(five > UniqueArc::new(4));
4423 /// ```
4424 #[inline(always)]
4425 fn gt(&self, other: &UniqueArc<T, A>) -> bool {
4426 **self > **other
4427 }
4428
4429 /// 'Greater than or equal to' comparison for two `UniqueArc`s.
4430 ///
4431 /// The two are compared by calling `>=` on their inner values.
4432 ///
4433 /// # Examples
4434 ///
4435 /// ```
4436 /// #![feature(unique_rc_arc)]
4437 /// use std::sync::UniqueArc;
4438 ///
4439 /// let five = UniqueArc::new(5);
4440 ///
4441 /// assert!(five >= UniqueArc::new(5));
4442 /// ```
4443 #[inline(always)]
4444 fn ge(&self, other: &UniqueArc<T, A>) -> bool {
4445 **self >= **other
4446 }
4447}
4448
4449#[unstable(feature = "unique_rc_arc", issue = "112566")]
4450impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
4451 /// Comparison for two `UniqueArc`s.
4452 ///
4453 /// The two are compared by calling `cmp()` on their inner values.
4454 ///
4455 /// # Examples
4456 ///
4457 /// ```
4458 /// #![feature(unique_rc_arc)]
4459 /// use std::sync::UniqueArc;
4460 /// use std::cmp::Ordering;
4461 ///
4462 /// let five = UniqueArc::new(5);
4463 ///
4464 /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
4465 /// ```
4466 #[inline]
4467 fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
4468 (**self).cmp(&**other)
4469 }
4470}
4471
4472#[unstable(feature = "unique_rc_arc", issue = "112566")]
4473impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}
4474
4475#[unstable(feature = "unique_rc_arc", issue = "112566")]
4476impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
4477 fn hash<H: Hasher>(&self, state: &mut H) {
4478 (**self).hash(state);
4479 }
4480}
4481
4482impl<T> UniqueArc<T, Global> {
4483 /// Creates a new `UniqueArc`.
4484 ///
4485 /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
4486 /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
4487 /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
4488 /// point to the new [`Arc`].
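    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut unique = UniqueArc::new(5);
    /// *unique += 1; // unique ownership allows direct mutation
    ///
    /// let weak = UniqueArc::downgrade(&unique);
    /// assert!(weak.upgrade().is_none()); // not yet converted to an `Arc`
    ///
    /// let arc = UniqueArc::into_arc(unique);
    /// assert_eq!(*arc, 6);
    /// assert_eq!(*weak.upgrade().unwrap(), 6);
    /// ```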
4489 #[cfg(not(no_global_oom_handling))]
4490 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4491 #[must_use]
4492 pub fn new(value: T) -> Self {
4493 Self::new_in(value, Global)
4494 }
4495
4496 /// Maps the value in a `UniqueArc`, reusing the allocation if possible.
4497 ///
4498 /// `f` is called with the value owned by the `UniqueArc`, and the result is returned,
4499 /// also in a `UniqueArc`.
4500 ///
4501 /// Note: this is an associated function, which means that you have
4502 /// to call it as `UniqueArc::map(u, f)` instead of `u.map(f)`. This
4503 /// is so that there is no conflict with a method on the inner type.
4504 ///
4505 /// # Examples
4506 ///
4507 /// ```
4508 /// #![feature(smart_pointer_try_map)]
4509 /// #![feature(unique_rc_arc)]
4510 ///
4511 /// use std::sync::UniqueArc;
4512 ///
4513 /// let r = UniqueArc::new(7);
4514 /// let new = UniqueArc::map(r, |i| i + 7);
4515 /// assert_eq!(*new, 14);
4516 /// ```
4517 #[cfg(not(no_global_oom_handling))]
4518 #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
4519 pub fn map<U>(this: Self, f: impl FnOnce(T) -> U) -> UniqueArc<U> {
4520 if size_of::<T>() == size_of::<U>()
4521 && align_of::<T>() == align_of::<U>()
4522 && UniqueArc::weak_count(&this) == 0
4523 {
4524 unsafe {
4525 let ptr = UniqueArc::into_raw(this);
4526 let value = ptr.read();
4527 let mut allocation = UniqueArc::from_raw(ptr.cast::<mem::MaybeUninit<U>>());
4528
4529 allocation.write(f(value));
4530 allocation.assume_init()
4531 }
4532 } else {
4533 UniqueArc::new(f(UniqueArc::unwrap(this)))
4534 }
4535 }
4536
4537 /// Attempts to map the value in a `UniqueArc`, reusing the allocation if possible.
4538 ///
4539 /// `f` is called with the value owned by the `UniqueArc`, and if the operation succeeds,
4540 /// the result is returned, also in a `UniqueArc`.
4541 ///
4542 /// Note: this is an associated function, which means that you have
4543 /// to call it as `UniqueArc::try_map(u, f)` instead of `u.try_map(f)`. This
4544 /// is so that there is no conflict with a method on the inner type.
4545 ///
4546 /// # Examples
4547 ///
4548 /// ```
4549 /// #![feature(smart_pointer_try_map)]
4550 /// #![feature(unique_rc_arc)]
4551 ///
4552 /// use std::sync::UniqueArc;
4553 ///
4554 /// let b = UniqueArc::new(7);
4555 /// let new = UniqueArc::try_map(b, u32::try_from).unwrap();
4556 /// assert_eq!(*new, 7);
4557 /// ```
4558 #[cfg(not(no_global_oom_handling))]
4559 #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
4560 pub fn try_map<R>(
4561 this: Self,
4562 f: impl FnOnce(T) -> R,
4563 ) -> <R::Residual as Residual<UniqueArc<R::Output>>>::TryType
4564 where
4565 R: Try,
4566 R::Residual: Residual<UniqueArc<R::Output>>,
4567 {
4568 if size_of::<T>() == size_of::<R::Output>()
4569 && align_of::<T>() == align_of::<R::Output>()
4570 && UniqueArc::weak_count(&this) == 0
4571 {
4572 unsafe {
4573 let ptr = UniqueArc::into_raw(this);
4574 let value = ptr.read();
4575 let mut allocation = UniqueArc::from_raw(ptr.cast::<mem::MaybeUninit<R::Output>>());
4576
4577 allocation.write(f(value)?);
4578 try { allocation.assume_init() }
4579 }
4580 } else {
4581 try { UniqueArc::new(f(UniqueArc::unwrap(this))?) }
4582 }
4583 }
4584
4585 #[cfg(not(no_global_oom_handling))]
4586 fn unwrap(this: Self) -> T {
4587 let this = ManuallyDrop::new(this);
4588 let val: T = unsafe { ptr::read(&**this) };
4589
4590 let _weak = Weak { ptr: this.ptr, alloc: Global };
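 // Transfer the allocation's implicit weak reference to a `Weak`; dropping it
 // decrements the weak count and frees the `ArcInner` if it was the last one.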
4591
4592 val
4593 }
4594}
4595
4596impl<T: ?Sized> UniqueArc<T> {
4597 #[cfg(not(no_global_oom_handling))]
4598 unsafe fn from_raw(ptr: *const T) -> Self {
4599 let offset = unsafe { data_offset(ptr) };
4600
4601 // Reverse the offset to find the original ArcInner.
4602 let rc_ptr = unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> };
4603
4604 Self {
4605 ptr: unsafe { NonNull::new_unchecked(rc_ptr) },
4606 _marker: PhantomData,
4607 _marker2: PhantomData,
4608 alloc: Global,
4609 }
4610 }
4611
4612 #[cfg(not(no_global_oom_handling))]
4613 fn into_raw(this: Self) -> *const T {
4614 let this = ManuallyDrop::new(this);
4615 Self::as_ptr(&*this)
4616 }
4617}
4618
4619impl<T, A: Allocator> UniqueArc<T, A> {
4620 /// Creates a new `UniqueArc` in the provided allocator.
4621 ///
4622 /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
4623 /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
4624 /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
4625 /// point to the new [`Arc`].
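    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc, allocator_api)]
    /// use std::alloc::Global;
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new_in(5, Global);
    /// assert_eq!(*five, 5);
    /// ```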
4626 #[cfg(not(no_global_oom_handling))]
4627 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4628 #[must_use]
4629 // #[unstable(feature = "allocator_api", issue = "32838")]
4630 pub fn new_in(data: T, alloc: A) -> Self {
4631 let (ptr, alloc) = Box::into_unique(Box::new_in(
4632 ArcInner {
4633 strong: atomic::AtomicUsize::new(0),
4634 // keep one weak reference so if all the weak pointers that are created are dropped
4635 // the UniqueArc still stays valid.
4636 weak: atomic::AtomicUsize::new(1),
4637 data,
4638 },
4639 alloc,
4640 ));
4641 Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
4642 }
4643}
4644
4645impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
4646 /// Converts the `UniqueArc` into a regular [`Arc`].
4647 ///
4648 /// This consumes the `UniqueArc` and returns a regular [`Arc`] containing the value
4649 /// previously held by it.
4650 ///
4651 /// Any weak references created before this method is called can now be upgraded to strong
4652 /// references.
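    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new("hello".to_owned());
    /// let weak = UniqueArc::downgrade(&unique);
    ///
    /// let arc = UniqueArc::into_arc(unique);
    /// // The weak reference created beforehand can now be upgraded.
    /// assert!(weak.upgrade().is_some());
    /// assert_eq!(*arc, "hello");
    /// ```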
4653 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4654 #[must_use]
4655 pub fn into_arc(this: Self) -> Arc<T, A> {
4656 let this = ManuallyDrop::new(this);
4657
4658 // Move the allocator out.
4659 // SAFETY: `this.alloc` will not be accessed again, nor dropped because it is in
4660 // a `ManuallyDrop`.
4661 let alloc: A = unsafe { ptr::read(&this.alloc) };
4662
4663 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4664 unsafe {
4665 // Convert our weak reference into a strong reference
4666 (*this.ptr.as_ptr()).strong.store(1, Release);
4667 Arc::from_inner_in(this.ptr, alloc)
4668 }
4669 }
4670
4671 #[cfg(not(no_global_oom_handling))]
4672 fn weak_count(this: &Self) -> usize {
4673 this.inner().weak.load(Acquire) - 1
4674 }
4675
4676 #[cfg(not(no_global_oom_handling))]
4677 fn inner(&self) -> &ArcInner<T> {
4678 // SAFETY: while this UniqueArc is alive we're guaranteed that the inner pointer is valid.
4679 unsafe { self.ptr.as_ref() }
4680 }
4681
4682 #[cfg(not(no_global_oom_handling))]
4683 fn as_ptr(this: &Self) -> *const T {
4684 let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
4685
4686 // SAFETY: This cannot go through Deref::deref or UniqueArc::inner because
4687 // this is required to retain raw/mut provenance such that e.g. `get_mut` can
4688 // write through the pointer after the Rc is recovered through `from_raw`.
4689 unsafe { &raw mut (*ptr).data }
4690 }
4691
4692 #[inline]
4693 #[cfg(not(no_global_oom_handling))]
4694 fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
4695 let this = mem::ManuallyDrop::new(this);
4696 (this.ptr, unsafe { ptr::read(&this.alloc) })
4697 }
4698
4699 #[inline]
4700 #[cfg(not(no_global_oom_handling))]
4701 unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
4702 Self { ptr, _marker: PhantomData, _marker2: PhantomData, alloc }
4703 }
4704}
4705
4706impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
4707 /// Creates a new weak reference to the `UniqueArc`.
4708 ///
4709 /// Attempting to upgrade this weak reference will fail before the `UniqueArc` has been converted
4710 /// to an [`Arc`] using [`UniqueArc::into_arc`].
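    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // No strong `Arc` exists yet, so upgrading fails.
    /// assert_eq!(weak.strong_count(), 0);
    /// assert!(weak.upgrade().is_none());
    /// ```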
4711 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4712 #[must_use]
4713 pub fn downgrade(this: &Self) -> Weak<T, A> {
4714 // Using a relaxed ordering is alright here, as knowledge of the
4715 // original reference prevents other threads from erroneously deleting
4716 // the object or converting the object to a normal `Arc<T, A>`.
4717 //
4718 // Note that we don't need to test if the weak counter is locked because there
4719 // are no such operations like `Arc::get_mut` or `Arc::make_mut` that will lock
4720 // the weak counter.
4721 //
4722 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4723 let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };
4724
4725 // See comments in Arc::clone() for why we do this (for mem::forget).
4726 if old_size > MAX_REFCOUNT {
4727 abort();
4728 }
4729
4730 Weak { ptr: this.ptr, alloc: this.alloc.clone() }
4731 }
4732}
4733
4734#[cfg(not(no_global_oom_handling))]
4735impl<T, A: Allocator> UniqueArc<mem::MaybeUninit<T>, A> {
4736 unsafe fn assume_init(self) -> UniqueArc<T, A> {
4737 let (ptr, alloc) = UniqueArc::into_inner_with_allocator(self);
4738 unsafe { UniqueArc::from_inner_in(ptr.cast(), alloc) }
4739 }
4740}
4741
4742#[unstable(feature = "unique_rc_arc", issue = "112566")]
4743impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
4744 type Target = T;
4745
4746 fn deref(&self) -> &T {
4747 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4748 unsafe { &self.ptr.as_ref().data }
4749 }
4750}
4751
4752// #[unstable(feature = "unique_rc_arc", issue = "112566")]
4753#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
4754unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}
4755
4756#[unstable(feature = "unique_rc_arc", issue = "112566")]
4757impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
4758 fn deref_mut(&mut self) -> &mut T {
4759 // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
4760 // have unique ownership and therefore it's safe to make a mutable reference because
4761 // `UniqueArc` owns the only strong reference to itself.
4762 // We also need to be careful to only create a mutable reference to the `data` field,
4763 // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
4764 // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
4765 unsafe { &mut (*self.ptr.as_ptr()).data }
4766 }
4767}
4768
4769#[unstable(feature = "unique_rc_arc", issue = "112566")]
4770// #[unstable(feature = "deref_pure_trait", issue = "87121")]
4771unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}
4772
4773#[unstable(feature = "unique_rc_arc", issue = "112566")]
4774unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
4775 fn drop(&mut self) {
4776 // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
4777 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4778 let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
4779
4780 unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
4781 }
4782}