
alloc/sync.rs

1#![stable(feature = "rust1", since = "1.0.0")]
2
3//! Thread-safe reference-counting pointers.
4//!
5//! See the [`Arc<T>`][Arc] documentation for more details.
6//!
7//! **Note**: This module is only available on platforms that support atomic
8//! loads and stores of pointers. This may be detected at compile time using
9//! `#[cfg(target_has_atomic = "ptr")]`.
10
11use core::any::Any;
12#[cfg(not(no_global_oom_handling))]
13use core::clone::CloneToUninit;
14use core::clone::UseCloned;
15use core::cmp::Ordering;
16use core::hash::{Hash, Hasher};
17use core::intrinsics::abort;
18#[cfg(not(no_global_oom_handling))]
19use core::iter;
20use core::marker::{PhantomData, Unsize};
21use core::mem::{self, ManuallyDrop, align_of_val_raw};
22use core::num::NonZeroUsize;
23use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
24use core::panic::{RefUnwindSafe, UnwindSafe};
25use core::pin::{Pin, PinCoerceUnsized};
26use core::ptr::{self, NonNull};
27#[cfg(not(no_global_oom_handling))]
28use core::slice::from_raw_parts_mut;
29use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
30use core::sync::atomic::{self, Atomic};
31use core::{borrow, fmt, hint};
32
33#[cfg(not(no_global_oom_handling))]
34use crate::alloc::handle_alloc_error;
35use crate::alloc::{AllocError, Allocator, Global, Layout};
36use crate::borrow::{Cow, ToOwned};
37use crate::boxed::Box;
38use crate::rc::is_dangling;
39#[cfg(not(no_global_oom_handling))]
40use crate::string::String;
41#[cfg(not(no_global_oom_handling))]
42use crate::vec::Vec;
43
44/// A soft limit on the amount of references that may be made to an `Arc`.
45///
46/// Going above this limit will abort your program (although not
47/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
48/// Trying to go above it might cause a `panic` (if not actually going above it).
49///
50/// This is a global invariant, and also applies when using a compare-exchange loop.
51///
52/// See comment in `Arc::clone`.
53const MAX_REFCOUNT: usize = (isize::MAX) as usize;
54
55/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
56const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
57
58#[cfg(not(sanitize = "thread"))]
59macro_rules! acquire {
60    ($x:expr) => {
61        atomic::fence(Acquire)
62    };
63}
64
65// ThreadSanitizer does not support memory fences. To avoid false positive
66// reports in Arc / Weak implementation use atomic loads for synchronization
67// instead.
68#[cfg(sanitize = "thread")]
69macro_rules! acquire {
70    ($x:expr) => {
71        $x.load(Acquire)
72    };
73}
74
75/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
76/// Reference Counted'.
77///
78/// The type `Arc<T>` provides shared ownership of a value of type `T`,
79/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
80/// a new `Arc` instance, which points to the same allocation on the heap as the
81/// source `Arc`, while increasing a reference count. When the last `Arc`
82/// pointer to a given allocation is destroyed, the value stored in that allocation (often
83/// referred to as "inner value") is also dropped.
84///
85/// Shared references in Rust disallow mutation by default, and `Arc` is no
86/// exception: you cannot generally obtain a mutable reference to something
87/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
88///
89/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
90///    [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
91///
92/// 2. Use clone-on-write semantics with [`Arc::make_mut`] which provides efficient mutation
93///    without requiring interior mutability. This approach clones the data only when
94///    needed (when there are multiple references) and can be more efficient when mutations
95///    are infrequent.
96///
97/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
98///    which provides direct mutable access to the inner value without any cloning.
99///
100/// ```
101/// use std::sync::Arc;
102///
103/// let mut data = Arc::new(vec![1, 2, 3]);
104///
105/// // This will clone the vector only if there are other references to it
106/// Arc::make_mut(&mut data).push(4);
107///
108/// assert_eq!(*data, vec![1, 2, 3, 4]);
109/// ```
110///
111/// **Note**: This type is only available on platforms that support atomic
112/// loads and stores of pointers, which includes all platforms that support
113/// the `std` crate but not all those which only support [`alloc`](crate).
114/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
115///
116/// ## Thread Safety
117///
118/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
119/// counting. This means that it is thread-safe. The disadvantage is that
120/// atomic operations are more expensive than ordinary memory accesses. If you
121/// are not sharing reference-counted allocations between threads, consider using
122/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
123/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
124/// However, a library might choose `Arc<T>` in order to give library consumers
125/// more flexibility.
126///
127/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
128/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
129/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
130/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
131/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
132/// data, but it doesn't add thread safety to its data. Consider
133/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
134/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
135/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
136/// non-atomic operations.
137///
138/// In the end, this means that you may need to pair `Arc<T>` with some sort of
139/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
140///
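/// For example, a counter shared across threads is commonly wrapped in an
/// `Arc<Mutex<T>>`. A minimal sketch:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         // Each thread locks the mutex before mutating the shared value.
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```
///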
141/// ## Breaking cycles with `Weak`
142///
143/// The [`downgrade`][downgrade] method can be used to create a non-owning
144/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
145/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
146/// already been dropped. In other words, `Weak` pointers do not keep the value
147/// inside the allocation alive; however, they *do* keep the allocation
148/// (the backing store for the value) alive.
149///
150/// A cycle between `Arc` pointers will never be deallocated. For this reason,
151/// [`Weak`] is used to break cycles. For example, a tree could have
152/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
153/// pointers from children back to their parents.
154///
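/// A minimal sketch of the `downgrade`/`upgrade` behavior described above:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = Arc::downgrade(&strong);
///
/// // While a strong pointer is alive, the weak pointer can be upgraded.
/// assert_eq!(weak.upgrade().as_deref(), Some(&5));
///
/// // Once the last strong pointer is dropped, the value is gone and
/// // `upgrade` returns `None`.
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```
///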
155/// # Cloning references
156///
157/// Creating a new reference from an existing reference-counted pointer is done using the
158/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
159///
160/// ```
161/// use std::sync::Arc;
162/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
163/// // The two syntaxes below are equivalent.
164/// let a = foo.clone();
165/// let b = Arc::clone(&foo);
166/// // a, b, and foo are all Arcs that point to the same memory location
167/// ```
168///
169/// ## `Deref` behavior
170///
171/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
172/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
173/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
174/// functions, called using [fully qualified syntax]:
175///
176/// ```
177/// use std::sync::Arc;
178///
179/// let my_arc = Arc::new(());
180/// let my_weak = Arc::downgrade(&my_arc);
181/// ```
182///
183/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
184/// fully qualified syntax. Some people prefer to use fully qualified syntax,
185/// while others prefer using method-call syntax.
186///
187/// ```
188/// use std::sync::Arc;
189///
190/// let arc = Arc::new(());
191/// // Method-call syntax
192/// let arc2 = arc.clone();
193/// // Fully qualified syntax
194/// let arc3 = Arc::clone(&arc);
195/// ```
196///
197/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
198/// already been dropped.
199///
200/// [`Rc<T>`]: crate::rc::Rc
201/// [clone]: Clone::clone
202/// [mutex]: ../../std/sync/struct.Mutex.html
203/// [rwlock]: ../../std/sync/struct.RwLock.html
204/// [atomic]: core::sync::atomic
205/// [downgrade]: Arc::downgrade
206/// [upgrade]: Weak::upgrade
207/// [RefCell\<T>]: core::cell::RefCell
208/// [`RefCell<T>`]: core::cell::RefCell
209/// [`std::sync`]: ../../std/sync/index.html
210/// [`Arc::clone(&from)`]: Arc::clone
211/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
212///
213/// # Examples
214///
215/// Sharing some immutable data between threads:
216///
217/// ```
218/// use std::sync::Arc;
219/// use std::thread;
220///
221/// let five = Arc::new(5);
222///
223/// for _ in 0..10 {
224///     let five = Arc::clone(&five);
225///
226///     thread::spawn(move || {
227///         println!("{five:?}");
228///     });
229/// }
230/// ```
231///
232/// Sharing a mutable [`AtomicUsize`]:
233///
234/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
235///
236/// ```
237/// use std::sync::Arc;
238/// use std::sync::atomic::{AtomicUsize, Ordering};
239/// use std::thread;
240///
241/// let val = Arc::new(AtomicUsize::new(5));
242///
243/// for _ in 0..10 {
244///     let val = Arc::clone(&val);
245///
246///     thread::spawn(move || {
247///         let v = val.fetch_add(1, Ordering::Relaxed);
248///         println!("{v:?}");
249///     });
250/// }
251/// ```
252///
253/// See the [`rc` documentation][rc_examples] for more examples of reference
254/// counting in general.
255///
256/// [rc_examples]: crate::rc#examples
257#[doc(search_unbox)]
258#[rustc_diagnostic_item = "Arc"]
259#[stable(feature = "rust1", since = "1.0.0")]
260#[rustc_insignificant_dtor]
261pub struct Arc<
262    T: ?Sized,
263    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
264> {
265    ptr: NonNull<ArcInner<T>>,
266    phantom: PhantomData<ArcInner<T>>,
267    alloc: A,
268}
269
270#[stable(feature = "rust1", since = "1.0.0")]
271unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
272#[stable(feature = "rust1", since = "1.0.0")]
273unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}
274
275#[stable(feature = "catch_unwind", since = "1.9.0")]
276impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}
277
278#[unstable(feature = "coerce_unsized", issue = "18598")]
279impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}
280
281#[unstable(feature = "dispatch_from_dyn", issue = "none")]
282impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
283
284impl<T: ?Sized> Arc<T> {
285    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
286        unsafe { Self::from_inner_in(ptr, Global) }
287    }
288
289    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
290        unsafe { Self::from_ptr_in(ptr, Global) }
291    }
292}
293
294impl<T: ?Sized, A: Allocator> Arc<T, A> {
295    #[inline]
296    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
297        let this = mem::ManuallyDrop::new(this);
298        (this.ptr, unsafe { ptr::read(&this.alloc) })
299    }
300
301    #[inline]
302    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
303        Self { ptr, phantom: PhantomData, alloc }
304    }
305
306    #[inline]
307    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
308        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
309    }
310}
311
312/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
313/// managed allocation.
314///
315/// The allocation is accessed by calling [`upgrade`] on the `Weak`
316/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
317///
318/// Since a `Weak` reference does not count towards ownership, it will not
319/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
320/// guarantees about the value still being present. Thus it may return [`None`]
321/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
322/// itself (the backing store) from being deallocated.
323///
324/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
325/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
326/// prevent circular references between [`Arc`] pointers, since mutual owning references
327/// would never allow either [`Arc`] to be dropped. For example, a tree could
328/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
329/// pointers from children back to their parents.
330///
331/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
332///
333/// [`upgrade`]: Weak::upgrade
334#[stable(feature = "arc_weak", since = "1.4.0")]
335#[rustc_diagnostic_item = "ArcWeak"]
336pub struct Weak<
337    T: ?Sized,
338    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
339> {
340    // This is a `NonNull` to allow optimizing the size of this type in enums,
341    // but it is not necessarily a valid pointer.
342    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
343    // to allocate space on the heap. That's not a value a real pointer
344    // will ever have because ArcInner has alignment at least 2.
345    ptr: NonNull<ArcInner<T>>,
346    alloc: A,
347}
348
349#[stable(feature = "arc_weak", since = "1.4.0")]
350unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
351#[stable(feature = "arc_weak", since = "1.4.0")]
352unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}
353
354#[unstable(feature = "coerce_unsized", issue = "18598")]
355impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
356#[unstable(feature = "dispatch_from_dyn", issue = "none")]
357impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
358
359#[stable(feature = "arc_weak", since = "1.4.0")]
360impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
361    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
362        write!(f, "(Weak)")
363    }
364}
365
366// This is repr(C) to future-proof against possible field-reordering, which
367// would interfere with otherwise safe [into|from]_raw() of transmutable
368// inner types.
369// Unlike RcInner, repr(align(2)) is not strictly required because atomic types
370// have the same alignment as their size, but we use it for consistency and clarity.
371#[repr(C, align(2))]
372struct ArcInner<T: ?Sized> {
373    strong: Atomic<usize>,
374
375    // the value usize::MAX acts as a sentinel for temporarily "locking" the
376    // ability to upgrade weak pointers or downgrade strong ones; this is used
377    // to avoid races in `make_mut` and `get_mut`.
378    weak: Atomic<usize>,
379
380    data: T,
381}
382
383/// Calculate layout for `ArcInner<T>` using the inner value's layout
384fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
385    // Calculate layout using the given value layout.
386    // Previously, layout was calculated on the expression
387    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
388    // reference (see #54908).
389    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
390}
391
392unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
393unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
394
395impl<T> Arc<T> {
396    /// Constructs a new `Arc<T>`.
397    ///
398    /// # Examples
399    ///
400    /// ```
401    /// use std::sync::Arc;
402    ///
403    /// let five = Arc::new(5);
404    /// ```
405    #[cfg(not(no_global_oom_handling))]
406    #[inline]
407    #[stable(feature = "rust1", since = "1.0.0")]
408    pub fn new(data: T) -> Arc<T> {
409        // Start the weak pointer count as 1 which is the weak pointer that's
410        // held by all the strong pointers (kinda), see std/rc.rs for more info
411        let x: Box<_> = Box::new(ArcInner {
412            strong: atomic::AtomicUsize::new(1),
413            weak: atomic::AtomicUsize::new(1),
414            data,
415        });
416        unsafe { Self::from_inner(Box::leak(x).into()) }
417    }
418
419    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
420    /// to allow you to construct a `T` which holds a weak pointer to itself.
421    ///
422    /// Generally, a structure circularly referencing itself, either directly or
423    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
424    /// Using this function, you get access to the weak pointer during the
425    /// initialization of `T`, before the `Arc<T>` is created, such that you can
426    /// clone and store it inside the `T`.
427    ///
428    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
429    /// then calls your closure, giving it a `Weak<T>` to this allocation,
430    /// and only afterwards completes the construction of the `Arc<T>` by placing
431    /// the `T` returned from your closure into the allocation.
432    ///
433    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
434    /// returns, calling [`upgrade`] on the weak reference inside your closure will
435    /// fail and result in a `None` value.
436    ///
437    /// # Panics
438    ///
439    /// If `data_fn` panics, the panic is propagated to the caller, and the
440    /// temporary [`Weak<T>`] is dropped normally.
441    ///
442    /// # Example
443    ///
444    /// ```
445    /// # #![allow(dead_code)]
446    /// use std::sync::{Arc, Weak};
447    ///
448    /// struct Gadget {
449    ///     me: Weak<Gadget>,
450    /// }
451    ///
452    /// impl Gadget {
453    ///     /// Constructs a reference counted Gadget.
454    ///     fn new() -> Arc<Self> {
455    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
456    ///         // `Arc` we're constructing.
457    ///         Arc::new_cyclic(|me| {
458    ///             // Create the actual struct here.
459    ///             Gadget { me: me.clone() }
460    ///         })
461    ///     }
462    ///
463    ///     /// Returns a reference counted pointer to Self.
464    ///     fn me(&self) -> Arc<Self> {
465    ///         self.me.upgrade().unwrap()
466    ///     }
467    /// }
468    /// ```
469    /// [`upgrade`]: Weak::upgrade
470    #[cfg(not(no_global_oom_handling))]
471    #[inline]
472    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
473    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
474    where
475        F: FnOnce(&Weak<T>) -> T,
476    {
477        Self::new_cyclic_in(data_fn, Global)
478    }
479
480    /// Constructs a new `Arc` with uninitialized contents.
481    ///
482    /// # Examples
483    ///
484    /// ```
485    /// use std::sync::Arc;
486    ///
487    /// let mut five = Arc::<u32>::new_uninit();
488    ///
489    /// // Deferred initialization:
490    /// Arc::get_mut(&mut five).unwrap().write(5);
491    ///
492    /// let five = unsafe { five.assume_init() };
493    ///
494    /// assert_eq!(*five, 5)
495    /// ```
496    #[cfg(not(no_global_oom_handling))]
497    #[inline]
498    #[stable(feature = "new_uninit", since = "1.82.0")]
499    #[must_use]
500    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
501        unsafe {
502            Arc::from_ptr(Arc::allocate_for_layout(
503                Layout::new::<T>(),
504                |layout| Global.allocate(layout),
505                <*mut u8>::cast,
506            ))
507        }
508    }
509
510    /// Constructs a new `Arc` with uninitialized contents, with the memory
511    /// being filled with `0` bytes.
512    ///
513    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
514    /// of this method.
515    ///
516    /// # Examples
517    ///
518    /// ```
519    /// use std::sync::Arc;
520    ///
521    /// let zero = Arc::<u32>::new_zeroed();
522    /// let zero = unsafe { zero.assume_init() };
523    ///
524    /// assert_eq!(*zero, 0)
525    /// ```
526    ///
527    /// [zeroed]: mem::MaybeUninit::zeroed
528    #[cfg(not(no_global_oom_handling))]
529    #[inline]
530    #[stable(feature = "new_zeroed_alloc", since = "CURRENT_RUSTC_VERSION")]
531    #[must_use]
532    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
533        unsafe {
534            Arc::from_ptr(Arc::allocate_for_layout(
535                Layout::new::<T>(),
536                |layout| Global.allocate_zeroed(layout),
537                <*mut u8>::cast,
538            ))
539        }
540    }
541
542    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
543    /// `data` will be pinned in memory and unable to be moved.
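    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::pin::Pin;
    ///
    /// let pinned: Pin<Arc<i32>> = Arc::pin(5);
    /// // The value can still be read through the pinned pointer.
    /// assert_eq!(*pinned, 5);
    /// ```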
544    #[cfg(not(no_global_oom_handling))]
545    #[stable(feature = "pin", since = "1.33.0")]
546    #[must_use]
547    pub fn pin(data: T) -> Pin<Arc<T>> {
548        unsafe { Pin::new_unchecked(Arc::new(data)) }
549    }
550
551    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
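    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_pin(5)?;
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```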
552    #[unstable(feature = "allocator_api", issue = "32838")]
553    #[inline]
554    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
555        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
556    }
557
558    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
559    ///
560    /// # Examples
561    ///
562    /// ```
563    /// #![feature(allocator_api)]
564    /// use std::sync::Arc;
565    ///
566    /// let five = Arc::try_new(5)?;
567    /// # Ok::<(), std::alloc::AllocError>(())
568    /// ```
569    #[unstable(feature = "allocator_api", issue = "32838")]
570    #[inline]
571    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
572        // Start the weak pointer count as 1 which is the weak pointer that's
573        // held by all the strong pointers (kinda), see std/rc.rs for more info
574        let x: Box<_> = Box::try_new(ArcInner {
575            strong: atomic::AtomicUsize::new(1),
576            weak: atomic::AtomicUsize::new(1),
577            data,
578        })?;
579        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
580    }
581
582    /// Constructs a new `Arc` with uninitialized contents, returning an error
583    /// if allocation fails.
584    ///
585    /// # Examples
586    ///
587    /// ```
588    /// #![feature(allocator_api)]
589    ///
590    /// use std::sync::Arc;
591    ///
592    /// let mut five = Arc::<u32>::try_new_uninit()?;
593    ///
594    /// // Deferred initialization:
595    /// Arc::get_mut(&mut five).unwrap().write(5);
596    ///
597    /// let five = unsafe { five.assume_init() };
598    ///
599    /// assert_eq!(*five, 5);
600    /// # Ok::<(), std::alloc::AllocError>(())
601    /// ```
602    #[unstable(feature = "allocator_api", issue = "32838")]
603    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
604        unsafe {
605            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
606                Layout::new::<T>(),
607                |layout| Global.allocate(layout),
608                <*mut u8>::cast,
609            )?))
610        }
611    }
612
613    /// Constructs a new `Arc` with uninitialized contents, with the memory
614    /// being filled with `0` bytes, returning an error if allocation fails.
615    ///
616    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
617    /// of this method.
618    ///
619    /// # Examples
620    ///
621    /// ```
622    /// #![feature(allocator_api)]
623    ///
624    /// use std::sync::Arc;
625    ///
626    /// let zero = Arc::<u32>::try_new_zeroed()?;
627    /// let zero = unsafe { zero.assume_init() };
628    ///
629    /// assert_eq!(*zero, 0);
630    /// # Ok::<(), std::alloc::AllocError>(())
631    /// ```
632    ///
633    /// [zeroed]: mem::MaybeUninit::zeroed
634    #[unstable(feature = "allocator_api", issue = "32838")]
635    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
636        unsafe {
637            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
638                Layout::new::<T>(),
639                |layout| Global.allocate_zeroed(layout),
640                <*mut u8>::cast,
641            )?))
642        }
643    }
644}
645
646impl<T, A: Allocator> Arc<T, A> {
647    /// Constructs a new `Arc<T>` in the provided allocator.
648    ///
649    /// # Examples
650    ///
651    /// ```
652    /// #![feature(allocator_api)]
653    ///
654    /// use std::sync::Arc;
655    /// use std::alloc::System;
656    ///
657    /// let five = Arc::new_in(5, System);
658    /// ```
659    #[inline]
660    #[cfg(not(no_global_oom_handling))]
661    #[unstable(feature = "allocator_api", issue = "32838")]
662    pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
663        // Start the weak pointer count as 1 which is the weak pointer that's
664        // held by all the strong pointers (kinda), see std/rc.rs for more info
665        let x = Box::new_in(
666            ArcInner {
667                strong: atomic::AtomicUsize::new(1),
668                weak: atomic::AtomicUsize::new(1),
669                data,
670            },
671            alloc,
672        );
673        let (ptr, alloc) = Box::into_unique(x);
674        unsafe { Self::from_inner_in(ptr.into(), alloc) }
675    }
676
677    /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
678    ///
679    /// # Examples
680    ///
681    /// ```
682    /// #![feature(get_mut_unchecked)]
683    /// #![feature(allocator_api)]
684    ///
685    /// use std::sync::Arc;
686    /// use std::alloc::System;
687    ///
688    /// let mut five = Arc::<u32, _>::new_uninit_in(System);
689    ///
690    /// let five = unsafe {
691    ///     // Deferred initialization:
692    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
693    ///
694    ///     five.assume_init()
695    /// };
696    ///
697    /// assert_eq!(*five, 5)
698    /// ```
699    #[cfg(not(no_global_oom_handling))]
700    #[unstable(feature = "allocator_api", issue = "32838")]
701    #[inline]
702    pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
703        unsafe {
704            Arc::from_ptr_in(
705                Arc::allocate_for_layout(
706                    Layout::new::<T>(),
707                    |layout| alloc.allocate(layout),
708                    <*mut u8>::cast,
709                ),
710                alloc,
711            )
712        }
713    }
714
715    /// Constructs a new `Arc` with uninitialized contents, with the memory
716    /// being filled with `0` bytes, in the provided allocator.
717    ///
718    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
719    /// of this method.
720    ///
721    /// # Examples
722    ///
723    /// ```
724    /// #![feature(allocator_api)]
725    ///
726    /// use std::sync::Arc;
727    /// use std::alloc::System;
728    ///
729    /// let zero = Arc::<u32, _>::new_zeroed_in(System);
730    /// let zero = unsafe { zero.assume_init() };
731    ///
732    /// assert_eq!(*zero, 0)
733    /// ```
734    ///
735    /// [zeroed]: mem::MaybeUninit::zeroed
736    #[cfg(not(no_global_oom_handling))]
737    #[unstable(feature = "allocator_api", issue = "32838")]
738    #[inline]
739    pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
740        unsafe {
741            Arc::from_ptr_in(
742                Arc::allocate_for_layout(
743                    Layout::new::<T>(),
744                    |layout| alloc.allocate_zeroed(layout),
745                    <*mut u8>::cast,
746                ),
747                alloc,
748            )
749        }
750    }
751
752    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
753    /// to allow you to construct a `T` which holds a weak pointer to itself.
754    ///
755    /// Generally, a structure circularly referencing itself, either directly or
756    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
757    /// Using this function, you get access to the weak pointer during the
758    /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
759    /// clone and store it inside the `T`.
760    ///
761    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
762    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
763    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
764    /// the `T` returned from your closure into the allocation.
765    ///
766    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
767    /// returns, calling [`upgrade`] on the weak reference inside your closure will
768    /// fail and result in a `None` value.
769    ///
770    /// # Panics
771    ///
772    /// If `data_fn` panics, the panic is propagated to the caller, and the
773    /// temporary [`Weak<T>`] is dropped normally.
774    ///
775    /// # Example
776    ///
777    /// See [`new_cyclic`]
778    ///
779    /// [`new_cyclic`]: Arc::new_cyclic
780    /// [`upgrade`]: Weak::upgrade
781    #[cfg(not(no_global_oom_handling))]
782    #[inline]
783    #[unstable(feature = "allocator_api", issue = "32838")]
784    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
785    where
786        F: FnOnce(&Weak<T, A>) -> T,
787    {
788        // Construct the inner in the "uninitialized" state with a single
789        // weak reference.
790        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
791            ArcInner {
792                strong: atomic::AtomicUsize::new(0),
793                weak: atomic::AtomicUsize::new(1),
794                data: mem::MaybeUninit::<T>::uninit(),
795            },
796            alloc,
797        ));
798        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
799        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
800
801        let weak = Weak { ptr: init_ptr, alloc };
802
803        // It's important we don't give up ownership of the weak pointer, or
804        // else the memory might be freed by the time `data_fn` returns. If
805        // we really wanted to pass ownership, we could create an additional
806        // weak pointer for ourselves, but this would result in additional
807        // updates to the weak reference count which might not be necessary
808        // otherwise.
809        let data = data_fn(&weak);
810
811        // Now we can properly initialize the inner value and turn our weak
812        // reference into a strong reference.
813        let strong = unsafe {
814            let inner = init_ptr.as_ptr();
815            ptr::write(&raw mut (*inner).data, data);
816
817            // The above write to the data field must be visible to any threads which
818            // observe a non-zero strong count. Therefore we need at least "Release" ordering
819            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
820            //
821            // "Acquire" ordering is not required. When considering the possible behaviors
822            // of `data_fn` we only need to look at what it could do with a reference to a
823            // non-upgradeable `Weak`:
824            // - It can *clone* the `Weak`, increasing the weak reference count.
825            // - It can drop those clones, decreasing the weak reference count (but never to zero).
826            //
827            // These side effects do not impact us in any way, and no other side effects are
828            // possible with safe code alone.
829            let prev_value = (*inner).strong.fetch_add(1, Release);
830            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
831
832            // Strong references should collectively own a shared weak reference,
833            // so don't run the destructor for our old weak reference.
834            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
835            // and forgetting the weak reference.
836            let alloc = weak.into_raw_with_allocator().1;
837
838            Arc::from_inner_in(init_ptr, alloc)
839        };
840
841        strong
842    }
843
844    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
845    /// then `data` will be pinned in memory and unable to be moved.
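    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::pin_in(5, System);
    /// assert_eq!(*five, 5);
    /// ```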
846    #[cfg(not(no_global_oom_handling))]
847    #[unstable(feature = "allocator_api", issue = "32838")]
848    #[inline]
849    pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
850    where
851        A: 'static,
852    {
853        unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
854    }
855
856    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if allocation
857    /// fails.
858    #[inline]
859    #[unstable(feature = "allocator_api", issue = "32838")]
860    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
861    where
862        A: 'static,
863    {
864        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
865    }
866
867    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
868    ///
869    /// # Examples
870    ///
871    /// ```
872    /// #![feature(allocator_api)]
873    ///
874    /// use std::sync::Arc;
875    /// use std::alloc::System;
876    ///
877    /// let five = Arc::try_new_in(5, System)?;
878    /// # Ok::<(), std::alloc::AllocError>(())
879    /// ```
880    #[inline]
881    #[unstable(feature = "allocator_api", issue = "32838")]
883    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
884        // Start the weak pointer count as 1 which is the weak pointer that's
885        // held by all the strong pointers (kinda), see std/rc.rs for more info
886        let x = Box::try_new_in(
887            ArcInner {
888                strong: atomic::AtomicUsize::new(1),
889                weak: atomic::AtomicUsize::new(1),
890                data,
891            },
892            alloc,
893        )?;
894        let (ptr, alloc) = Box::into_unique(x);
895        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
896    }
897
898    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
899    /// error if allocation fails.
900    ///
901    /// # Examples
902    ///
903    /// ```
904    /// #![feature(allocator_api)]
905    /// #![feature(get_mut_unchecked)]
906    ///
907    /// use std::sync::Arc;
908    /// use std::alloc::System;
909    ///
910    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
911    ///
912    /// let five = unsafe {
913    ///     // Deferred initialization:
914    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
915    ///
916    ///     five.assume_init()
917    /// };
918    ///
919    /// assert_eq!(*five, 5);
920    /// # Ok::<(), std::alloc::AllocError>(())
921    /// ```
922    #[unstable(feature = "allocator_api", issue = "32838")]
923    #[inline]
924    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
925        unsafe {
926            Ok(Arc::from_ptr_in(
927                Arc::try_allocate_for_layout(
928                    Layout::new::<T>(),
929                    |layout| alloc.allocate(layout),
930                    <*mut u8>::cast,
931                )?,
932                alloc,
933            ))
934        }
935    }
936
937    /// Constructs a new `Arc` with uninitialized contents, with the memory
938    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
939    /// fails.
940    ///
941    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
942    /// of this method.
943    ///
944    /// # Examples
945    ///
946    /// ```
947    /// #![feature(allocator_api)]
948    ///
949    /// use std::sync::Arc;
950    /// use std::alloc::System;
951    ///
952    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
953    /// let zero = unsafe { zero.assume_init() };
954    ///
955    /// assert_eq!(*zero, 0);
956    /// # Ok::<(), std::alloc::AllocError>(())
957    /// ```
958    ///
959    /// [zeroed]: mem::MaybeUninit::zeroed
960    #[unstable(feature = "allocator_api", issue = "32838")]
961    #[inline]
962    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
963        unsafe {
964            Ok(Arc::from_ptr_in(
965                Arc::try_allocate_for_layout(
966                    Layout::new::<T>(),
967                    |layout| alloc.allocate_zeroed(layout),
968                    <*mut u8>::cast,
969                )?,
970                alloc,
971            ))
972        }
973    }
973
974    /// Returns the inner value, if the `Arc` has exactly one strong reference.
975    ///
976    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
977    /// passed in.
978    ///
979    /// This will succeed even if there are outstanding weak references.
980    ///
981    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
982    /// keep the `Arc` in the [`Err`] case.
983    /// Immediately dropping the [`Err`]-value, as the expression
984    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
985    /// drop to zero and the inner value of the `Arc` to be dropped.
986    /// For instance, if two threads execute such an expression in parallel,
987    /// there is a race condition without the possibility of unsafety:
988    /// The threads could first both check whether they own the last instance
989    /// in `Arc::try_unwrap`, determine that they both do not, and then both
990    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
991    /// In this scenario, the value inside the `Arc` is safely destroyed
992    /// by exactly one of the threads, but neither thread will ever be able
993    /// to use the value.
994    ///
995    /// # Examples
996    ///
997    /// ```
998    /// use std::sync::Arc;
999    ///
1000    /// let x = Arc::new(3);
1001    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
1002    ///
1003    /// let x = Arc::new(4);
1004    /// let _y = Arc::clone(&x);
1005    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
1006    /// ```
1007    #[inline]
1008    #[stable(feature = "arc_unique", since = "1.4.0")]
1009    pub fn try_unwrap(this: Self) -> Result<T, Self> {
1010        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
1011            return Err(this);
1012        }
1013
1014        acquire!(this.inner().strong);
1015
1016        let this = ManuallyDrop::new(this);
1017        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
1018        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator
1019
1020        // Make a weak pointer to clean up the implicit strong-weak reference
1021        let _weak = Weak { ptr: this.ptr, alloc };
1022
1023        Ok(elem)
1024    }
1025
1026    /// Returns the inner value, if the `Arc` has exactly one strong reference.
1027    ///
1028    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
1029    ///
1030    /// This will succeed even if there are outstanding weak references.
1031    ///
1032    /// If `Arc::into_inner` is called on every clone of this `Arc`,
1033    /// it is guaranteed that exactly one of the calls returns the inner value.
1034    /// This means in particular that the inner value is not dropped.
1035    ///
1036    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
1037    /// is meant for different use-cases. If used as a direct replacement
1038    /// for `Arc::into_inner` anyway, such as with the expression
1039    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
1040    /// **not** give the same guarantee as described in the previous paragraph.
1041    /// For more information, see the examples below and read the documentation
1042    /// of [`Arc::try_unwrap`].
1043    ///
1044    /// # Examples
1045    ///
1046    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
1047    /// ```
1048    /// use std::sync::Arc;
1049    ///
1050    /// let x = Arc::new(3);
1051    /// let y = Arc::clone(&x);
1052    ///
1053    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
1054    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
1055    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
1056    ///
1057    /// let x_inner_value = x_thread.join().unwrap();
1058    /// let y_inner_value = y_thread.join().unwrap();
1059    ///
1060    /// // One of the threads is guaranteed to receive the inner value:
1061    /// assert!(matches!(
1062    ///     (x_inner_value, y_inner_value),
1063    ///     (None, Some(3)) | (Some(3), None)
1064    /// ));
1065    /// // The result could also be `(None, None)` if the threads called
1066    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
1067    /// ```
1068    ///
1069    /// A more practical example demonstrating the need for `Arc::into_inner`:
1070    /// ```
1071    /// use std::sync::Arc;
1072    ///
1073    /// // Definition of a simple singly linked list using `Arc`:
1074    /// #[derive(Clone)]
1075    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
1076    /// struct Node<T>(T, Option<Arc<Node<T>>>);
1077    ///
1078    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
1079    /// // can cause a stack overflow. To prevent this, we can provide a
1080    /// // manual `Drop` implementation that does the destruction in a loop:
1081    /// impl<T> Drop for LinkedList<T> {
1082    ///     fn drop(&mut self) {
1083    ///         let mut link = self.0.take();
1084    ///         while let Some(arc_node) = link.take() {
1085    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
1086    ///                 link = next;
1087    ///             }
1088    ///         }
1089    ///     }
1090    /// }
1091    ///
1092    /// // Implementation of `new` and `push` omitted
1093    /// impl<T> LinkedList<T> {
1094    ///     /* ... */
1095    /// #   fn new() -> Self {
1096    /// #       LinkedList(None)
1097    /// #   }
1098    /// #   fn push(&mut self, x: T) {
1099    /// #       self.0 = Some(Arc::new(Node(x, self.0.take())));
1100    /// #   }
1101    /// }
1102    ///
1103    /// // The following code could have still caused a stack overflow
1104    /// // despite the manual `Drop` impl if that `Drop` impl had used
1105    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
1106    ///
1107    /// // Create a long list and clone it
1108    /// let mut x = LinkedList::new();
1109    /// let size = 100000;
1110    /// # let size = if cfg!(miri) { 100 } else { size };
1111    /// for i in 0..size {
1112    ///     x.push(i); // Adds i to the front of x
1113    /// }
1114    /// let y = x.clone();
1115    ///
1116    /// // Drop the clones in parallel
1117    /// let x_thread = std::thread::spawn(|| drop(x));
1118    /// let y_thread = std::thread::spawn(|| drop(y));
1119    /// x_thread.join().unwrap();
1120    /// y_thread.join().unwrap();
1121    /// ```
1122    #[inline]
1123    #[stable(feature = "arc_into_inner", since = "1.70.0")]
1124    pub fn into_inner(this: Self) -> Option<T> {
1125        // Make sure that the ordinary `Drop` implementation isn’t called as well
1126        let mut this = mem::ManuallyDrop::new(this);
1127
1128        // Following the implementation of `drop` and `drop_slow`
1129        if this.inner().strong.fetch_sub(1, Release) != 1 {
1130            return None;
1131        }
1132
1133        acquire!(this.inner().strong);
1134
1135        // SAFETY: This mirrors the line
1136        //
1137        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
1138        //
1139        // in `drop_slow`. Instead of dropping the value behind the pointer,
1140        // it is read and eventually returned; `ptr::read` has the same
1141        // safety conditions as `ptr::drop_in_place`.
1142
1143        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
1144        let alloc = unsafe { ptr::read(&this.alloc) };
1145
1146        drop(Weak { ptr: this.ptr, alloc });
1147
1148        Some(inner)
1149    }
1150}
1151
1152impl<T> Arc<[T]> {
1153    /// Constructs a new atomically reference-counted slice with uninitialized contents.
1154    ///
1155    /// # Examples
1156    ///
1157    /// ```
1158    /// use std::sync::Arc;
1159    ///
1160    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1161    ///
1162    /// // Deferred initialization:
1163    /// let data = Arc::get_mut(&mut values).unwrap();
1164    /// data[0].write(1);
1165    /// data[1].write(2);
1166    /// data[2].write(3);
1167    ///
1168    /// let values = unsafe { values.assume_init() };
1169    ///
1170    /// assert_eq!(*values, [1, 2, 3])
1171    /// ```
1172    #[cfg(not(no_global_oom_handling))]
1173    #[inline]
1174    #[stable(feature = "new_uninit", since = "1.82.0")]
1175    #[must_use]
1176    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
1177        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
1178    }
1179
1180    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1181    /// filled with `0` bytes.
1182    ///
1183    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1184    /// incorrect usage of this method.
1185    ///
1186    /// # Examples
1187    ///
1188    /// ```
1189    /// use std::sync::Arc;
1190    ///
1191    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
1192    /// let values = unsafe { values.assume_init() };
1193    ///
1194    /// assert_eq!(*values, [0, 0, 0])
1195    /// ```
1196    ///
1197    /// [zeroed]: mem::MaybeUninit::zeroed
1198    #[cfg(not(no_global_oom_handling))]
1199    #[inline]
1200    #[stable(feature = "new_zeroed_alloc", since = "CURRENT_RUSTC_VERSION")]
1201    #[must_use]
1202    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
1203        unsafe {
1204            Arc::from_ptr(Arc::allocate_for_layout(
1205                Layout::array::<T>(len).unwrap(),
1206                |layout| Global.allocate_zeroed(layout),
1207                |mem| {
1208                    ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
1209                        as *mut ArcInner<[mem::MaybeUninit<T>]>
1210                },
1211            ))
1212        }
1213    }
1214
1215    /// Converts the reference-counted slice into a reference-counted array.
1216    ///
1217    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
1218    ///
1219    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
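    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the unstable `slice_as_array` feature):
    ///
    /// ```
    /// #![feature(slice_as_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::new([1, 2, 3]);
    ///
    /// // The length matches `N`, so the conversion succeeds.
    /// let array: Arc<[u32; 3]> = slice.into_array().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```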
1220    #[unstable(feature = "slice_as_array", issue = "133508")]
1221    #[inline]
1222    #[must_use]
1223    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
1224        if self.len() == N {
1225            let ptr = Self::into_raw(self) as *const [T; N];
1226
1227            // SAFETY: The underlying array of a slice has the exact same layout as an actual array `[T; N]` if `N` is equal to the slice's length.
1228            let me = unsafe { Arc::from_raw(ptr) };
1229            Some(me)
1230        } else {
1231            None
1232        }
1233    }
1234}
1235
1236impl<T, A: Allocator> Arc<[T], A> {
1237    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
1238    /// provided allocator.
1239    ///
1240    /// # Examples
1241    ///
1242    /// ```
1243    /// #![feature(get_mut_unchecked)]
1244    /// #![feature(allocator_api)]
1245    ///
1246    /// use std::sync::Arc;
1247    /// use std::alloc::System;
1248    ///
1249    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
1250    ///
1251    /// let values = unsafe {
1252    ///     // Deferred initialization:
1253    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
1254    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
1255    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
1256    ///
1257    ///     values.assume_init()
1258    /// };
1259    ///
1260    /// assert_eq!(*values, [1, 2, 3])
1261    /// ```
1262    #[cfg(not(no_global_oom_handling))]
1263    #[unstable(feature = "allocator_api", issue = "32838")]
1264    #[inline]
1265    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1266        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
1267    }
1268
1269    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1270    /// filled with `0` bytes, in the provided allocator.
1271    ///
1272    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1273    /// incorrect usage of this method.
1274    ///
1275    /// # Examples
1276    ///
1277    /// ```
1278    /// #![feature(allocator_api)]
1279    ///
1280    /// use std::sync::Arc;
1281    /// use std::alloc::System;
1282    ///
1283    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
1284    /// let values = unsafe { values.assume_init() };
1285    ///
1286    /// assert_eq!(*values, [0, 0, 0])
1287    /// ```
1288    ///
1289    /// [zeroed]: mem::MaybeUninit::zeroed
1290    #[cfg(not(no_global_oom_handling))]
1291    #[unstable(feature = "allocator_api", issue = "32838")]
1292    #[inline]
1293    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1294        unsafe {
1295            Arc::from_ptr_in(
1296                Arc::allocate_for_layout(
1297                    Layout::array::<T>(len).unwrap(),
1298                    |layout| alloc.allocate_zeroed(layout),
1299                    |mem| {
1300                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
1301                            as *mut ArcInner<[mem::MaybeUninit<T>]>
1302                    },
1303                ),
1304                alloc,
1305            )
1306        }
1307    }
1308}
1309
1310impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
1311    /// Converts to `Arc<T>`.
1312    ///
1313    /// # Safety
1314    ///
1315    /// As with [`MaybeUninit::assume_init`],
1316    /// it is up to the caller to guarantee that the inner value
1317    /// really is in an initialized state.
1318    /// Calling this when the content is not yet fully initialized
1319    /// causes immediate undefined behavior.
1320    ///
1321    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1322    ///
1323    /// # Examples
1324    ///
1325    /// ```
1326    /// use std::sync::Arc;
1327    ///
1328    /// let mut five = Arc::<u32>::new_uninit();
1329    ///
1330    /// // Deferred initialization:
1331    /// Arc::get_mut(&mut five).unwrap().write(5);
1332    ///
1333    /// let five = unsafe { five.assume_init() };
1334    ///
1335    /// assert_eq!(*five, 5)
1336    /// ```
1337    #[stable(feature = "new_uninit", since = "1.82.0")]
1338    #[must_use = "`self` will be dropped if the result is not used"]
1339    #[inline]
1340    pub unsafe fn assume_init(self) -> Arc<T, A> {
1341        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
1342        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
1343    }
1344}
1345
1346impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
1347    /// Converts to `Arc<[T]>`.
1348    ///
1349    /// # Safety
1350    ///
1351    /// As with [`MaybeUninit::assume_init`],
1352    /// it is up to the caller to guarantee that the inner value
1353    /// really is in an initialized state.
1354    /// Calling this when the content is not yet fully initialized
1355    /// causes immediate undefined behavior.
1356    ///
1357    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1358    ///
1359    /// # Examples
1360    ///
1361    /// ```
1362    /// use std::sync::Arc;
1363    ///
1364    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1365    ///
1366    /// // Deferred initialization:
1367    /// let data = Arc::get_mut(&mut values).unwrap();
1368    /// data[0].write(1);
1369    /// data[1].write(2);
1370    /// data[2].write(3);
1371    ///
1372    /// let values = unsafe { values.assume_init() };
1373    ///
1374    /// assert_eq!(*values, [1, 2, 3])
1375    /// ```
1376    #[stable(feature = "new_uninit", since = "1.82.0")]
1377    #[must_use = "`self` will be dropped if the result is not used"]
1378    #[inline]
1379    pub unsafe fn assume_init(self) -> Arc<[T], A> {
1380        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
1381        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
1382    }
1383}
1384
1385impl<T: ?Sized> Arc<T> {
1386    /// Constructs an `Arc<T>` from a raw pointer.
1387    ///
1388    /// The raw pointer must have been previously returned by a call to
1389    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
1390    ///
1391    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1392    ///   is trivially true if `U` is `T`.
1393    /// * If `U` is unsized, its data pointer must have the same size and
1394    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1395    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1396    ///   coercion].
1397    ///
1398    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1399    /// and alignment, this is basically like transmuting references of
1400    /// different types. See [`mem::transmute`][transmute] for more information
1401    /// on what restrictions apply in this case.
1402    ///
1403    /// The raw pointer must point to a block of memory allocated by the global allocator.
1404    ///
1405    /// The user of `from_raw` has to make sure a specific value of `T` is only
1406    /// dropped once.
1407    ///
1408    /// This function is unsafe because improper use may lead to memory unsafety,
1409    /// even if the returned `Arc<T>` is never accessed.
1410    ///
1411    /// [into_raw]: Arc::into_raw
1412    /// [transmute]: core::mem::transmute
1413    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1414    ///
1415    /// # Examples
1416    ///
1417    /// ```
1418    /// use std::sync::Arc;
1419    ///
1420    /// let x = Arc::new("hello".to_owned());
1421    /// let x_ptr = Arc::into_raw(x);
1422    ///
1423    /// unsafe {
1424    ///     // Convert back to an `Arc` to prevent leak.
1425    ///     let x = Arc::from_raw(x_ptr);
1426    ///     assert_eq!(&*x, "hello");
1427    ///
1428    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1429    /// }
1430    ///
1431    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1432    /// ```
1433    ///
1434    /// Convert a slice back into its original array:
1435    ///
1436    /// ```
1437    /// use std::sync::Arc;
1438    ///
1439    /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
1440    /// let x_ptr: *const [u32] = Arc::into_raw(x);
1441    ///
1442    /// unsafe {
1443    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
1444    ///     assert_eq!(&*x, &[1, 2, 3]);
1445    /// }
1446    /// ```
1447    #[inline]
1448    #[stable(feature = "rc_raw", since = "1.17.0")]
1449    pub unsafe fn from_raw(ptr: *const T) -> Self {
1450        unsafe { Arc::from_raw_in(ptr, Global) }
1451    }
1452
1453    /// Consumes the `Arc`, returning the wrapped pointer.
1454    ///
1455    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1456    /// [`Arc::from_raw`].
1457    ///
1458    /// # Examples
1459    ///
1460    /// ```
1461    /// use std::sync::Arc;
1462    ///
1463    /// let x = Arc::new("hello".to_owned());
1464    /// let x_ptr = Arc::into_raw(x);
1465    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1466    /// # // Prevent leaks for Miri.
1467    /// # drop(unsafe { Arc::from_raw(x_ptr) });
1468    /// ```
1469    #[must_use = "losing the pointer will leak memory"]
1470    #[stable(feature = "rc_raw", since = "1.17.0")]
1471    #[rustc_never_returns_null_ptr]
1472    pub fn into_raw(this: Self) -> *const T {
1473        let this = ManuallyDrop::new(this);
1474        Self::as_ptr(&*this)
1475    }
1476
1477    /// Increments the strong reference count on the `Arc<T>` associated with the
1478    /// provided pointer by one.
1479    ///
1480    /// # Safety
1481    ///
1482    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1483    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1484    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1485    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1486    /// allocated by the global allocator.
1487    ///
1488    /// [from_raw_in]: Arc::from_raw_in
1489    ///
1490    /// # Examples
1491    ///
1492    /// ```
1493    /// use std::sync::Arc;
1494    ///
1495    /// let five = Arc::new(5);
1496    ///
1497    /// unsafe {
1498    ///     let ptr = Arc::into_raw(five);
1499    ///     Arc::increment_strong_count(ptr);
1500    ///
1501    ///     // This assertion is deterministic because we haven't shared
1502    ///     // the `Arc` between threads.
1503    ///     let five = Arc::from_raw(ptr);
1504    ///     assert_eq!(2, Arc::strong_count(&five));
1505    /// #   // Prevent leaks for Miri.
1506    /// #   Arc::decrement_strong_count(ptr);
1507    /// }
1508    /// ```
1509    #[inline]
1510    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1511    pub unsafe fn increment_strong_count(ptr: *const T) {
1512        unsafe { Arc::increment_strong_count_in(ptr, Global) }
1513    }
1514
1515    /// Decrements the strong reference count on the `Arc<T>` associated with the
1516    /// provided pointer by one.
1517    ///
1518    /// # Safety
1519    ///
1520    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1521    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1522    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1523    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1524    /// allocated by the global allocator. This method can be used to release the final
1525    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1526    /// released.
1527    ///
1528    /// [from_raw_in]: Arc::from_raw_in
1529    ///
1530    /// # Examples
1531    ///
1532    /// ```
1533    /// use std::sync::Arc;
1534    ///
1535    /// let five = Arc::new(5);
1536    ///
1537    /// unsafe {
1538    ///     let ptr = Arc::into_raw(five);
1539    ///     Arc::increment_strong_count(ptr);
1540    ///
1541    ///     // These assertions are deterministic because we haven't shared
1542    ///     // the `Arc` between threads.
1543    ///     let five = Arc::from_raw(ptr);
1544    ///     assert_eq!(2, Arc::strong_count(&five));
1545    ///     Arc::decrement_strong_count(ptr);
1546    ///     assert_eq!(1, Arc::strong_count(&five));
1547    /// }
1548    /// ```
1549    #[inline]
1550    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1551    pub unsafe fn decrement_strong_count(ptr: *const T) {
1552        unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1553    }
1554}
1555
1556impl<T: ?Sized, A: Allocator> Arc<T, A> {
1557    /// Returns a reference to the underlying allocator.
1558    ///
1559    /// Note: this is an associated function, which means that you have
1560    /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
1561    /// is so that there is no conflict with a method on the inner type.
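    ///
    /// # Examples
    ///
    /// A minimal sketch added here for illustration (not from the original docs), using the
    /// unstable `allocator_api` feature and the `System` allocator:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let a = Arc::new_in("hello".to_owned(), System);
    /// let _alloc: &System = Arc::allocator(&a);
    /// ```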
1562    #[inline]
1563    #[unstable(feature = "allocator_api", issue = "32838")]
1564    pub fn allocator(this: &Self) -> &A {
1565        &this.alloc
1566    }
1567
1568    /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1569    ///
1570    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1571    /// [`Arc::from_raw_in`].
1572    ///
1573    /// # Examples
1574    ///
1575    /// ```
1576    /// #![feature(allocator_api)]
1577    /// use std::sync::Arc;
1578    /// use std::alloc::System;
1579    ///
1580    /// let x = Arc::new_in("hello".to_owned(), System);
1581    /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1582    /// assert_eq!(unsafe { &*ptr }, "hello");
1583    /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1584    /// assert_eq!(&*x, "hello");
1585    /// ```
1586    #[must_use = "losing the pointer will leak memory"]
1587    #[unstable(feature = "allocator_api", issue = "32838")]
1588    pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1589        let this = mem::ManuallyDrop::new(this);
1590        let ptr = Self::as_ptr(&this);
1591        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1592        let alloc = unsafe { ptr::read(&this.alloc) };
1593        (ptr, alloc)
1594    }
1595
1596    /// Provides a raw pointer to the data.
1597    ///
1598    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
1599    /// as long as the allocation's strong count is non-zero.
1600    ///
1601    /// # Examples
1602    ///
1603    /// ```
1604    /// use std::sync::Arc;
1605    ///
1606    /// let x = Arc::new("hello".to_owned());
1607    /// let y = Arc::clone(&x);
1608    /// let x_ptr = Arc::as_ptr(&x);
1609    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1610    /// assert_eq!(unsafe { &*x_ptr }, "hello");
1611    /// ```
1612    #[must_use]
1613    #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1614    #[rustc_never_returns_null_ptr]
1615    pub fn as_ptr(this: &Self) -> *const T {
1616        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1617
1618        // SAFETY: This cannot go through Deref::deref or ArcInnerPtr::inner because
1619        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1620        // write through the pointer after the Arc is recovered through `from_raw`.
1621        unsafe { &raw mut (*ptr).data }
1622    }
1623
1624    /// Constructs an `Arc<T, A>` from a raw pointer.
1625    ///
1626    /// The raw pointer must have been previously returned by a call to [`Arc<U,
1627    /// A>::into_raw`][into_raw] with the following requirements:
1628    ///
1629    /// * If `U` is sized, it must have the same size and alignment as `T`. This
1630    ///   is trivially true if `U` is `T`.
1631    /// * If `U` is unsized, its data pointer must have the same size and
1632    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
1633    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1634    ///   coercion].
1635    ///
1636    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1637    /// and alignment, this is basically like transmuting references of
1638    /// different types. See [`mem::transmute`][transmute] for more information
1639    /// on what restrictions apply in this case.
1640    ///
1641    /// The raw pointer must point to a block of memory allocated by `alloc`
1642    ///
1643    /// The user of `from_raw` has to make sure a specific value of `T` is only
1644    /// dropped once.
1645    ///
1646    /// This function is unsafe because improper use may lead to memory unsafety,
1647    /// even if the returned `Arc<T>` is never accessed.
1648    ///
1649    /// [into_raw]: Arc::into_raw
1650    /// [transmute]: core::mem::transmute
1651    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1652    ///
1653    /// # Examples
1654    ///
1655    /// ```
1656    /// #![feature(allocator_api)]
1657    ///
1658    /// use std::sync::Arc;
1659    /// use std::alloc::System;
1660    ///
1661    /// let x = Arc::new_in("hello".to_owned(), System);
1662    /// let (x_ptr, alloc) = Arc::into_raw_with_allocator(x);
1663    ///
1664    /// unsafe {
1665    ///     // Convert back to an `Arc` to prevent leak.
1666    ///     let x = Arc::from_raw_in(x_ptr, System);
1667    ///     assert_eq!(&*x, "hello");
1668    ///
1669    ///     // Further calls to `Arc::from_raw_in(x_ptr, System)` would be memory-unsafe.
1670    /// }
1671    ///
1672    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1673    /// ```
1674    ///
1675    /// Convert a slice back into its original array:
1676    ///
1677    /// ```
1678    /// #![feature(allocator_api)]
1679    ///
1680    /// use std::sync::Arc;
1681    /// use std::alloc::System;
1682    ///
1683    /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1684    /// let x_ptr: *const [u32] = Arc::into_raw_with_allocator(x).0;
1685    ///
1686    /// unsafe {
1687    ///     let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1688    ///     assert_eq!(&*x, &[1, 2, 3]);
1689    /// }
1690    /// ```
1691    #[inline]
1692    #[unstable(feature = "allocator_api", issue = "32838")]
1693    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1694        unsafe {
1695            let offset = data_offset(ptr);
1696
1697            // Reverse the offset to find the original ArcInner.
1698            let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1699
1700            Self::from_ptr_in(arc_ptr, alloc)
1701        }
1702    }
1703
1704    /// Creates a new [`Weak`] pointer to this allocation.
1705    ///
1706    /// # Examples
1707    ///
1708    /// ```
1709    /// use std::sync::Arc;
1710    ///
1711    /// let five = Arc::new(5);
1712    ///
1713    /// let weak_five = Arc::downgrade(&five);
1714    /// ```
1715    #[must_use = "this returns a new `Weak` pointer, \
1716                  without modifying the original `Arc`"]
1717    #[stable(feature = "arc_weak", since = "1.4.0")]
1718    pub fn downgrade(this: &Self) -> Weak<T, A>
1719    where
1720        A: Clone,
1721    {
1722        // This Relaxed is OK because we're checking the value in the CAS
1723        // below.
1724        let mut cur = this.inner().weak.load(Relaxed);
1725
1726        loop {
1727            // check if the weak counter is currently "locked"; if so, spin.
1728            if cur == usize::MAX {
1729                hint::spin_loop();
1730                cur = this.inner().weak.load(Relaxed);
1731                continue;
1732            }
1733
1734            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1735            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1736
1737            // NOTE: this code currently ignores the possibility of overflow
1738            // into usize::MAX; in general both Rc and Arc need to be adjusted
1739            // to deal with overflow.
1740
1741            // Unlike with Clone(), we need this to be an Acquire read to
1742            // synchronize with the write coming from `is_unique`, so that the
1743            // events prior to that write happen before this read.
1744            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1745                Ok(_) => {
1746                    // Make sure we do not create a dangling Weak
1747                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
1748                    return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1749                }
1750                Err(old) => cur = old,
1751            }
1752        }
1753    }
1754
1755    /// Gets the number of [`Weak`] pointers to this allocation.
1756    ///
1757    /// # Safety
1758    ///
1759    /// This method by itself is safe, but using it correctly requires extra care.
1760    /// Another thread can change the weak count at any time,
1761    /// including potentially between calling this method and acting on the result.
1762    ///
1763    /// # Examples
1764    ///
1765    /// ```
1766    /// use std::sync::Arc;
1767    ///
1768    /// let five = Arc::new(5);
1769    /// let _weak_five = Arc::downgrade(&five);
1770    ///
1771    /// // This assertion is deterministic because we haven't shared
1772    /// // the `Arc` or `Weak` between threads.
1773    /// assert_eq!(1, Arc::weak_count(&five));
1774    /// ```
1775    #[inline]
1776    #[must_use]
1777    #[stable(feature = "arc_counts", since = "1.15.0")]
1778    pub fn weak_count(this: &Self) -> usize {
1779        let cnt = this.inner().weak.load(Relaxed);
1780        // If the weak count is currently locked, the value of the
1781        // count was 0 just before taking the lock.
1782        if cnt == usize::MAX { 0 } else { cnt - 1 }
1783    }
1784
1785    /// Gets the number of strong (`Arc`) pointers to this allocation.
1786    ///
1787    /// # Safety
1788    ///
1789    /// This method by itself is safe, but using it correctly requires extra care.
1790    /// Another thread can change the strong count at any time,
1791    /// including potentially between calling this method and acting on the result.
1792    ///
1793    /// # Examples
1794    ///
1795    /// ```
1796    /// use std::sync::Arc;
1797    ///
1798    /// let five = Arc::new(5);
1799    /// let _also_five = Arc::clone(&five);
1800    ///
1801    /// // This assertion is deterministic because we haven't shared
1802    /// // the `Arc` between threads.
1803    /// assert_eq!(2, Arc::strong_count(&five));
1804    /// ```
1805    #[inline]
1806    #[must_use]
1807    #[stable(feature = "arc_counts", since = "1.15.0")]
1808    pub fn strong_count(this: &Self) -> usize {
1809        this.inner().strong.load(Relaxed)
1810    }
1811
1812    /// Increments the strong reference count on the `Arc<T>` associated with the
1813    /// provided pointer by one.
1814    ///
1815    /// # Safety
1816    ///
1817    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1818    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1819    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1820    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1821    /// allocated by `alloc`.
1822    ///
1823    /// [from_raw_in]: Arc::from_raw_in
1824    ///
1825    /// # Examples
1826    ///
1827    /// ```
1828    /// #![feature(allocator_api)]
1829    ///
1830    /// use std::sync::Arc;
1831    /// use std::alloc::System;
1832    ///
1833    /// let five = Arc::new_in(5, System);
1834    ///
1835    /// unsafe {
1836    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1837    ///     Arc::increment_strong_count_in(ptr, System);
1838    ///
1839    ///     // This assertion is deterministic because we haven't shared
1840    ///     // the `Arc` between threads.
1841    ///     let five = Arc::from_raw_in(ptr, System);
1842    ///     assert_eq!(2, Arc::strong_count(&five));
1843    /// #   // Prevent leaks for Miri.
1844    /// #   Arc::decrement_strong_count_in(ptr, System);
1845    /// }
1846    /// ```
1847    #[inline]
1848    #[unstable(feature = "allocator_api", issue = "32838")]
1849    pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1850    where
1851        A: Clone,
1852    {
1853        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1854        let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1855        // Now increase refcount, but don't drop new refcount either
1856        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1857    }
1858
1859    /// Decrements the strong reference count on the `Arc<T>` associated with the
1860    /// provided pointer by one.
1861    ///
1862    /// # Safety
1863    ///
1864    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1865    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1866    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1867    /// least 1) when invoking this method, and `ptr` must point to a block of memory
1868    /// allocated by `alloc`. This method can be used to release the final
1869    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1870    /// released.
1871    ///
1872    /// [from_raw_in]: Arc::from_raw_in
1873    ///
1874    /// # Examples
1875    ///
1876    /// ```
1877    /// #![feature(allocator_api)]
1878    ///
1879    /// use std::sync::Arc;
1880    /// use std::alloc::System;
1881    ///
1882    /// let five = Arc::new_in(5, System);
1883    ///
1884    /// unsafe {
1885    ///     let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1886    ///     Arc::increment_strong_count_in(ptr, System);
1887    ///
1888    ///     // These assertions are deterministic because we haven't shared
1889    ///     // the `Arc` between threads.
1890    ///     let five = Arc::from_raw_in(ptr, System);
1891    ///     assert_eq!(2, Arc::strong_count(&five));
1892    ///     Arc::decrement_strong_count_in(ptr, System);
1893    ///     assert_eq!(1, Arc::strong_count(&five));
1894    /// }
1895    /// ```
1896    #[inline]
1897    #[unstable(feature = "allocator_api", issue = "32838")]
1898    pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1899        unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1900    }
1901
1902    #[inline]
1903    fn inner(&self) -> &ArcInner<T> {
1904        // This unsafety is ok because while this arc is alive we're guaranteed
1905        // that the inner pointer is valid. Furthermore, we know that the
1906        // `ArcInner` structure itself is `Sync` because the inner data is
1907        // `Sync` as well, so we're ok loaning out an immutable pointer to these
1908        // contents.
1909        unsafe { self.ptr.as_ref() }
1910    }
1911
1912    // Non-inlined part of `drop`.
1913    #[inline(never)]
1914    unsafe fn drop_slow(&mut self) {
1915        // Drop the weak ref collectively held by all strong references when this
1916        // variable goes out of scope. This ensures that the memory is deallocated
1917        // even if the destructor of `T` panics.
1918        // Take a reference to `self.alloc` instead of cloning because 1. it'll last long
1919        // enough, and 2. you should be able to drop `Arc`s with unclonable allocators
1920        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
1921
1922        // Destroy the data at this time, even though we must not free the box
1923        // allocation itself (there might still be weak pointers lying around).
1924        // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
1925        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
1926    }
1927
1928    /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
1929    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1930    ///
1931    /// # Examples
1932    ///
1933    /// ```
1934    /// use std::sync::Arc;
1935    ///
1936    /// let five = Arc::new(5);
1937    /// let same_five = Arc::clone(&five);
1938    /// let other_five = Arc::new(5);
1939    ///
1940    /// assert!(Arc::ptr_eq(&five, &same_five));
1941    /// assert!(!Arc::ptr_eq(&five, &other_five));
1942    /// ```
1943    ///
1944    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1945    #[inline]
1946    #[must_use]
1947    #[stable(feature = "ptr_eq", since = "1.17.0")]
1948    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
1949        ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
1950    }
1951}
1952
1953impl<T: ?Sized> Arc<T> {
1954    /// Allocates an `ArcInner<T>` with sufficient space for
1955    /// a possibly-unsized inner value where the value has the layout provided.
1956    ///
1957    /// The function `mem_to_arcinner` is called with the data pointer
1958    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
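    ///
    /// For instance, the slice constructors further down in this file pass a closure along the
    /// lines of `|mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>`
    /// to attach the slice length as pointer metadata.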
1959    #[cfg(not(no_global_oom_handling))]
1960    unsafe fn allocate_for_layout(
1961        value_layout: Layout,
1962        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1963        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1964    ) -> *mut ArcInner<T> {
1965        let layout = arcinner_layout_for_value_layout(value_layout);
1966
1967        let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
1968
1969        unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
1970    }
1971
1972    /// Allocates an `ArcInner<T>` with sufficient space for
1973    /// a possibly-unsized inner value where the value has the layout provided,
1974    /// returning an error if allocation fails.
1975    ///
1976    /// The function `mem_to_arcinner` is called with the data pointer
1977    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
1978    unsafe fn try_allocate_for_layout(
1979        value_layout: Layout,
1980        allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1981        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1982    ) -> Result<*mut ArcInner<T>, AllocError> {
1983        let layout = arcinner_layout_for_value_layout(value_layout);
1984
1985        let ptr = allocate(layout)?;
1986
1987        let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
1988
1989        Ok(inner)
1990    }
1991
1992    unsafe fn initialize_arcinner(
1993        ptr: NonNull<[u8]>,
1994        layout: Layout,
1995        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1996    ) -> *mut ArcInner<T> {
1997        let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
1998        debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
1999
2000        unsafe {
2001            (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2002            (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2003        }
2004
2005        inner
2006    }
2007}
2008
2009impl<T: ?Sized, A: Allocator> Arc<T, A> {
2010    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2011    #[inline]
2012    #[cfg(not(no_global_oom_handling))]
2013    unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2014        // Allocate for the `ArcInner<T>` using the given value.
2015        unsafe {
2016            Arc::allocate_for_layout(
2017                Layout::for_value_raw(ptr),
2018                |layout| alloc.allocate(layout),
2019                |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2020            )
2021        }
2022    }
2023
2024    #[cfg(not(no_global_oom_handling))]
2025    fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2026        unsafe {
2027            let value_size = size_of_val(&*src);
2028            let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2029
2030            // Copy value as bytes
2031            ptr::copy_nonoverlapping(
2032                (&raw const *src) as *const u8,
2033                (&raw mut (*ptr).data) as *mut u8,
2034                value_size,
2035            );
2036
2037            // Free the allocation without dropping its contents
2038            let (bptr, alloc) = Box::into_raw_with_allocator(src);
2039            let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
2040            drop(src);
2041
2042            Self::from_ptr_in(ptr, alloc)
2043        }
2044    }
2045}
2046
2047impl<T> Arc<[T]> {
2048    /// Allocates an `ArcInner<[T]>` with the given length.
2049    #[cfg(not(no_global_oom_handling))]
2050    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2051        unsafe {
2052            Self::allocate_for_layout(
2053                Layout::array::<T>(len).unwrap(),
2054                |layout| Global.allocate(layout),
2055                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2056            )
2057        }
2058    }
2059
2060    /// Copy elements from slice into newly allocated `Arc<[T]>`
2061    ///
2062    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
2063    #[cfg(not(no_global_oom_handling))]
2064    unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2065        unsafe {
2066            let ptr = Self::allocate_for_slice(v.len());
2067
2068            ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2069
2070            Self::from_ptr(ptr)
2071        }
2072    }
2073
2074    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2075    ///
2076    /// Behavior is undefined should the size be wrong.
2077    #[cfg(not(no_global_oom_handling))]
2078    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2079        // Panic guard while cloning T elements.
2080        // In the event of a panic, elements that have been written
2081        // into the new ArcInner will be dropped, then the memory freed.
2082        struct Guard<T> {
2083            mem: NonNull<u8>,
2084            elems: *mut T,
2085            layout: Layout,
2086            n_elems: usize,
2087        }
2088
2089        impl<T> Drop for Guard<T> {
2090            fn drop(&mut self) {
2091                unsafe {
2092                    let slice = from_raw_parts_mut(self.elems, self.n_elems);
2093                    ptr::drop_in_place(slice);
2094
2095                    Global.deallocate(self.mem, self.layout);
2096                }
2097            }
2098        }
2099
2100        unsafe {
2101            let ptr = Self::allocate_for_slice(len);
2102
2103            let mem = ptr as *mut _ as *mut u8;
2104            let layout = Layout::for_value_raw(ptr);
2105
2106            // Pointer to first element
2107            let elems = (&raw mut (*ptr).data) as *mut T;
2108
2109            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2110
2111            for (i, item) in iter.enumerate() {
2112                ptr::write(elems.add(i), item);
2113                guard.n_elems += 1;
2114            }
2115
2116            // All clear. Forget the guard so it doesn't free the new ArcInner.
2117            mem::forget(guard);
2118
2119            Self::from_ptr(ptr)
2120        }
2121    }
2122}
2123
2124impl<T, A: Allocator> Arc<[T], A> {
2125    /// Allocates an `ArcInner<[T]>` with the given length.
2126    #[inline]
2127    #[cfg(not(no_global_oom_handling))]
2128    unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2129        unsafe {
2130            Arc::allocate_for_layout(
2131                Layout::array::<T>(len).unwrap(),
2132                |layout| alloc.allocate(layout),
2133                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2134            )
2135        }
2136    }
2137}
2138
2139/// Specialization trait used for `From<&[T]>`.
2140#[cfg(not(no_global_oom_handling))]
2141trait ArcFromSlice<T> {
2142    fn from_slice(slice: &[T]) -> Self;
2143}
2144
2145#[cfg(not(no_global_oom_handling))]
2146impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2147    #[inline]
2148    default fn from_slice(v: &[T]) -> Self {
2149        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2150    }
2151}
2152
2153#[cfg(not(no_global_oom_handling))]
2154impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
2155    #[inline]
2156    fn from_slice(v: &[T]) -> Self {
2157        unsafe { Arc::copy_from_slice(v) }
2158    }
2159}
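
// Illustrative sketch (added comment, not in the original source): both impls above back the
// public `From<&[T]>` conversion, so from the caller's side,
//
//     let by_memcpy: Arc<[u8]> = Arc::from(&[1u8, 2, 3][..]);           // `u8: Copy` fast path
//     let by_clone: Arc<[String]> = Arc::from(&["a".to_string()][..]);  // element-by-element clone
//
// the two paths produce equivalent `Arc<[T]>` values; only the copy strategy differs.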
2160
2161#[stable(feature = "rust1", since = "1.0.0")]
2162impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2163    /// Makes a clone of the `Arc` pointer.
2164    ///
2165    /// This creates another pointer to the same allocation, increasing the
2166    /// strong reference count.
2167    ///
2168    /// # Examples
2169    ///
2170    /// ```
2171    /// use std::sync::Arc;
2172    ///
2173    /// let five = Arc::new(5);
2174    ///
2175    /// let _ = Arc::clone(&five);
2176    /// ```
2177    #[inline]
2178    fn clone(&self) -> Arc<T, A> {
2179        // Using a relaxed ordering is alright here, as knowledge of the
2180        // original reference prevents other threads from erroneously deleting
2181        // the object.
2182        //
2183        // As explained in the [Boost documentation][1], Increasing the
2184        // reference counter can always be done with memory_order_relaxed: New
2185        // references to an object can only be formed from an existing
2186        // reference, and passing an existing reference from one thread to
2187        // another must already provide any required synchronization.
2188        //
2189        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2190        let old_size = self.inner().strong.fetch_add(1, Relaxed);
2191
2192        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
2193        // Arcs. If we don't do this the count can overflow and users will use-after free. This
2194        // branch will never be taken in any realistic program. We abort because such a program is
2195        // incredibly degenerate, and we don't care to support it.
2196        //
2197        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2198        // But we do that check *after* having done the increment, so there is a chance here that
2199        // the worst already happened and we actually do overflow the `usize` counter. However, that
2200        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2201        // above and the `abort` below, which seems exceedingly unlikely.
2202        //
2203        // This is a global invariant, and also applies when using a compare-exchange loop to increment
2204        // counters in other methods.
2205        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2206        // and then overflow using a few `fetch_add`s.
2207        if old_size > MAX_REFCOUNT {
2208            abort();
2209        }
2210
2211        unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2212    }
2213}
2214
2215#[unstable(feature = "ergonomic_clones", issue = "132290")]
2216impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2217
2218#[stable(feature = "rust1", since = "1.0.0")]
2219impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2220    type Target = T;
2221
2222    #[inline]
2223    fn deref(&self) -> &T {
2224        &self.inner().data
2225    }
2226}
2227
2228#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2229unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2230
2231#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2232unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2233
2234#[unstable(feature = "deref_pure_trait", issue = "87121")]
2235unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2236
2237#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2238impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2239
2240#[cfg(not(no_global_oom_handling))]
2241impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2242    /// Makes a mutable reference into the given `Arc`.
2243    ///
2244    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2245    /// [`clone`] the inner value to a new allocation to ensure unique ownership.  This is also
2246    /// referred to as clone-on-write.
2247    ///
2248    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2249    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2250    /// be cloned.
2251    ///
2252    /// See also [`get_mut`], which will fail rather than cloning the inner value
2253    /// or dissociating [`Weak`] pointers.
2254    ///
2255    /// [`clone`]: Clone::clone
2256    /// [`get_mut`]: Arc::get_mut
2257    ///
2258    /// # Examples
2259    ///
2260    /// ```
2261    /// use std::sync::Arc;
2262    ///
2263    /// let mut data = Arc::new(5);
2264    ///
2265    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2266    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2267    /// *Arc::make_mut(&mut data) += 1;         // Clones inner data
2268    /// *Arc::make_mut(&mut data) += 1;         // Won't clone anything
2269    /// *Arc::make_mut(&mut other_data) *= 2;   // Won't clone anything
2270    ///
2271    /// // Now `data` and `other_data` point to different allocations.
2272    /// assert_eq!(*data, 8);
2273    /// assert_eq!(*other_data, 12);
2274    /// ```
2275    ///
2276    /// [`Weak`] pointers will be dissociated:
2277    ///
2278    /// ```
2279    /// use std::sync::Arc;
2280    ///
2281    /// let mut data = Arc::new(75);
2282    /// let weak = Arc::downgrade(&data);
2283    ///
2284    /// assert!(75 == *data);
2285    /// assert!(75 == *weak.upgrade().unwrap());
2286    ///
2287    /// *Arc::make_mut(&mut data) += 1;
2288    ///
2289    /// assert!(76 == *data);
2290    /// assert!(weak.upgrade().is_none());
2291    /// ```
2292    #[inline]
2293    #[stable(feature = "arc_unique", since = "1.4.0")]
2294    pub fn make_mut(this: &mut Self) -> &mut T {
2295        let size_of_val = size_of_val::<T>(&**this);
2296
2297        // Note that we hold both a strong reference and a weak reference.
2298        // Thus, releasing our strong reference only will not, by itself, cause
2299        // the memory to be deallocated.
2300        //
2301        // Use Acquire to ensure that we see any writes to `weak` that happen
2302        // before release writes (i.e., decrements) to `strong`. Since we hold a
2303        // weak count, there's no chance the ArcInner itself could be
2304        // deallocated.
2305        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2306            // Another strong pointer exists, so we must clone.
2307
2308            let this_data_ref: &T = &**this;
2309            // `in_progress` drops the allocation if we panic before finishing initializing it.
2310            let mut in_progress: UniqueArcUninit<T, A> =
2311                UniqueArcUninit::new(this_data_ref, this.alloc.clone());
2312
2313            let initialized_clone = unsafe {
2314                // Clone. If the clone panics, `in_progress` will be dropped and clean up.
2315                this_data_ref.clone_to_uninit(in_progress.data_ptr().cast());
2316                // Cast type of pointer, now that it is initialized.
2317                in_progress.into_arc()
2318            };
2319            *this = initialized_clone;
2320        } else if this.inner().weak.load(Relaxed) != 1 {
2321            // Relaxed suffices in the above because this is fundamentally an
2322            // optimization: we are always racing with weak pointers being
2323    // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2324
2325            // We removed the last strong ref, but there are additional weak
2326            // refs remaining. We'll move the contents to a new Arc, and
2327            // invalidate the other weak refs.
2328
2329            // Note that it is not possible for the read of `weak` to yield
2330            // usize::MAX (i.e., locked), since the weak count can only be
2331            // locked by a thread with a strong reference.
2332
2333            // Materialize our own implicit weak pointer, so that it can clean
2334            // up the ArcInner as needed.
2335            let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2336
2337            // Can just steal the data, all that's left is Weaks
2338            //
2339            // We don't need panic-protection like the above branch does, but we might as well
2340            // use the same mechanism.
2341            let mut in_progress: UniqueArcUninit<T, A> =
2342                UniqueArcUninit::new(&**this, this.alloc.clone());
2343            unsafe {
2344                // Initialize `in_progress` with move of **this.
2345                // We have to express this in terms of bytes because `T: ?Sized`; there is no
2346                // operation that just copies a value based on its `size_of_val()`.
2347                ptr::copy_nonoverlapping(
2348                    ptr::from_ref(&**this).cast::<u8>(),
2349                    in_progress.data_ptr().cast::<u8>(),
2350                    size_of_val,
2351                );
2352
2353                ptr::write(this, in_progress.into_arc());
2354            }
2355        } else {
2356            // We were the sole reference of either kind; bump back up the
2357            // strong ref count.
2358            this.inner().strong.store(1, Release);
2359        }
2360
2361        // As with `get_mut()`, the unsafety is ok because our reference was
2362        // either unique to begin with, or became one upon cloning the contents.
2363        unsafe { Self::get_mut_unchecked(this) }
2364    }
2365}
2366
2367impl<T: Clone, A: Allocator> Arc<T, A> {
2368    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2369    /// clone.
2370    ///
2371    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2372    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2373    ///
2374    /// # Examples
2375    ///
2376    /// ```
2377    /// # use std::{ptr, sync::Arc};
2378    /// let inner = String::from("test");
2379    /// let ptr = inner.as_ptr();
2380    ///
2381    /// let arc = Arc::new(inner);
2382    /// let inner = Arc::unwrap_or_clone(arc);
2383    /// // The inner value was not cloned
2384    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2385    ///
2386    /// let arc = Arc::new(inner);
2387    /// let arc2 = arc.clone();
2388    /// let inner = Arc::unwrap_or_clone(arc);
2389    /// // Because there were 2 references, we had to clone the inner value.
2390    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2391    /// // `arc2` is the last reference, so when we unwrap it we get back
2392    /// // the original `String`.
2393    /// let inner = Arc::unwrap_or_clone(arc2);
2394    /// assert!(ptr::eq(ptr, inner.as_ptr()));
2395    /// ```
2396    #[inline]
2397    #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2398    pub fn unwrap_or_clone(this: Self) -> T {
2399        Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2400    }
2401}
2402
2403impl<T: ?Sized, A: Allocator> Arc<T, A> {
2404    /// Returns a mutable reference into the given `Arc`, if there are
2405    /// no other `Arc` or [`Weak`] pointers to the same allocation.
2406    ///
2407    /// Returns [`None`] otherwise, because it is not safe to
2408    /// mutate a shared value.
2409    ///
2410    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2411    /// the inner value when there are other `Arc` pointers.
2412    ///
2413    /// [make_mut]: Arc::make_mut
2414    /// [clone]: Clone::clone
2415    ///
2416    /// # Examples
2417    ///
2418    /// ```
2419    /// use std::sync::Arc;
2420    ///
2421    /// let mut x = Arc::new(3);
2422    /// *Arc::get_mut(&mut x).unwrap() = 4;
2423    /// assert_eq!(*x, 4);
2424    ///
2425    /// let _y = Arc::clone(&x);
2426    /// assert!(Arc::get_mut(&mut x).is_none());
2427    /// ```
2428    #[inline]
2429    #[stable(feature = "arc_unique", since = "1.4.0")]
2430    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2431        if Self::is_unique(this) {
2432            // This unsafety is ok because we're guaranteed that the pointer
2433            // returned is the *only* pointer that will ever be returned to T. Our
2434            // reference count is guaranteed to be 1 at this point, and we required
2435            // the Arc itself to be `mut`, so we're returning the only possible
2436            // reference to the inner data.
2437            unsafe { Some(Arc::get_mut_unchecked(this)) }
2438        } else {
2439            None
2440        }
2441    }
2442
2443    /// Returns a mutable reference into the given `Arc`,
2444    /// without any check.
2445    ///
2446    /// See also [`get_mut`], which is safe and does appropriate checks.
2447    ///
2448    /// [`get_mut`]: Arc::get_mut
2449    ///
2450    /// # Safety
2451    ///
2452    /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2453    /// they must not be dereferenced or have active borrows for the duration
2454    /// of the returned borrow, and their inner type must be exactly the same as the
2455    /// inner type of this Arc (including lifetimes). This is trivially the case if no
2456    /// such pointers exist, for example immediately after `Arc::new`.
2457    ///
2458    /// # Examples
2459    ///
2460    /// ```
2461    /// #![feature(get_mut_unchecked)]
2462    ///
2463    /// use std::sync::Arc;
2464    ///
2465    /// let mut x = Arc::new(String::new());
2466    /// unsafe {
2467    ///     Arc::get_mut_unchecked(&mut x).push_str("foo")
2468    /// }
2469    /// assert_eq!(*x, "foo");
2470    /// ```
2471    /// Other `Arc` pointers to the same allocation must be to the same type.
2472    /// ```no_run
2473    /// #![feature(get_mut_unchecked)]
2474    ///
2475    /// use std::sync::Arc;
2476    ///
2477    /// let x: Arc<str> = Arc::from("Hello, world!");
2478    /// let mut y: Arc<[u8]> = x.clone().into();
2479    /// unsafe {
2480    ///     // this is Undefined Behavior, because x's inner type is str, not [u8]
2481    ///     Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2482    /// }
2483    /// println!("{}", &*x); // Invalid UTF-8 in a str
2484    /// ```
2485    /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2486    /// ```no_run
2487    /// #![feature(get_mut_unchecked)]
2488    ///
2489    /// use std::sync::Arc;
2490    ///
2491    /// let x: Arc<&str> = Arc::new("Hello, world!");
2492    /// {
2493    ///     let s = String::from("Oh, no!");
2494    ///     let mut y: Arc<&str> = x.clone();
2495    ///     unsafe {
2496    ///         // this is Undefined Behavior, because x's inner type
2497    ///         // is &'long str, not &'short str
2498    ///         *Arc::get_mut_unchecked(&mut y) = &s;
2499    ///     }
2500    /// }
2501    /// println!("{}", &*x); // Use-after-free
2502    /// ```
2503    #[inline]
2504    #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2505    pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2506        // We are careful to *not* create a reference covering the "count" fields, as
2507        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2508        unsafe { &mut (*this.ptr.as_ptr()).data }
2509    }
2510
2511    /// Determine whether this is the unique reference to the underlying data.
2512    ///
2513    /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2514    /// returns `false` otherwise.
2515    ///
2516    /// If this function returns `true`, then it is guaranteed to be safe to call [`get_mut_unchecked`]
2517    /// on this `Arc`, so long as no clones occur in between.
2518    ///
2519    /// # Examples
2520    ///
2521    /// ```
2522    /// #![feature(arc_is_unique)]
2523    ///
2524    /// use std::sync::Arc;
2525    ///
2526    /// let x = Arc::new(3);
2527    /// assert!(Arc::is_unique(&x));
2528    ///
2529    /// let y = Arc::clone(&x);
2530    /// assert!(!Arc::is_unique(&x));
2531    /// drop(y);
2532    ///
2533    /// // Weak references also count, because they could be upgraded at any time.
2534    /// let z = Arc::downgrade(&x);
2535    /// assert!(!Arc::is_unique(&x));
2536    /// ```
2537    ///
2538    /// # Pointer invalidation
2539    ///
2540    /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2541    /// unlike that operation it does not produce any mutable references to the underlying data,
2542    /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2543    /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2544    ///
2545    /// ```
2546    /// #![feature(arc_is_unique)]
2547    ///
2548    /// use std::sync::Arc;
2549    ///
2550    /// let arc = Arc::new(5);
2551    /// let pointer: *const i32 = &*arc;
2552    /// assert!(Arc::is_unique(&arc));
2553    /// assert_eq!(unsafe { *pointer }, 5);
2554    /// ```
2555    ///
2556    /// # Atomic orderings
2557    ///
2558    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
2559    /// call - that is, this call performs an `Acquire` operation on the underlying strong and weak
2560    /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2561    ///
2562    /// Note that this operation requires locking the weak ref count, so concurrent calls to
2563    /// `downgrade` may spin-loop for a short period of time.
2564    ///
2565    /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2566    #[inline]
2567    #[unstable(feature = "arc_is_unique", issue = "138938")]
2568    pub fn is_unique(this: &Self) -> bool {
2569        // lock the weak pointer count if we appear to be the sole weak pointer
2570        // holder.
2571        //
2572        // The acquire label here ensures a happens-before relationship with any
2573        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2574        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2575        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2576        if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2577            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2578            // counter in `drop` -- the only access that happens when any but the last reference
2579            // is being dropped.
2580            let unique = this.inner().strong.load(Acquire) == 1;
2581
2582            // The release write here synchronizes with a read in `downgrade`,
2583            // effectively preventing the above read of `strong` from happening
2584            // after the write.
2585            this.inner().weak.store(1, Release); // release the lock
2586            unique
2587        } else {
2588            false
2589        }
2590    }
2591}
2592
2593#[stable(feature = "rust1", since = "1.0.0")]
2594unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2595    /// Drops the `Arc`.
2596    ///
2597    /// This will decrement the strong reference count. If the strong reference
2598    /// count reaches zero then the only other references (if any) are
2599    /// [`Weak`], so we `drop` the inner value.
2600    ///
2601    /// # Examples
2602    ///
2603    /// ```
2604    /// use std::sync::Arc;
2605    ///
2606    /// struct Foo;
2607    ///
2608    /// impl Drop for Foo {
2609    ///     fn drop(&mut self) {
2610    ///         println!("dropped!");
2611    ///     }
2612    /// }
2613    ///
2614    /// let foo  = Arc::new(Foo);
2615    /// let foo2 = Arc::clone(&foo);
2616    ///
2617    /// drop(foo);    // Doesn't print anything
2618    /// drop(foo2);   // Prints "dropped!"
2619    /// ```
2620    #[inline]
2621    fn drop(&mut self) {
2622        // Because `fetch_sub` is already atomic, we do not need to synchronize
2623        // with other threads unless we are going to delete the object. This
2624        // same logic applies to the below `fetch_sub` to the `weak` count.
2625        if self.inner().strong.fetch_sub(1, Release) != 1 {
2626            return;
2627        }
2628
2629        // This fence is needed to prevent reordering of use of the data and
2630        // deletion of the data. Because it is marked `Release`, the decreasing
2631        // of the reference count synchronizes with this `Acquire` fence. This
2632        // means that use of the data happens before decreasing the reference
2633        // count, which happens before this fence, which happens before the
2634        // deletion of the data.
2635        //
2636        // As explained in the [Boost documentation][1],
2637        //
2638        // > It is important to enforce any possible access to the object in one
2639        // > thread (through an existing reference) to *happen before* deleting
2640        // > the object in a different thread. This is achieved by a "release"
2641        // > operation after dropping a reference (any access to the object
2642        // > through this reference must obviously happened before), and an
2643        // > "acquire" operation before deleting the object.
2644        //
2645        // In particular, while the contents of an Arc are usually immutable, it's
2646        // possible to have interior writes to something like a Mutex<T>. Since a
2647        // Mutex is not acquired when it is deleted, we can't rely on its
2648        // synchronization logic to make writes in thread A visible to a destructor
2649        // running in thread B.
2650        //
2651        // Also note that the Acquire fence here could probably be replaced with an
2652        // Acquire load, which could improve performance in highly-contended
2653        // situations. See [2].
2654        //
2655        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2656        // [2]: (https://github.com/rust-lang/rust/pull/41714)
2657        acquire!(self.inner().strong);
2658
2659        // Make sure we aren't trying to "drop" the shared static for empty slices
2660        // used by Default::default.
2661        debug_assert!(
2662            !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2663            "Arcs backed by a static should never reach a strong count of 0. \
2664            Likely decrement_strong_count or from_raw were called too many times.",
2665        );
2666
2667        unsafe {
2668            self.drop_slow();
2669        }
2670    }
2671}
2672
2673impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2674    /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2675    ///
2676    /// # Examples
2677    ///
2678    /// ```
2679    /// use std::any::Any;
2680    /// use std::sync::Arc;
2681    ///
2682    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2683    ///     if let Ok(string) = value.downcast::<String>() {
2684    ///         println!("String ({}): {}", string.len(), string);
2685    ///     }
2686    /// }
2687    ///
2688    /// let my_string = "Hello World".to_string();
2689    /// print_if_string(Arc::new(my_string));
2690    /// print_if_string(Arc::new(0i8));
2691    /// ```
2692    #[inline]
2693    #[stable(feature = "rc_downcast", since = "1.29.0")]
2694    pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2695    where
2696        T: Any + Send + Sync,
2697    {
2698        if (*self).is::<T>() {
2699            unsafe {
2700                let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2701                Ok(Arc::from_inner_in(ptr.cast(), alloc))
2702            }
2703        } else {
2704            Err(self)
2705        }
2706    }
2707
2708    /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2709    ///
2710    /// For a safe alternative see [`downcast`].
2711    ///
2712    /// # Examples
2713    ///
2714    /// ```
2715    /// #![feature(downcast_unchecked)]
2716    ///
2717    /// use std::any::Any;
2718    /// use std::sync::Arc;
2719    ///
2720    /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2721    ///
2722    /// unsafe {
2723    ///     assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2724    /// }
2725    /// ```
2726    ///
2727    /// # Safety
2728    ///
2729    /// The contained value must be of type `T`. Calling this method
2730    /// with the incorrect type is *undefined behavior*.
2731    ///
2732    ///
2733    /// [`downcast`]: Self::downcast
2734    #[inline]
2735    #[unstable(feature = "downcast_unchecked", issue = "90850")]
2736    pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2737    where
2738        T: Any + Send + Sync,
2739    {
2740        unsafe {
2741            let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2742            Arc::from_inner_in(ptr.cast(), alloc)
2743        }
2744    }
2745}
2746
2747impl<T> Weak<T> {
2748    /// Constructs a new `Weak<T>`, without allocating any memory.
2749    /// Calling [`upgrade`] on the return value always gives [`None`].
2750    ///
2751    /// [`upgrade`]: Weak::upgrade
2752    ///
2753    /// # Examples
2754    ///
2755    /// ```
2756    /// use std::sync::Weak;
2757    ///
2758    /// let empty: Weak<i64> = Weak::new();
2759    /// assert!(empty.upgrade().is_none());
2760    /// ```
2761    #[inline]
2762    #[stable(feature = "downgraded_weak", since = "1.10.0")]
2763    #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2764    #[must_use]
2765    pub const fn new() -> Weak<T> {
2766        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2767    }
2768}
2769
2770impl<T, A: Allocator> Weak<T, A> {
2771    /// Constructs a new `Weak<T, A>` without allocating any memory, but associated with the
2772    /// provided allocator.
2773    /// Calling [`upgrade`] on the return value always gives [`None`].
2774    ///
2775    /// [`upgrade`]: Weak::upgrade
2776    ///
2777    /// # Examples
2778    ///
2779    /// ```
2780    /// #![feature(allocator_api)]
2781    ///
2782    /// use std::sync::Weak;
2783    /// use std::alloc::System;
2784    ///
2785    /// let empty: Weak<i64, _> = Weak::new_in(System);
2786    /// assert!(empty.upgrade().is_none());
2787    /// ```
2788    #[inline]
2789    #[unstable(feature = "allocator_api", issue = "32838")]
2790    pub fn new_in(alloc: A) -> Weak<T, A> {
2791        Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2792    }
2793}
2794
2795/// Helper type to allow accessing the reference counts without
2796/// making any assertions about the data field.
2797struct WeakInner<'a> {
2798    weak: &'a Atomic<usize>,
2799    strong: &'a Atomic<usize>,
2800}
2801
2802impl<T: ?Sized> Weak<T> {
2803    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2804    ///
2805    /// This can be used to safely get a strong reference (by calling [`upgrade`]
2806    /// later) or to release the weak reference by dropping the `Weak<T>`.
2807    ///
2808    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2809    /// as these don't own anything; the method still works on them).
2810    ///
2811    /// # Safety
2812    ///
2813    /// The pointer must have originated from [`into_raw`] and must still own its potential
2814    /// weak reference, and must point to a block of memory allocated by the global allocator.
2815    ///
2816    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2817    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2818    /// count is not modified by this operation) and therefore it must be paired with a previous
2819    /// call to [`into_raw`].
    ///
2820    /// # Examples
2821    ///
2822    /// ```
2823    /// use std::sync::{Arc, Weak};
2824    ///
2825    /// let strong = Arc::new("hello".to_owned());
2826    ///
2827    /// let raw_1 = Arc::downgrade(&strong).into_raw();
2828    /// let raw_2 = Arc::downgrade(&strong).into_raw();
2829    ///
2830    /// assert_eq!(2, Arc::weak_count(&strong));
2831    ///
2832    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2833    /// assert_eq!(1, Arc::weak_count(&strong));
2834    ///
2835    /// drop(strong);
2836    ///
2837    /// // Decrement the last weak count.
2838    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2839    /// ```
2840    ///
2841    /// [`new`]: Weak::new
2842    /// [`into_raw`]: Weak::into_raw
2843    /// [`upgrade`]: Weak::upgrade
2844    #[inline]
2845    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2846    pub unsafe fn from_raw(ptr: *const T) -> Self {
2847        unsafe { Weak::from_raw_in(ptr, Global) }
2848    }
2849
2850    /// Consumes the `Weak<T>` and turns it into a raw pointer.
2851    ///
2852    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2853    /// one weak reference (the weak count is not modified by this operation). It can be turned
2854    /// back into the `Weak<T>` with [`from_raw`].
2855    ///
2856    /// The same restrictions on accessing the target of the pointer as with
2857    /// [`as_ptr`] apply.
2858    ///
2859    /// # Examples
2860    ///
2861    /// ```
2862    /// use std::sync::{Arc, Weak};
2863    ///
2864    /// let strong = Arc::new("hello".to_owned());
2865    /// let weak = Arc::downgrade(&strong);
2866    /// let raw = weak.into_raw();
2867    ///
2868    /// assert_eq!(1, Arc::weak_count(&strong));
2869    /// assert_eq!("hello", unsafe { &*raw });
2870    ///
2871    /// drop(unsafe { Weak::from_raw(raw) });
2872    /// assert_eq!(0, Arc::weak_count(&strong));
2873    /// ```
2874    ///
2875    /// [`from_raw`]: Weak::from_raw
2876    /// [`as_ptr`]: Weak::as_ptr
2877    #[must_use = "losing the pointer will leak memory"]
2878    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2879    pub fn into_raw(self) -> *const T {
2880        ManuallyDrop::new(self).as_ptr()
2881    }
2882}
2883
2884impl<T: ?Sized, A: Allocator> Weak<T, A> {
2885    /// Returns a reference to the underlying allocator.
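    ///
    /// # Examples
    ///
    /// A minimal illustration (the unstable `allocator_api` feature is assumed):
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::alloc::System;
    /// use std::sync::Weak;
    ///
    /// let weak: Weak<u32, System> = Weak::new_in(System);
    /// // Borrow the allocator that is stored alongside the pointer.
    /// let _alloc: &System = weak.allocator();
    /// ```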
2886    #[inline]
2887    #[unstable(feature = "allocator_api", issue = "32838")]
2888    pub fn allocator(&self) -> &A {
2889        &self.alloc
2890    }
2891
2892    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2893    ///
2894    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2895    /// unaligned or even [`null`] otherwise.
2896    ///
2897    /// # Examples
2898    ///
2899    /// ```
2900    /// use std::sync::Arc;
2901    /// use std::ptr;
2902    ///
2903    /// let strong = Arc::new("hello".to_owned());
2904    /// let weak = Arc::downgrade(&strong);
2905    /// // Both point to the same object
2906    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2907    /// // The strong here keeps it alive, so we can still access the object.
2908    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2909    ///
2910    /// drop(strong);
2911    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2912    /// // undefined behavior.
2913    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2914    /// ```
2915    ///
2916    /// [`null`]: core::ptr::null "ptr::null"
2917    #[must_use]
2918    #[stable(feature = "weak_into_raw", since = "1.45.0")]
2919    pub fn as_ptr(&self) -> *const T {
2920        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2921
2922        if is_dangling(ptr) {
2923            // If the pointer is dangling, we return the sentinel directly. This cannot be
2924            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2925            ptr as *const T
2926        } else {
2927            // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2928            // The payload may be dropped at this point, and we have to maintain provenance,
2929            // so use raw pointer manipulation.
2930            unsafe { &raw mut (*ptr).data }
2931        }
2932    }
2933
2934    /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
2935    ///
2936    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2937    /// one weak reference (the weak count is not modified by this operation). It can be turned
2938    /// back into the `Weak<T>` with [`from_raw_in`].
2939    ///
2940    /// The same restrictions on accessing the target of the pointer as with
2941    /// [`as_ptr`] apply.
2942    ///
2943    /// # Examples
2944    ///
2945    /// ```
2946    /// #![feature(allocator_api)]
2947    /// use std::sync::{Arc, Weak};
2948    /// use std::alloc::System;
2949    ///
2950    /// let strong = Arc::new_in("hello".to_owned(), System);
2951    /// let weak = Arc::downgrade(&strong);
2952    /// let (raw, alloc) = weak.into_raw_with_allocator();
2953    ///
2954    /// assert_eq!(1, Arc::weak_count(&strong));
2955    /// assert_eq!("hello", unsafe { &*raw });
2956    ///
2957    /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
2958    /// assert_eq!(0, Arc::weak_count(&strong));
2959    /// ```
2960    ///
2961    /// [`from_raw_in`]: Weak::from_raw_in
2962    /// [`as_ptr`]: Weak::as_ptr
2963    #[must_use = "losing the pointer will leak memory"]
2964    #[unstable(feature = "allocator_api", issue = "32838")]
2965    pub fn into_raw_with_allocator(self) -> (*const T, A) {
2966        let this = mem::ManuallyDrop::new(self);
2967        let result = this.as_ptr();
2968        // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
2969        let alloc = unsafe { ptr::read(&this.alloc) };
2970        (result, alloc)
2971    }
2972
2973    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
2974    /// allocator.
2975    ///
2976    /// This can be used to safely get a strong reference (by calling [`upgrade`]
2977    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2978    ///
2979    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2980    /// as these don't own anything; the method still works on them).
2981    ///
2982    /// # Safety
2983    ///
2984    /// The pointer must have originated from [`into_raw`] and must still own its potential
2985    /// weak reference, and must point to a block of memory allocated by `alloc`.
2986    ///
2987    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2988    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2989    /// count is not modified by this operation) and therefore it must be paired with a previous
2990    /// call to [`into_raw`].
2991    /// # Examples
2992    ///
2993    /// ```
2994    /// use std::sync::{Arc, Weak};
2995    ///
2996    /// let strong = Arc::new("hello".to_owned());
2997    ///
2998    /// let raw_1 = Arc::downgrade(&strong).into_raw();
2999    /// let raw_2 = Arc::downgrade(&strong).into_raw();
3000    ///
3001    /// assert_eq!(2, Arc::weak_count(&strong));
3002    ///
3003    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
3004    /// assert_eq!(1, Arc::weak_count(&strong));
3005    ///
3006    /// drop(strong);
3007    ///
3008    /// // Decrement the last weak count.
3009    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
3010    /// ```
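    ///
    /// An additional sketch with an explicit allocator (the unstable `allocator_api` feature is
    /// assumed), round-tripping through [`Weak::into_raw_with_allocator`]:
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::alloc::System;
    /// use std::sync::{Arc, Weak};
    ///
    /// let strong = Arc::new_in("hello".to_owned(), System);
    /// let (raw, alloc) = Arc::downgrade(&strong).into_raw_with_allocator();
    /// // Reconstruct the weak reference in the allocator it was created with.
    /// let weak = unsafe { Weak::from_raw_in(raw, alloc) };
    /// assert_eq!("hello", &*weak.upgrade().unwrap());
    /// ```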
3011    ///
3012    /// [`new`]: Weak::new
3013    /// [`into_raw`]: Weak::into_raw
3014    /// [`upgrade`]: Weak::upgrade
3015    #[inline]
3016    #[unstable(feature = "allocator_api", issue = "32838")]
3017    pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3018        // See Weak::as_ptr for context on how the input pointer is derived.
3019
3020        let ptr = if is_dangling(ptr) {
3021            // This is a dangling Weak.
3022            ptr as *mut ArcInner<T>
3023        } else {
3024            // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
3025            // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
3026            let offset = unsafe { data_offset(ptr) };
3027            // Thus, we reverse the offset to get the whole ArcInner.
3028            // SAFETY: the pointer originated from a Weak, so this offset is safe.
3029            unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
3030        };
3031
3032        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
3033        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
3034    }
3035}
3036
3037impl<T: ?Sized, A: Allocator> Weak<T, A> {
3038    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
3039    /// dropping of the inner value if successful.
3040    ///
3041    /// Returns [`None`] if the inner value has since been dropped.
3042    ///
3043    /// # Examples
3044    ///
3045    /// ```
3046    /// use std::sync::Arc;
3047    ///
3048    /// let five = Arc::new(5);
3049    ///
3050    /// let weak_five = Arc::downgrade(&five);
3051    ///
3052    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
3053    /// assert!(strong_five.is_some());
3054    ///
3055    /// // Destroy all strong pointers.
3056    /// drop(strong_five);
3057    /// drop(five);
3058    ///
3059    /// assert!(weak_five.upgrade().is_none());
3060    /// ```
3061    #[must_use = "this returns a new `Arc`, \
3062                  without modifying the original weak pointer"]
3063    #[stable(feature = "arc_weak", since = "1.4.0")]
3064    pub fn upgrade(&self) -> Option<Arc<T, A>>
3065    where
3066        A: Clone,
3067    {
3068        #[inline]
3069        fn checked_increment(n: usize) -> Option<usize> {
3070            // Any write of 0 we can observe leaves the field in a permanently zero state.
3071            if n == 0 {
3072                return None;
3073            }
3074            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
3075            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
3076            Some(n + 1)
3077        }
3078
3079        // We use a CAS loop to increment the strong count instead of a
3080        // fetch_add as this function should never take the reference count
3081        // from zero to one.
3082        //
3083        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
3084        // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
3085        // value can be initialized after `Weak` references have already been created. In that case, we
3086        // expect to observe the fully initialized value.
3087        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
3088            // SAFETY: the strong count was nonzero (see checked_increment), so the allocation is live.
3089            unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
3090        } else {
3091            None
3092        }
3093    }
3094
3095    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
3096    ///
3097    /// If `self` was created using [`Weak::new`], this will return 0.
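    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// // Dropping the last `Arc` brings the strong count to 0.
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    ///
    /// // A `Weak` created with `Weak::new` never had a strong reference.
    /// let empty: Weak<i32> = Weak::new();
    /// assert_eq!(0, empty.strong_count());
    /// ```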
3098    #[must_use]
3099    #[stable(feature = "weak_counts", since = "1.41.0")]
3100    pub fn strong_count(&self) -> usize {
3101        if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
3102    }
3103
3104    /// Gets an approximation of the number of `Weak` pointers pointing to this
3105    /// allocation.
3106    ///
3107    /// If `self` was created using [`Weak::new`], or if there are no remaining
3108    /// strong pointers, this will return 0.
3109    ///
3110    /// # Accuracy
3111    ///
3112    /// Due to implementation details, the returned value can be off by 1 in
3113    /// either direction when other threads are manipulating any `Arc`s or
3114    /// `Weak`s pointing to the same allocation.
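    ///
    /// # Examples
    ///
    /// An illustrative sketch:
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// let also_weak = Weak::clone(&weak_five);
    /// assert_eq!(2, weak_five.weak_count());
    ///
    /// // Once the strong pointers are gone, the reported count is 0.
    /// drop(also_weak);
    /// drop(five);
    /// assert_eq!(0, weak_five.weak_count());
    /// ```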
3115    #[must_use]
3116    #[stable(feature = "weak_counts", since = "1.41.0")]
3117    pub fn weak_count(&self) -> usize {
3118        if let Some(inner) = self.inner() {
3119            let weak = inner.weak.load(Acquire);
3120            let strong = inner.strong.load(Relaxed);
3121            if strong == 0 {
3122                0
3123            } else {
3124                // Since we observed that there was at least one strong pointer
3125                // after reading the weak count, we know that the implicit weak
3126                // reference (present whenever any strong references are alive)
3127                // was still around when we observed the weak count, and can
3128                // therefore safely subtract it.
3129                weak - 1
3130            }
3131        } else {
3132            0
3133        }
3134    }
3135
3136    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
3137    /// (i.e., when this `Weak` was created by `Weak::new`).
3138    #[inline]
3139    fn inner(&self) -> Option<WeakInner<'_>> {
3140        let ptr = self.ptr.as_ptr();
3141        if is_dangling(ptr) {
3142            None
3143        } else {
3144            // We are careful to *not* create a reference covering the "data" field, as
3145            // the field may be mutated concurrently (for example, if the last `Arc`
3146            // is dropped, the data field will be dropped in-place).
3147            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
3148        }
3149    }
3150
3151    /// Returns `true` if the two `Weak`s point to the same allocation (similar to [`ptr::eq`]), or
3152    /// if both don't point to any allocation (because they were created with `Weak::new()`).
3153    /// However, this function ignores the metadata of `dyn Trait` pointers.
3154    ///
3155    /// # Notes
3156    ///
3157    /// Since this compares pointers, two `Weak`s created by `Weak::new()` will compare equal to
3158    /// each other, even though they don't point to any allocation.
3159    ///
3160    /// # Examples
3161    ///
3162    /// ```
3163    /// use std::sync::Arc;
3164    ///
3165    /// let first_rc = Arc::new(5);
3166    /// let first = Arc::downgrade(&first_rc);
3167    /// let second = Arc::downgrade(&first_rc);
3168    ///
3169    /// assert!(first.ptr_eq(&second));
3170    ///
3171    /// let third_rc = Arc::new(5);
3172    /// let third = Arc::downgrade(&third_rc);
3173    ///
3174    /// assert!(!first.ptr_eq(&third));
3175    /// ```
3176    ///
3177    /// Comparing `Weak::new`.
3178    ///
3179    /// ```
3180    /// use std::sync::{Arc, Weak};
3181    ///
3182    /// let first = Weak::new();
3183    /// let second = Weak::new();
3184    /// assert!(first.ptr_eq(&second));
3185    ///
3186    /// let third_rc = Arc::new(());
3187    /// let third = Arc::downgrade(&third_rc);
3188    /// assert!(!first.ptr_eq(&third));
3189    /// ```
3190    ///
3191    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
3192    #[inline]
3193    #[must_use]
3194    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
3195    pub fn ptr_eq(&self, other: &Self) -> bool {
3196        ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
3197    }
3198}
3199
3200#[stable(feature = "arc_weak", since = "1.4.0")]
3201impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
3202    /// Makes a clone of the `Weak` pointer that points to the same allocation.
3203    ///
3204    /// # Examples
3205    ///
3206    /// ```
3207    /// use std::sync::{Arc, Weak};
3208    ///
3209    /// let weak_five = Arc::downgrade(&Arc::new(5));
3210    ///
3211    /// let _ = Weak::clone(&weak_five);
3212    /// ```
3213    #[inline]
3214    fn clone(&self) -> Weak<T, A> {
3215        if let Some(inner) = self.inner() {
3216            // See comments in Arc::clone() for why this is relaxed. This can use a
3217            // fetch_add (ignoring the lock) because the weak count is only locked
3218            // when there are *no other* weak pointers in existence. (So we can't be
3219            // running this code in that case).
3220            let old_size = inner.weak.fetch_add(1, Relaxed);
3221
3222            // See comments in Arc::clone() for why we do this (for mem::forget).
3223            if old_size > MAX_REFCOUNT {
3224                abort();
3225            }
3226        }
3227
3228        Weak { ptr: self.ptr, alloc: self.alloc.clone() }
3229    }
3230}
3231
3232#[unstable(feature = "ergonomic_clones", issue = "132290")]
3233impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}
3234
3235#[stable(feature = "downgraded_weak", since = "1.10.0")]
3236impl<T> Default for Weak<T> {
3237    /// Constructs a new `Weak<T>`, without allocating memory.
3238    /// Calling [`upgrade`] on the return value always
3239    /// gives [`None`].
3240    ///
3241    /// [`upgrade`]: Weak::upgrade
3242    ///
3243    /// # Examples
3244    ///
3245    /// ```
3246    /// use std::sync::Weak;
3247    ///
3248    /// let empty: Weak<i64> = Default::default();
3249    /// assert!(empty.upgrade().is_none());
3250    /// ```
3251    fn default() -> Weak<T> {
3252        Weak::new()
3253    }
3254}
3255
3256#[stable(feature = "arc_weak", since = "1.4.0")]
3257unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3258    /// Drops the `Weak` pointer.
3259    ///
3260    /// # Examples
3261    ///
3262    /// ```
3263    /// use std::sync::{Arc, Weak};
3264    ///
3265    /// struct Foo;
3266    ///
3267    /// impl Drop for Foo {
3268    ///     fn drop(&mut self) {
3269    ///         println!("dropped!");
3270    ///     }
3271    /// }
3272    ///
3273    /// let foo = Arc::new(Foo);
3274    /// let weak_foo = Arc::downgrade(&foo);
3275    /// let other_weak_foo = Weak::clone(&weak_foo);
3276    ///
3277    /// drop(weak_foo);   // Doesn't print anything
3278    /// drop(foo);        // Prints "dropped!"
3279    ///
3280    /// assert!(other_weak_foo.upgrade().is_none());
3281    /// ```
3282    fn drop(&mut self) {
3283        // If we find out that we were the last weak pointer, then it's time to
3284        // deallocate the data entirely. See the discussion in Arc::drop() about
3285        // the memory orderings.
3286        //
3287        // It's not necessary to check for the locked state here, because the
3288        // weak count can only be locked if there was precisely one weak ref,
3289        // meaning that drop could only subsequently run ON that remaining weak
3290        // ref, which can only happen after the lock is released.
3291        let inner = if let Some(inner) = self.inner() { inner } else { return };
3292
3293        if inner.weak.fetch_sub(1, Release) == 1 {
3294            acquire!(inner.weak);
3295
3296            // Make sure we aren't trying to "deallocate" the shared static for empty slices
3297            // used by Default::default.
3298            debug_assert!(
3299                !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
3300                "Arc/Weaks backed by a static should never be deallocated. \
3301                Likely decrement_strong_count or from_raw were called too many times.",
3302            );
3303
3304            unsafe {
3305                self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3306            }
3307        }
3308    }
3309}
3310
3311#[stable(feature = "rust1", since = "1.0.0")]
3312trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3313    fn eq(&self, other: &Arc<T, A>) -> bool;
3314    fn ne(&self, other: &Arc<T, A>) -> bool;
3315}
3316
3317#[stable(feature = "rust1", since = "1.0.0")]
3318impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3319    #[inline]
3320    default fn eq(&self, other: &Arc<T, A>) -> bool {
3321        **self == **other
3322    }
3323    #[inline]
3324    default fn ne(&self, other: &Arc<T, A>) -> bool {
3325        **self != **other
3326    }
3327}
3328
3329/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
3330/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
3331/// store large values that are slow to clone and also expensive to compare for equality, so this
3332/// cost pays off more easily. It's also more likely to have two `Arc` clones that point to
3333/// the same value than two `&T`s.
3334///
3335/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
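///
/// A sketch of why the `Eq` bound matters (`f64` is `PartialEq` but not `Eq`):
///
/// ```
/// use std::sync::Arc;
///
/// let nan = Arc::new(f64::NAN);
/// // Both operands are backed by the *same* allocation, yet `NaN != NaN`,
/// // so the pointer-equality shortcut must not be taken for `f64`.
/// assert!(nan != Arc::clone(&nan));
/// ```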
3336#[stable(feature = "rust1", since = "1.0.0")]
3337impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3338    #[inline]
3339    fn eq(&self, other: &Arc<T, A>) -> bool {
3340        Arc::ptr_eq(self, other) || **self == **other
3341    }
3342
3343    #[inline]
3344    fn ne(&self, other: &Arc<T, A>) -> bool {
3345        !Arc::ptr_eq(self, other) && **self != **other
3346    }
3347}
3348
3349#[stable(feature = "rust1", since = "1.0.0")]
3350impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3351    /// Equality for two `Arc`s.
3352    ///
3353    /// Two `Arc`s are equal if their inner values are equal, even if they are
3354    /// stored in different allocations.
3355    ///
3356    /// If `T` also implements `Eq` (implying reflexivity of equality),
3357    /// two `Arc`s that point to the same allocation are always equal.
3358    ///
3359    /// # Examples
3360    ///
3361    /// ```
3362    /// use std::sync::Arc;
3363    ///
3364    /// let five = Arc::new(5);
3365    ///
3366    /// assert!(five == Arc::new(5));
3367    /// ```
3368    #[inline]
3369    fn eq(&self, other: &Arc<T, A>) -> bool {
3370        ArcEqIdent::eq(self, other)
3371    }
3372
3373    /// Inequality for two `Arc`s.
3374    ///
3375    /// Two `Arc`s are not equal if their inner values are not equal.
3376    ///
3377    /// If `T` also implements `Eq` (implying reflexivity of equality),
3378    /// two `Arc`s that point to the same value are always equal.
3379    ///
3380    /// # Examples
3381    ///
3382    /// ```
3383    /// use std::sync::Arc;
3384    ///
3385    /// let five = Arc::new(5);
3386    ///
3387    /// assert!(five != Arc::new(6));
3388    /// ```
3389    #[inline]
3390    fn ne(&self, other: &Arc<T, A>) -> bool {
3391        ArcEqIdent::ne(self, other)
3392    }
3393}
3394
3395#[stable(feature = "rust1", since = "1.0.0")]
3396impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3397    /// Partial comparison for two `Arc`s.
3398    ///
3399    /// The two are compared by calling `partial_cmp()` on their inner values.
3400    ///
3401    /// # Examples
3402    ///
3403    /// ```
3404    /// use std::sync::Arc;
3405    /// use std::cmp::Ordering;
3406    ///
3407    /// let five = Arc::new(5);
3408    ///
3409    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3410    /// ```
3411    fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3412        (**self).partial_cmp(&**other)
3413    }
3414
3415    /// Less-than comparison for two `Arc`s.
3416    ///
3417    /// The two are compared by calling `<` on their inner values.
3418    ///
3419    /// # Examples
3420    ///
3421    /// ```
3422    /// use std::sync::Arc;
3423    ///
3424    /// let five = Arc::new(5);
3425    ///
3426    /// assert!(five < Arc::new(6));
3427    /// ```
3428    fn lt(&self, other: &Arc<T, A>) -> bool {
3429        *(*self) < *(*other)
3430    }
3431
3432    /// 'Less than or equal to' comparison for two `Arc`s.
3433    ///
3434    /// The two are compared by calling `<=` on their inner values.
3435    ///
3436    /// # Examples
3437    ///
3438    /// ```
3439    /// use std::sync::Arc;
3440    ///
3441    /// let five = Arc::new(5);
3442    ///
3443    /// assert!(five <= Arc::new(5));
3444    /// ```
3445    fn le(&self, other: &Arc<T, A>) -> bool {
3446        *(*self) <= *(*other)
3447    }
3448
3449    /// Greater-than comparison for two `Arc`s.
3450    ///
3451    /// The two are compared by calling `>` on their inner values.
3452    ///
3453    /// # Examples
3454    ///
3455    /// ```
3456    /// use std::sync::Arc;
3457    ///
3458    /// let five = Arc::new(5);
3459    ///
3460    /// assert!(five > Arc::new(4));
3461    /// ```
3462    fn gt(&self, other: &Arc<T, A>) -> bool {
3463        *(*self) > *(*other)
3464    }
3465
3466    /// 'Greater than or equal to' comparison for two `Arc`s.
3467    ///
3468    /// The two are compared by calling `>=` on their inner values.
3469    ///
3470    /// # Examples
3471    ///
3472    /// ```
3473    /// use std::sync::Arc;
3474    ///
3475    /// let five = Arc::new(5);
3476    ///
3477    /// assert!(five >= Arc::new(5));
3478    /// ```
3479    fn ge(&self, other: &Arc<T, A>) -> bool {
3480        *(*self) >= *(*other)
3481    }
3482}
3483#[stable(feature = "rust1", since = "1.0.0")]
3484impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3485    /// Comparison for two `Arc`s.
3486    ///
3487    /// The two are compared by calling `cmp()` on their inner values.
3488    ///
3489    /// # Examples
3490    ///
3491    /// ```
3492    /// use std::sync::Arc;
3493    /// use std::cmp::Ordering;
3494    ///
3495    /// let five = Arc::new(5);
3496    ///
3497    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3498    /// ```
3499    fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3500        (**self).cmp(&**other)
3501    }
3502}
3503#[stable(feature = "rust1", since = "1.0.0")]
3504impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3505
3506#[stable(feature = "rust1", since = "1.0.0")]
3507impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3508    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3509        fmt::Display::fmt(&**self, f)
3510    }
3511}
3512
3513#[stable(feature = "rust1", since = "1.0.0")]
3514impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3515    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3516        fmt::Debug::fmt(&**self, f)
3517    }
3518}
3519
3520#[stable(feature = "rust1", since = "1.0.0")]
3521impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3522    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3523        fmt::Pointer::fmt(&(&raw const **self), f)
3524    }
3525}
3526
3527#[cfg(not(no_global_oom_handling))]
3528#[stable(feature = "rust1", since = "1.0.0")]
3529impl<T: Default> Default for Arc<T> {
3530    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3531    ///
3532    /// # Examples
3533    ///
3534    /// ```
3535    /// use std::sync::Arc;
3536    ///
3537    /// let x: Arc<i32> = Default::default();
3538    /// assert_eq!(*x, 0);
3539    /// ```
3540    fn default() -> Arc<T> {
3541        unsafe {
3542            Self::from_inner(
3543                Box::leak(Box::write(
3544                    Box::new_uninit(),
3545                    ArcInner {
3546                        strong: atomic::AtomicUsize::new(1),
3547                        weak: atomic::AtomicUsize::new(1),
3548                        data: T::default(),
3549                    },
3550                ))
3551                .into(),
3552            )
3553        }
3554    }
3555}
3556
3557/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
3558/// returned by `Default::default`.
3559///
3560/// Layout notes:
3561/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
3562/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
3563/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
3564#[repr(C, align(16))]
3565struct SliceArcInnerForStatic {
3566    inner: ArcInner<[u8; 1]>,
3567}
3568#[cfg(not(no_global_oom_handling))]
3569const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;
3570
3571static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
3572    inner: ArcInner {
3573        strong: atomic::AtomicUsize::new(1),
3574        weak: atomic::AtomicUsize::new(1),
3575        data: [0],
3576    },
3577};
3578
3579#[cfg(not(no_global_oom_handling))]
3580#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3581impl Default for Arc<str> {
3582    /// Creates an empty `str` inside an `Arc`.
3583    ///
3584    /// This may or may not share an allocation with other Arcs.
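    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<str> = Default::default();
    /// assert_eq!("", &*empty);
    /// ```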
3585    #[inline]
3586    fn default() -> Self {
3587        let arc: Arc<[u8]> = Default::default();
3588        debug_assert!(core::str::from_utf8(&*arc).is_ok());
3589        let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
3590        unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
3591    }
3592}
3593
3594#[cfg(not(no_global_oom_handling))]
3595#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3596impl Default for Arc<core::ffi::CStr> {
3597    /// Creates an empty `CStr` inside an `Arc`.
3598    ///
3599    /// This may or may not share an allocation with other Arcs.
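    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::ffi::CStr;
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<CStr> = Default::default();
    /// assert_eq!(b"", empty.to_bytes());
    /// ```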
3600    #[inline]
3601    fn default() -> Self {
3602        use core::ffi::CStr;
3603        let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
3604        let inner: NonNull<ArcInner<CStr>> =
3605            NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
3606        // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3607        let this: mem::ManuallyDrop<Arc<CStr>> =
3608            unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3609        (*this).clone()
3610    }
3611}
3612
3613#[cfg(not(no_global_oom_handling))]
3614#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3615impl<T> Default for Arc<[T]> {
3616    /// Creates an empty `[T]` inside an `Arc`.
3617    ///
3618    /// This may or may not share an allocation with other Arcs.
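    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<[i32]> = Default::default();
    /// assert_eq!(0, empty.len());
    /// ```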
3619    #[inline]
3620    fn default() -> Self {
3621        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
3622            // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
3623            // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
3624            // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
3625            // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
3626            let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
3627            let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
3628            // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3629            let this: mem::ManuallyDrop<Arc<[T; 0]>> =
3630                unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3631            return (*this).clone();
3632        }
3633
3634        // If T's alignment is too large for the static, make a new unique allocation.
3635        let arr: [T; 0] = [];
3636        Arc::from(arr)
3637    }
3638}
3639
3640#[cfg(not(no_global_oom_handling))]
3641#[stable(feature = "pin_default_impls", since = "1.91.0")]
3642impl<T> Default for Pin<Arc<T>>
3643where
3644    T: ?Sized,
3645    Arc<T>: Default,
3646{
3647    #[inline]
3648    fn default() -> Self {
3649        unsafe { Pin::new_unchecked(Arc::<T>::default()) }
3650    }
3651}
3652
3653#[stable(feature = "rust1", since = "1.0.0")]
3654impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
3655    fn hash<H: Hasher>(&self, state: &mut H) {
3656        (**self).hash(state)
3657    }
3658}
3659
3660#[cfg(not(no_global_oom_handling))]
3661#[stable(feature = "from_for_ptrs", since = "1.6.0")]
3662impl<T> From<T> for Arc<T> {
3663    /// Converts a `T` into an `Arc<T>`
3664    ///
3665    /// The conversion moves the value into a
3666    /// newly allocated `Arc`. It is equivalent to
3667    /// calling `Arc::new(t)`.
3668    ///
3669    /// # Example
3670    /// ```rust
3671    /// # use std::sync::Arc;
3672    /// let x = 5;
3673    /// let arc = Arc::new(5);
3674    ///
3675    /// assert_eq!(Arc::from(x), arc);
3676    /// ```
3677    fn from(t: T) -> Self {
3678        Arc::new(t)
3679    }
3680}
3681
3682#[cfg(not(no_global_oom_handling))]
3683#[stable(feature = "shared_from_array", since = "1.74.0")]
3684impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
3685    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
3686    ///
3687    /// The conversion moves the array into a newly allocated `Arc`.
3688    ///
3689    /// # Example
3690    ///
3691    /// ```
3692    /// # use std::sync::Arc;
3693    /// let original: [i32; 3] = [1, 2, 3];
3694    /// let shared: Arc<[i32]> = Arc::from(original);
3695    /// assert_eq!(&[1, 2, 3], &shared[..]);
3696    /// ```
3697    #[inline]
3698    fn from(v: [T; N]) -> Arc<[T]> {
3699        Arc::<[T; N]>::from(v)
3700    }
3701}
3702
3703#[cfg(not(no_global_oom_handling))]
3704#[stable(feature = "shared_from_slice", since = "1.21.0")]
3705impl<T: Clone> From<&[T]> for Arc<[T]> {
3706    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3707    ///
3708    /// # Example
3709    ///
3710    /// ```
3711    /// # use std::sync::Arc;
3712    /// let original: &[i32] = &[1, 2, 3];
3713    /// let shared: Arc<[i32]> = Arc::from(original);
3714    /// assert_eq!(&[1, 2, 3], &shared[..]);
3715    /// ```
3716    #[inline]
3717    fn from(v: &[T]) -> Arc<[T]> {
3718        <Self as ArcFromSlice<T>>::from_slice(v)
3719    }
3720}
3721
3722#[cfg(not(no_global_oom_handling))]
3723#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3724impl<T: Clone> From<&mut [T]> for Arc<[T]> {
3725    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3726    ///
3727    /// # Example
3728    ///
3729    /// ```
3730    /// # use std::sync::Arc;
3731    /// let mut original = [1, 2, 3];
3732    /// let original: &mut [i32] = &mut original;
3733    /// let shared: Arc<[i32]> = Arc::from(original);
3734    /// assert_eq!(&[1, 2, 3], &shared[..]);
3735    /// ```
3736    #[inline]
3737    fn from(v: &mut [T]) -> Arc<[T]> {
3738        Arc::from(&*v)
3739    }
3740}
3741
3742#[cfg(not(no_global_oom_handling))]
3743#[stable(feature = "shared_from_slice", since = "1.21.0")]
3744impl From<&str> for Arc<str> {
3745    /// Allocates a reference-counted `str` and copies `v` into it.
3746    ///
3747    /// # Example
3748    ///
3749    /// ```
3750    /// # use std::sync::Arc;
3751    /// let shared: Arc<str> = Arc::from("eggplant");
3752    /// assert_eq!("eggplant", &shared[..]);
3753    /// ```
3754    #[inline]
3755    fn from(v: &str) -> Arc<str> {
3756        let arc = Arc::<[u8]>::from(v.as_bytes());
3757        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
3758    }
3759}
3760
3761#[cfg(not(no_global_oom_handling))]
3762#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3763impl From<&mut str> for Arc<str> {
3764    /// Allocates a reference-counted `str` and copies `v` into it.
3765    ///
3766    /// # Example
3767    ///
3768    /// ```
3769    /// # use std::sync::Arc;
3770    /// let mut original = String::from("eggplant");
3771    /// let original: &mut str = &mut original;
3772    /// let shared: Arc<str> = Arc::from(original);
3773    /// assert_eq!("eggplant", &shared[..]);
3774    /// ```
3775    #[inline]
3776    fn from(v: &mut str) -> Arc<str> {
3777        Arc::from(&*v)
3778    }
3779}
3780
3781#[cfg(not(no_global_oom_handling))]
3782#[stable(feature = "shared_from_slice", since = "1.21.0")]
3783impl From<String> for Arc<str> {
3784    /// Allocates a reference-counted `str` and copies `v` into it.
3785    ///
3786    /// # Example
3787    ///
3788    /// ```
3789    /// # use std::sync::Arc;
3790    /// let unique: String = "eggplant".to_owned();
3791    /// let shared: Arc<str> = Arc::from(unique);
3792    /// assert_eq!("eggplant", &shared[..]);
3793    /// ```
3794    #[inline]
3795    fn from(v: String) -> Arc<str> {
3796        Arc::from(&v[..])
3797    }
3798}
3799
3800#[cfg(not(no_global_oom_handling))]
3801#[stable(feature = "shared_from_slice", since = "1.21.0")]
3802impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
3803    /// Move a boxed object to a new, reference-counted allocation.
3804    ///
3805    /// # Example
3806    ///
3807    /// ```
3808    /// # use std::sync::Arc;
3809    /// let unique: Box<str> = Box::from("eggplant");
3810    /// let shared: Arc<str> = Arc::from(unique);
3811    /// assert_eq!("eggplant", &shared[..]);
3812    /// ```
3813    #[inline]
3814    fn from(v: Box<T, A>) -> Arc<T, A> {
3815        Arc::from_box_in(v)
3816    }
3817}
3818
3819#[cfg(not(no_global_oom_handling))]
3820#[stable(feature = "shared_from_slice", since = "1.21.0")]
3821impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
3822    /// Allocates a reference-counted slice and moves `v`'s items into it.
3823    ///
3824    /// # Example
3825    ///
3826    /// ```
3827    /// # use std::sync::Arc;
3828    /// let unique: Vec<i32> = vec![1, 2, 3];
3829    /// let shared: Arc<[i32]> = Arc::from(unique);
3830    /// assert_eq!(&[1, 2, 3], &shared[..]);
3831    /// ```
3832    #[inline]
3833    fn from(v: Vec<T, A>) -> Arc<[T], A> {
3834        unsafe {
3835            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
3836
3837            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
3838            ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);
3839
3840            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
3841            // without dropping its contents or the allocator
3842            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
3843
3844            Self::from_ptr_in(rc_ptr, alloc)
3845        }
3846    }
3847}
3848
3849#[stable(feature = "shared_from_cow", since = "1.45.0")]
3850impl<'a, B> From<Cow<'a, B>> for Arc<B>
3851where
3852    B: ToOwned + ?Sized,
3853    Arc<B>: From<&'a B> + From<B::Owned>,
3854{
3855    /// Creates an atomically reference-counted pointer from a clone-on-write
3856    /// pointer by copying its content.
3857    ///
3858    /// # Example
3859    ///
3860    /// ```rust
3861    /// # use std::sync::Arc;
3862    /// # use std::borrow::Cow;
3863    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
3864    /// let shared: Arc<str> = Arc::from(cow);
3865    /// assert_eq!("eggplant", &shared[..]);
3866    /// ```
3867    #[inline]
3868    fn from(cow: Cow<'a, B>) -> Arc<B> {
3869        match cow {
3870            Cow::Borrowed(s) => Arc::from(s),
3871            Cow::Owned(s) => Arc::from(s),
3872        }
3873    }
3874}
3875
3876#[stable(feature = "shared_from_str", since = "1.62.0")]
3877impl From<Arc<str>> for Arc<[u8]> {
3878    /// Converts an atomically reference-counted string slice into a byte slice.
3879    ///
3880    /// # Example
3881    ///
3882    /// ```
3883    /// # use std::sync::Arc;
3884    /// let string: Arc<str> = Arc::from("eggplant");
3885    /// let bytes: Arc<[u8]> = Arc::from(string);
3886    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
3887    /// ```
3888    #[inline]
3889    fn from(rc: Arc<str>) -> Self {
3890        // SAFETY: `str` has the same layout as `[u8]`.
3891        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
3892    }
3893}
3894
3895#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
3896impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
3897    type Error = Arc<[T], A>;
3898
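    /// Attempts to convert `boxed_slice` into an `Arc<[T; N], A>`, returning the original
    /// `Arc<[T], A>` unchanged when the lengths do not match; a sketch of typical usage:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).expect("length matches");
    /// assert_eq!([1, 2, 3], *array);
    ///
    /// // The conversion fails (handing back the original `Arc`) when the lengths differ.
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// assert!(Arc::<[i32; 4]>::try_from(slice).is_err());
    /// ```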
3899    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
3900        if boxed_slice.len() == N {
3901            let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
3902            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
3903        } else {
3904            Err(boxed_slice)
3905        }
3906    }
3907}
3908
3909#[cfg(not(no_global_oom_handling))]
3910#[stable(feature = "shared_from_iter", since = "1.37.0")]
3911impl<T> FromIterator<T> for Arc<[T]> {
3912    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
3913    ///
3914    /// # Performance characteristics
3915    ///
3916    /// ## The general case
3917    ///
3918    /// In the general case, collecting into `Arc<[T]>` is done by first
3919    /// collecting into a `Vec<T>`. That is, when writing the following:
3920    ///
3921    /// ```rust
3922    /// # use std::sync::Arc;
3923    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
3924    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3925    /// ```
3926    ///
3927    /// this behaves as if we wrote:
3928    ///
3929    /// ```rust
3930    /// # use std::sync::Arc;
3931    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
3932    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
3933    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
3934    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3935    /// ```
3936    ///
3937    /// This will allocate as many times as needed for constructing the `Vec<T>`
3938    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
3939    ///
3940    /// ## Iterators of known length
3941    ///
3942    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
3943    /// a single allocation will be made for the `Arc<[T]>`. For example:
3944    ///
3945    /// ```rust
3946    /// # use std::sync::Arc;
3947    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
3948    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
3949    /// ```
3950    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
3951        ToArcSlice::to_arc_slice(iter.into_iter())
3952    }
3953}
3954
3955#[cfg(not(no_global_oom_handling))]
3956/// Specialization trait used for collecting into `Arc<[T]>`.
3957trait ToArcSlice<T>: Iterator<Item = T> + Sized {
3958    fn to_arc_slice(self) -> Arc<[T]>;
3959}
3960
3961#[cfg(not(no_global_oom_handling))]
3962impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
3963    default fn to_arc_slice(self) -> Arc<[T]> {
3964        self.collect::<Vec<T>>().into()
3965    }
3966}
3967
3968#[cfg(not(no_global_oom_handling))]
3969impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
3970    fn to_arc_slice(self) -> Arc<[T]> {
3971        // This is the case for a `TrustedLen` iterator.
3972        let (low, high) = self.size_hint();
3973        if let Some(high) = high {
3974            debug_assert_eq!(
3975                low,
3976                high,
3977                "TrustedLen iterator's size hint is not exact: {:?}",
3978                (low, high)
3979            );
3980
3981            unsafe {
3982                // SAFETY: the check above ensures the iterator has exactly `low` elements.
3983                Arc::from_iter_exact(self, low)
3984            }
3985        } else {
3986            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
3987            // length exceeding `usize::MAX`.
3988            // The default implementation would collect into a vec which would panic.
3989            // Thus we panic here immediately without invoking `Vec` code.
3990            panic!("capacity overflow");
3991        }
3992    }
3993}
3994
3995#[stable(feature = "rust1", since = "1.0.0")]
3996impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
3997    fn borrow(&self) -> &T {
3998        &**self
3999    }
4000}
4001
4002#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
4003impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
4004    fn as_ref(&self) -> &T {
4005        &**self
4006    }
4007}
4008
4009#[stable(feature = "pin", since = "1.33.0")]
4010impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
4011
4012/// Gets the offset within an `ArcInner` for the payload behind a pointer.
4013///
4014/// # Safety
4015///
4016/// The pointer must point to (and have valid metadata for) a previously
4017/// valid instance of T, but the T is allowed to be dropped.
4018unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
4019    // Align the unsized value to the end of the ArcInner.
4020    // Because ArcInner is repr(C), it will always be the last field in memory.
4021    // SAFETY: since the only unsized types possible are slices, trait objects,
4022    // and extern types, the input safety requirement is currently enough to
4023    // satisfy the requirements of align_of_val_raw; this is an implementation
4024    // detail of the language that must not be relied upon outside of std.
4025    unsafe { data_offset_align(align_of_val_raw(ptr)) }
4026}
4027
4028#[inline]
4029fn data_offset_align(align: usize) -> usize {
4030    let layout = Layout::new::<ArcInner<()>>();
4031    layout.size() + layout.padding_needed_for(align)
4032}
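
// Worked example for `data_offset_align` (illustrative, assuming a 64-bit target): `ArcInner<()>`
// consists of the two `AtomicUsize` counters, so `layout.size()` is 16 with alignment 8. A payload
// alignment of 8 requires no extra padding, placing the data at offset 16, while a payload
// alignment of 32 rounds the offset up to 32 via `padding_needed_for`.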
4033
4034/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
4035/// but will deallocate it (without dropping the value) when dropped.
4036///
4037/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
4038#[cfg(not(no_global_oom_handling))]
4039struct UniqueArcUninit<T: ?Sized, A: Allocator> {
4040    ptr: NonNull<ArcInner<T>>,
4041    layout_for_value: Layout,
4042    alloc: Option<A>,
4043}
4044
4045#[cfg(not(no_global_oom_handling))]
4046impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
4047    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
4048    fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
4049        let layout = Layout::for_value(for_value);
4050        let ptr = unsafe {
4051            Arc::allocate_for_layout(
4052                layout,
4053                |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4054                |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4055            )
4056        };
4057        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
4058    }
4059
4060    /// Returns the pointer to be written into to initialize the [`Arc`].
4061    fn data_ptr(&mut self) -> *mut T {
4062        let offset = data_offset_align(self.layout_for_value.align());
4063        unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
4064    }
4065
4066    /// Upgrade this into a normal [`Arc`].
4067    ///
4068    /// # Safety
4069    ///
4070    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
4071    unsafe fn into_arc(self) -> Arc<T, A> {
4072        let mut this = ManuallyDrop::new(self);
4073        let ptr = this.ptr.as_ptr();
4074        let alloc = this.alloc.take().unwrap();
4075
4076        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
4077        // for having initialized the data.
4078        unsafe { Arc::from_ptr_in(ptr, alloc) }
4079    }
4080}
4081
4082#[cfg(not(no_global_oom_handling))]
4083impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
4084    fn drop(&mut self) {
4085        // SAFETY:
4086        // * new() produced a pointer safe to deallocate.
4087        // * We own the pointer unless into_arc() was called, which forgets us.
4088        unsafe {
4089            self.alloc.take().unwrap().deallocate(
4090                self.ptr.cast(),
4091                arcinner_layout_for_value_layout(self.layout_for_value),
4092            );
4093        }
4094    }
4095}
4096
4097#[stable(feature = "arc_error", since = "1.52.0")]
4098impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
4099    #[allow(deprecated)]
4100    fn cause(&self) -> Option<&dyn core::error::Error> {
4101        core::error::Error::cause(&**self)
4102    }
4103
4104    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
4105        core::error::Error::source(&**self)
4106    }
4107
4108    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
4109        core::error::Error::provide(&**self, req);
4110    }
4111}
4112
4113/// A uniquely owned [`Arc`].
4114///
4115/// This represents an `Arc` that is known to be uniquely owned -- that is, have exactly one strong
4116/// reference. Multiple weak pointers can be created, but attempts to upgrade those to strong
4117/// references will fail unless the `UniqueArc` they point to has been converted into a regular `Arc`.
4118///
4119/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
4120/// use case is to have an object be mutable during its initialization phase but then have it become
4121/// immutable and converted to a normal `Arc`.
4122///
4123/// This can be used as a flexible way to create cyclic data structures, as in the example below.
4124///
4125/// ```
4126/// #![feature(unique_rc_arc)]
4127/// use std::sync::{Arc, Weak, UniqueArc};
4128///
4129/// struct Gadget {
4130///     me: Weak<Gadget>,
4131/// }
4132///
4133/// fn create_gadget() -> Option<Arc<Gadget>> {
4134///     let mut rc = UniqueArc::new(Gadget {
4135///         me: Weak::new(),
4136///     });
4137///     rc.me = UniqueArc::downgrade(&rc);
4138///     Some(UniqueArc::into_arc(rc))
4139/// }
4140///
4141/// create_gadget().unwrap();
4142/// ```
4143///
4144/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is that
4145/// [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown in the
4146/// previous example, `UniqueArc` allows for more flexibility in the construction of cyclic data,
4147/// including fallible or async constructors.
4148#[unstable(feature = "unique_rc_arc", issue = "112566")]
4149pub struct UniqueArc<
4150    T: ?Sized,
4151    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
4152> {
4153    ptr: NonNull<ArcInner<T>>,
4154    // Define the ownership of `ArcInner<T>` for drop-check
4155    _marker: PhantomData<ArcInner<T>>,
4156    // Invariance is necessary for soundness: once other `Weak`
4157    // references exist, we already have a form of shared mutability!
4158    _marker2: PhantomData<*mut T>,
4159    alloc: A,
4160}
4161
4162#[unstable(feature = "unique_rc_arc", issue = "112566")]
4163unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}
4164
4165#[unstable(feature = "unique_rc_arc", issue = "112566")]
4166unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}
4167
4168#[unstable(feature = "unique_rc_arc", issue = "112566")]
4169// #[unstable(feature = "coerce_unsized", issue = "18598")]
4170impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
4171    for UniqueArc<T, A>
4172{
4173}
4174
4175//#[unstable(feature = "unique_rc_arc", issue = "112566")]
4176#[unstable(feature = "dispatch_from_dyn", issue = "none")]
4177impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}
4178
4179#[unstable(feature = "unique_rc_arc", issue = "112566")]
4180impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
4181    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4182        fmt::Display::fmt(&**self, f)
4183    }
4184}
4185
4186#[unstable(feature = "unique_rc_arc", issue = "112566")]
4187impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
4188    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4189        fmt::Debug::fmt(&**self, f)
4190    }
4191}
4192
4193#[unstable(feature = "unique_rc_arc", issue = "112566")]
4194impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
4195    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4196        fmt::Pointer::fmt(&(&raw const **self), f)
4197    }
4198}
4199
4200#[unstable(feature = "unique_rc_arc", issue = "112566")]
4201impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
4202    fn borrow(&self) -> &T {
4203        &**self
4204    }
4205}
4206
4207#[unstable(feature = "unique_rc_arc", issue = "112566")]
4208impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
4209    fn borrow_mut(&mut self) -> &mut T {
4210        &mut **self
4211    }
4212}
4213
4214#[unstable(feature = "unique_rc_arc", issue = "112566")]
4215impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
4216    fn as_ref(&self) -> &T {
4217        &**self
4218    }
4219}
4220
4221#[unstable(feature = "unique_rc_arc", issue = "112566")]
4222impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
4223    fn as_mut(&mut self) -> &mut T {
4224        &mut **self
4225    }
4226}
4227
4228#[unstable(feature = "unique_rc_arc", issue = "112566")]
4229impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}
4230
4231#[unstable(feature = "unique_rc_arc", issue = "112566")]
4232impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
4233    /// Equality for two `UniqueArc`s.
4234    ///
    /// Two `UniqueArc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five == UniqueArc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&**self, &**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
    /// Partial comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
    /// ```
    #[inline(always)]
    fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five < UniqueArc::new(6));
    /// ```
    #[inline(always)]
    fn lt(&self, other: &UniqueArc<T, A>) -> bool {
        **self < **other
    }

    /// 'Less than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five <= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn le(&self, other: &UniqueArc<T, A>) -> bool {
        **self <= **other
    }

    /// Greater-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five > UniqueArc::new(4));
    /// ```
    #[inline(always)]
    fn gt(&self, other: &UniqueArc<T, A>) -> bool {
        **self > **other
    }

    /// 'Greater than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five >= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn ge(&self, other: &UniqueArc<T, A>) -> bool {
        **self >= **other
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
    /// Comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
    /// ```
    #[inline]
    fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

impl<T> UniqueArc<T, Global> {
    /// Creates a new `UniqueArc`.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
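    ///
    /// # Examples
    ///
    /// A minimal sketch of typical usage:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut five = UniqueArc::new(5);
    ///
    /// // While the value is still uniquely owned it can be mutated in place through `DerefMut`.
    /// *five += 1;
    /// assert_eq!(*five, 6);
    /// ```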
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn new(value: T) -> Self {
        Self::new_in(value, Global)
    }
}

impl<T, A: Allocator> UniqueArc<T, A> {
    /// Creates a new `UniqueArc` in the provided allocator.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
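    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the unstable `allocator_api` feature and the `System`
    /// allocator from `std::alloc`:
    ///
    /// ```
    /// #![feature(unique_rc_arc, allocator_api)]
    /// use std::alloc::System;
    /// use std::sync::UniqueArc;
    ///
    /// // The value is allocated with the provided allocator instead of the global one.
    /// let five = UniqueArc::new_in(5, System);
    /// assert_eq!(*five, 5);
    /// ```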
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    // #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Self {
        let (ptr, alloc) = Box::into_unique(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                // Keep one weak reference so that, even if all the weak pointers created
                // from this `UniqueArc` are dropped, the `UniqueArc` itself stays valid.
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        ));
        Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
    /// Converts the `UniqueArc` into a regular [`Arc`].
    ///
    /// This consumes the `UniqueArc` and returns a regular [`Arc`] containing the inner value of
    /// the `UniqueArc` that is passed to `into_arc`.
    ///
    /// Any weak references created before this method is called can now be upgraded to strong
    /// references.
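    ///
    /// # Examples
    ///
    /// A minimal sketch of the conversion and its effect on weak references:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    ///
    /// // Before the conversion the value is still uniquely owned, so the weak
    /// // reference cannot be upgraded.
    /// assert!(weak.upgrade().is_none());
    ///
    /// let five = UniqueArc::into_arc(unique);
    ///
    /// // After the conversion the weak reference points to the new `Arc`.
    /// assert_eq!(*weak.upgrade().unwrap(), 5);
    /// assert_eq!(*five, 5);
    /// ```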
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn into_arc(this: Self) -> Arc<T, A> {
        let this = ManuallyDrop::new(this);

        // Move the allocator out.
        // SAFETY: `this.alloc` will not be accessed again, nor dropped because it is in
        // a `ManuallyDrop`.
        let alloc: A = unsafe { ptr::read(&this.alloc) };

        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe {
            // Convert our weak reference into a strong reference
            (*this.ptr.as_ptr()).strong.store(1, Release);
            Arc::from_inner_in(this.ptr, alloc)
        }
    }
}

impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
    /// Creates a new weak reference to the `UniqueArc`.
    ///
    /// Attempting to upgrade this weak reference will fail before the `UniqueArc` has been converted
    /// to an [`Arc`] using [`UniqueArc::into_arc`].
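    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    ///
    /// // The weak reference cannot be upgraded while the `UniqueArc` still exists.
    /// assert!(weak.upgrade().is_none());
    /// ```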
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn downgrade(this: &Self) -> Weak<T, A> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object or converting the object to a normal `Arc<T, A>`.
        //
        // Note that we don't need to test whether the weak counter is locked, because no
        // operation on a `UniqueArc` locks the weak counter the way `Arc::get_mut` or
        // `Arc::make_mut` can on an `Arc`.

        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Weak { ptr: this.ptr, alloc: this.alloc.clone() }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe { &self.ptr.as_ref().data }
    }
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
        // have unique ownership and therefore it's safe to make a mutable reference because
        // `UniqueArc` owns the only strong reference to itself.
        // We also need to be careful to only create a mutable reference to the `data` field,
        // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
        // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
        unsafe { &mut (*self.ptr.as_ptr()).data }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "deref_pure_trait", issue = "87121")]
unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
    fn drop(&mut self) {
        // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };

        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
    }
}