alloc/sync.rs
#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.

use core::any::Any;
use core::cell::CloneFromCell;
#[cfg(not(no_global_oom_handling))]
use core::clone::CloneToUninit;
#[cfg(not(no_global_oom_handling))]
use core::clone::TrivialClone;
use core::clone::UseCloned;
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unsize};
use core::mem::{self, ManuallyDrop, align_of_val_raw};
use core::num::NonZeroUsize;
use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
#[cfg(not(no_global_oom_handling))]
use core::ops::{Residual, Try};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might instead cause a `panic` (without actually
/// going above the limit).
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See the comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false-positive
// reports in the Arc / Weak implementation, use atomic loads for
// synchronization instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}
79
80/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
81/// Reference Counted'.
82///
83/// The type `Arc<T>` provides shared ownership of a value of type `T`,
84/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
85/// a new `Arc` instance, which points to the same allocation on the heap as the
86/// source `Arc`, while increasing a reference count. When the last `Arc`
87/// pointer to a given allocation is destroyed, the value stored in that allocation (often
88/// referred to as "inner value") is also dropped.
89///
90/// Shared references in Rust disallow mutation by default, and `Arc` is no
91/// exception: you cannot generally obtain a mutable reference to something
92/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
93///
94/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
95/// [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
96///
97/// 2. Use clone-on-write semantics with [`Arc::make_mut`] which provides efficient mutation
98/// without requiring interior mutability. This approach clones the data only when
99/// needed (when there are multiple references) and can be more efficient when mutations
100/// are infrequent.
101///
102/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
103/// which provides direct mutable access to the inner value without any cloning.
104///
105/// ```
106/// use std::sync::Arc;
107///
108/// let mut data = Arc::new(vec![1, 2, 3]);
109///
110/// // This will clone the vector only if there are other references to it
111/// Arc::make_mut(&mut data).push(4);
112///
113/// assert_eq!(*data, vec![1, 2, 3, 4]);
114/// ```
115///
116/// **Note**: This type is only available on platforms that support atomic
117/// loads and stores of pointers, which includes all platforms that support
118/// the `std` crate but not all those which only support [`alloc`](crate).
119/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
120///
121/// ## Thread Safety
122///
123/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
124/// counting. This means that it is thread-safe. The disadvantage is that
125/// atomic operations are more expensive than ordinary memory accesses. If you
126/// are not sharing reference-counted allocations between threads, consider using
127/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
128/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
129/// However, a library might choose `Arc<T>` in order to give library consumers
130/// more flexibility.
131///
132/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
133/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
134/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
135/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
136/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
137/// data, but it doesn't add thread safety to its data. Consider
138/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
139/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
140/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
141/// non-atomic operations.
142///
143/// In the end, this means that you may need to pair `Arc<T>` with some sort of
144/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
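///
/// For example, a minimal sketch of sharing a mutable counter behind an
/// `Arc<Mutex<_>>` (the counter itself is illustrative; any [`std::sync`]
/// primitive works the same way):
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         // Each thread locks the mutex before mutating the shared value.
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```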
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
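///
/// A minimal sketch of such a tree (the `Node` layout and field names here
/// are illustrative, not a prescribed design):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
///     children: Mutex<Vec<Arc<Node>>>,
/// }
///
/// let root = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(Vec::new()),
/// });
/// let child = Arc::new(Node {
///     // The child points back at its parent with a `Weak`, so the two
///     // nodes do not keep each other alive in a cycle.
///     parent: Mutex::new(Arc::downgrade(&root)),
///     children: Mutex::new(Vec::new()),
/// });
/// root.children.lock().unwrap().push(Arc::clone(&child));
///
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```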
///
/// # Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::Relaxed);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[doc(search_unbox)]
#[rustc_diagnostic_item = "Arc"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct Arc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

// SAFETY: `Arc::clone` doesn't access any `Cell`s which could contain the `Arc` being cloned.
#[unstable(feature = "cell_get_cloned", issue = "145329")]
unsafe impl<T: ?Sized> CloneFromCell for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    #[inline]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, phantom: PhantomData, alloc }
    }

    #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation.
///
/// The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
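///
/// A minimal sketch of the round trip: [`upgrade`] succeeds while the `Arc`
/// is alive and returns [`None`] after it is dropped.
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = Arc::downgrade(&strong);
///
/// // While a strong reference exists, `upgrade` returns a new `Arc`.
/// assert_eq!(weak.upgrade().map(|v| *v), Some(5));
///
/// drop(strong);
///
/// // Once the last `Arc` is gone, the value has been dropped and
/// // `upgrade` returns `None`.
/// assert!(weak.upgrade().is_none());
/// ```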
///
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
#[rustc_diagnostic_item = "ArcWeak"]
pub struct Weak<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because ArcInner has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

// SAFETY: `Weak::clone` doesn't access any `Cell`s which could contain the `Weak` being cloned.
#[unstable(feature = "cell_get_cloned", issue = "145329")]
unsafe impl<T: ?Sized> CloneFromCell for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
// Unlike RcInner, repr(align(2)) is not strictly required because atomic types
// have the same alignment as their size, but we use it for consistency and clarity.
#[repr(C, align(2))]
struct ArcInner<T: ?Sized> {
    strong: Atomic<usize>,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: Atomic<usize>,

    data: T,
}

/// Calculates the layout for `ArcInner<T>` using the inner value's layout.
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
    // Calculate layout using the given value layout.
    // Previously, layout was calculated on the expression
    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
    // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        });
        unsafe { Self::from_inner(Box::leak(x).into()) }
    }

    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself, as that would
    /// cause a memory leak. Using this function, you get access to the weak
    /// pointer during the initialization of `T`, before the `Arc<T>` is created,
    /// such that you can clone and store it inside the `T`.
    ///
    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
    /// then calls your closure, giving it a `Weak<T>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// ```
    /// # #![allow(dead_code)]
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Gadget {
    ///     me: Weak<Gadget>,
    /// }
    ///
    /// impl Gadget {
    ///     /// Constructs a reference counted Gadget.
    ///     fn new() -> Arc<Self> {
    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
    ///         // `Arc` we're constructing.
    ///         Arc::new_cyclic(|me| {
    ///             // Create the actual struct here.
    ///             Gadget { me: me.clone() }
    ///         })
    ///     }
    ///
    ///     /// Returns a reference counted pointer to Self.
    ///     fn me(&self) -> Arc<Self> {
    ///         self.me.upgrade().unwrap()
    ///     }
    /// }
    /// ```
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
    where
        F: FnOnce(&Weak<T>) -> T,
    {
        Self::new_cyclic_in(data_fn, Global)
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "1.92.0")]
    #[must_use]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
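    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// // The pinned value can still be read through `Deref`.
    /// assert_eq!(*pinned, 5);
    /// ```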
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
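    ///
    /// # Examples
    ///
    /// A minimal sketch (the unstable `allocator_api` feature is needed for
    /// [`AllocError`]):
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::try_pin(5)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```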
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Maps the value in an `Arc`, reusing the allocation if possible.
    ///
    /// `f` is called on a reference to the value in the `Arc`, and the result is returned, also in
    /// an `Arc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Arc::map(a, f)` instead of `a.map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    ///
    /// use std::sync::Arc;
    ///
    /// let r = Arc::new(7);
    /// let new = Arc::map(r, |i| i + 7);
    /// assert_eq!(*new, 14);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn map<U>(this: Self, f: impl FnOnce(&T) -> U) -> Arc<U> {
        if size_of::<T>() == size_of::<U>()
            && align_of::<T>() == align_of::<U>()
            && Arc::is_unique(&this)
        {
            unsafe {
                let ptr = Arc::into_raw(this);
                let value = ptr.read();
                let mut allocation = Arc::from_raw(ptr.cast::<mem::MaybeUninit<U>>());

                Arc::get_mut_unchecked(&mut allocation).write(f(&value));
                allocation.assume_init()
            }
        } else {
            Arc::new(f(&*this))
        }
    }

    /// Attempts to map the value in an `Arc`, reusing the allocation if possible.
    ///
    /// `f` is called on a reference to the value in the `Arc`, and if the operation succeeds, the
    /// result is returned, also in an `Arc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Arc::try_map(a, f)` instead of `a.try_map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    ///
    /// use std::sync::Arc;
    ///
    /// let b = Arc::new(7);
    /// let new = Arc::try_map(b, |&i| u32::try_from(i)).unwrap();
    /// assert_eq!(*new, 7);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn try_map<R>(
        this: Self,
        f: impl FnOnce(&T) -> R,
    ) -> <R::Residual as Residual<Arc<R::Output>>>::TryType
    where
        R: Try,
        R::Residual: Residual<Arc<R::Output>>,
    {
        if size_of::<T>() == size_of::<R::Output>()
            && align_of::<T>() == align_of::<R::Output>()
            && Arc::is_unique(&this)
        {
            unsafe {
                let ptr = Arc::into_raw(this);
                let value = ptr.read();
                let mut allocation = Arc::from_raw(ptr.cast::<mem::MaybeUninit<R::Output>>());

                Arc::get_mut_unchecked(&mut allocation).write(f(&value)?);
                try { allocation.assume_init() }
            }
        } else {
            try { Arc::new(f(&*this)?) }
        }
    }
}

impl<T, A: Allocator> Arc<T, A> {
    /// Constructs a new `Arc<T>` in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::new_in(5, System);
    /// ```
    #[inline]
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        );
        let (ptr, alloc) = Box::into_unique(x);
        unsafe { Self::from_inner_in(ptr.into(), alloc) }
    }

    /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::new_uninit_in(System);
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::new_zeroed_in(System);
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself, as that would
    /// cause a memory leak. Using this function, you get access to the weak
    /// pointer during the initialization of `T`, before the `Arc<T, A>` is created,
    /// such that you can clone and store it inside the `T`.
    ///
    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// See [`new_cyclic`].
    ///
    /// [`new_cyclic`]: Arc::new_cyclic
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
    where
        F: FnOnce(&Weak<T, A>) -> T,
    {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                weak: atomic::AtomicUsize::new(1),
                data: mem::MaybeUninit::<T>::uninit(),
            },
            alloc,
        ));
        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr, alloc };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        let strong = unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(&raw mut (*inner).data, data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviors
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

            // Strong references should collectively own a shared weak reference,
            // so don't run the destructor for our old weak reference.
            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
            // and forgetting the weak reference.
            let alloc = weak.into_raw_with_allocator().1;

            Arc::from_inner_in(init_ptr, alloc)
        };

        strong
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
    /// then `data` will be pinned in memory and unable to be moved.
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
    where
        A: 'static,
    {
        unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
    where
        A: 'static,
    {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
    }

    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::try_new_in(5, System)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::try_new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        )?;
        let (ptr, alloc) = Box::into_unique(x);
        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
    }

    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
    /// error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
    /// fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`]-value, as the expression
    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped.
    /// For instance, if two threads execute such an expression in parallel,
    /// there is a race condition without the possibility of unsafety:
    /// The threads could first both check whether they own the last instance
    /// in `Arc::try_unwrap`, determine that they both do not, and then both
    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
    /// In this scenario, the value inside the `Arc` is safely destroyed
    /// by exactly one of the threads, but neither thread will ever be able
    /// to use the value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        let this = ManuallyDrop::new(this);
        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator

        // Make a weak pointer to clean up the implicit strong-weak reference
        let _weak = Weak { ptr: this.ptr, alloc };

        Ok(elem)
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
    /// is meant for different use-cases. If used as a direct replacement
    /// for `Arc::into_inner` anyway, such as with the expression
    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
    /// **not** give the same guarantee as described in the previous paragraph.
    /// For more information, see the examples below and read the documentation
    /// of [`Arc::try_unwrap`].
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    ///     # fn new() -> Self {
    ///     #     LinkedList(None)
    ///     # }
    ///     # fn push(&mut self, x: T) {
    ///     #     self.0 = Some(Arc::new(Node(x, self.0.take())));
    ///     # }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// let size = 100000;
    /// # let size = if cfg!(miri) { 100 } else { size };
    /// for i in 0..size {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "arc_into_inner", since = "1.70.0")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn’t called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.

        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
        let alloc = unsafe { ptr::read(&this.alloc) };

        drop(Weak { ptr: this.ptr, alloc });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "1.92.0")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }

    /// Converts the reference-counted slice into a reference-counted array.
    ///
    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
    ///
    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
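    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the unstable `alloc_slice_into_array` feature):
    ///
    /// ```
    /// #![feature(alloc_slice_into_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::new([1, 2, 3]);
    ///
    /// // The length matches `N`, so the conversion succeeds.
    /// let array: Arc<[u32; 3]> = slice.into_array().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    ///
    /// // A mismatched length returns `None`.
    /// let slice: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// assert!(slice.into_array::<4>().is_none());
    /// ```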
    #[unstable(feature = "alloc_slice_into_array", issue = "148082")]
    #[inline]
    #[must_use]
    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
        if self.len() == N {
            let ptr = Self::into_raw(self) as *const [T; N];

            // SAFETY: The underlying array of a slice has the exact same layout as an actual
            // array `[T; N]` if `N` is equal to the slice's length.
            let me = unsafe { Arc::from_raw(ptr) };
            Some(me)
        } else {
            None
        }
    }
}

impl<T, A: Allocator> Arc<[T], A> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
    /// provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::array::<T>(len).unwrap(),
                    |layout| alloc.allocate_zeroed(layout),
                    |mem| {
                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
                            as *mut ArcInner<[mem::MaybeUninit<T>]>
                    },
                ),
                alloc,
            )
        }
    }
}

impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T, A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
    }
}

impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T], A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
    }
}
1483
1484impl<T: ?Sized> Arc<T> {
1485 /// Constructs an `Arc<T>` from a raw pointer.
1486 ///
1487 /// The raw pointer must have been previously returned by a call to
1488 /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
1489 ///
1490 /// * If `U` is sized, it must have the same size and alignment as `T`. This
1491 /// is trivially true if `U` is `T`.
1492 /// * If `U` is unsized, its data pointer must have the same size and
1493 /// alignment as `T`. This is trivially true if `Arc<U>` was constructed
1494 /// through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1495 /// coercion].
1496 ///
1497 /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1498 /// and alignment, this is basically like transmuting references of
1499 /// different types. See [`mem::transmute`][transmute] for more information
1500 /// on what restrictions apply in this case.
1501 ///
1502 /// The raw pointer must point to a block of memory allocated by the global allocator.
1503 ///
1504 /// The user of `from_raw` has to make sure a specific value of `T` is only
1505 /// dropped once.
1506 ///
1507 /// This function is unsafe because improper use may lead to memory unsafety,
1508 /// even if the returned `Arc<T>` is never accessed.
1509 ///
1510 /// [into_raw]: Arc::into_raw
1511 /// [transmute]: core::mem::transmute
1512 /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1513 ///
1514 /// # Examples
1515 ///
1516 /// ```
1517 /// use std::sync::Arc;
1518 ///
1519 /// let x = Arc::new("hello".to_owned());
1520 /// let x_ptr = Arc::into_raw(x);
1521 ///
1522 /// unsafe {
1523 /// // Convert back to an `Arc` to prevent leak.
1524 /// let x = Arc::from_raw(x_ptr);
1525 /// assert_eq!(&*x, "hello");
1526 ///
1527 /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1528 /// }
1529 ///
1530 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1531 /// ```
1532 ///
1533 /// Convert a slice back into its original array:
1534 ///
1535 /// ```
1536 /// use std::sync::Arc;
1537 ///
1538 /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
1539 /// let x_ptr: *const [u32] = Arc::into_raw(x);
1540 ///
1541 /// unsafe {
1542 /// let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
1543 /// assert_eq!(&*x, &[1, 2, 3]);
1544 /// }
1545 /// ```
1546 #[inline]
1547 #[stable(feature = "rc_raw", since = "1.17.0")]
1548 pub unsafe fn from_raw(ptr: *const T) -> Self {
1549 unsafe { Arc::from_raw_in(ptr, Global) }
1550 }
1551
1552 /// Consumes the `Arc`, returning the wrapped pointer.
1553 ///
1554 /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1555 /// [`Arc::from_raw`].
1556 ///
1557 /// # Examples
1558 ///
1559 /// ```
1560 /// use std::sync::Arc;
1561 ///
1562 /// let x = Arc::new("hello".to_owned());
1563 /// let x_ptr = Arc::into_raw(x);
1564 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1565 /// # // Prevent leaks for Miri.
1566 /// # drop(unsafe { Arc::from_raw(x_ptr) });
1567 /// ```
1568 #[must_use = "losing the pointer will leak memory"]
1569 #[stable(feature = "rc_raw", since = "1.17.0")]
1570 #[rustc_never_returns_null_ptr]
1571 pub fn into_raw(this: Self) -> *const T {
1572 let this = ManuallyDrop::new(this);
1573 Self::as_ptr(&*this)
1574 }
1575
1576 /// Increments the strong reference count on the `Arc<T>` associated with the
1577 /// provided pointer by one.
1578 ///
1579 /// # Safety
1580 ///
1581 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1582 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1583 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1584 /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1585 /// allocated by the global allocator.
1586 ///
1587 /// [from_raw_in]: Arc::from_raw_in
1588 ///
1589 /// # Examples
1590 ///
1591 /// ```
1592 /// use std::sync::Arc;
1593 ///
1594 /// let five = Arc::new(5);
1595 ///
1596 /// unsafe {
1597 /// let ptr = Arc::into_raw(five);
1598 /// Arc::increment_strong_count(ptr);
1599 ///
1600 /// // This assertion is deterministic because we haven't shared
1601 /// // the `Arc` between threads.
1602 /// let five = Arc::from_raw(ptr);
1603 /// assert_eq!(2, Arc::strong_count(&five));
1604 /// # // Prevent leaks for Miri.
1605 /// # Arc::decrement_strong_count(ptr);
1606 /// }
1607 /// ```
1608 #[inline]
1609 #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1610 pub unsafe fn increment_strong_count(ptr: *const T) {
1611 unsafe { Arc::increment_strong_count_in(ptr, Global) }
1612 }
1613
1614 /// Decrements the strong reference count on the `Arc<T>` associated with the
1615 /// provided pointer by one.
1616 ///
1617 /// # Safety
1618 ///
1619 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1620 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1621 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1622 /// least 1) when invoking this method, and `ptr` must point to a block of memory
1623 /// allocated by the global allocator. This method can be used to release the final
1624 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1625 /// released.
1626 ///
1627 /// [from_raw_in]: Arc::from_raw_in
1628 ///
1629 /// # Examples
1630 ///
1631 /// ```
1632 /// use std::sync::Arc;
1633 ///
1634 /// let five = Arc::new(5);
1635 ///
1636 /// unsafe {
1637 /// let ptr = Arc::into_raw(five);
1638 /// Arc::increment_strong_count(ptr);
1639 ///
    ///     // These assertions are deterministic because we haven't shared
1641 /// // the `Arc` between threads.
1642 /// let five = Arc::from_raw(ptr);
1643 /// assert_eq!(2, Arc::strong_count(&five));
1644 /// Arc::decrement_strong_count(ptr);
1645 /// assert_eq!(1, Arc::strong_count(&five));
1646 /// }
1647 /// ```
1648 #[inline]
1649 #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1650 pub unsafe fn decrement_strong_count(ptr: *const T) {
1651 unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1652 }
1653}
1654
1655impl<T: ?Sized, A: Allocator> Arc<T, A> {
1656 /// Returns a reference to the underlying allocator.
1657 ///
1658 /// Note: this is an associated function, which means that you have
1659 /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
1660 /// is so that there is no conflict with a method on the inner type.
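    ///
    /// # Examples
    ///
    /// A minimal illustration (assumes the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let a = Arc::new_in(5, System);
    /// // Associated-function call syntax avoids clashing with methods on the inner type.
    /// let _alloc: &System = Arc::allocator(&a);
    /// ```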
1661 #[inline]
1662 #[unstable(feature = "allocator_api", issue = "32838")]
1663 pub fn allocator(this: &Self) -> &A {
1664 &this.alloc
1665 }
1666
1667 /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1668 ///
1669 /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1670 /// [`Arc::from_raw_in`].
1671 ///
1672 /// # Examples
1673 ///
1674 /// ```
1675 /// #![feature(allocator_api)]
1676 /// use std::sync::Arc;
1677 /// use std::alloc::System;
1678 ///
1679 /// let x = Arc::new_in("hello".to_owned(), System);
1680 /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1681 /// assert_eq!(unsafe { &*ptr }, "hello");
1682 /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1683 /// assert_eq!(&*x, "hello");
1684 /// ```
1685 #[must_use = "losing the pointer will leak memory"]
1686 #[unstable(feature = "allocator_api", issue = "32838")]
1687 pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1688 let this = mem::ManuallyDrop::new(this);
1689 let ptr = Self::as_ptr(&this);
1690 // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1691 let alloc = unsafe { ptr::read(&this.alloc) };
1692 (ptr, alloc)
1693 }
1694
1695 /// Provides a raw pointer to the data.
1696 ///
    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid
    /// for as long as there are strong references to the allocation.
1699 ///
1700 /// # Examples
1701 ///
1702 /// ```
1703 /// use std::sync::Arc;
1704 ///
1705 /// let x = Arc::new("hello".to_owned());
1706 /// let y = Arc::clone(&x);
1707 /// let x_ptr = Arc::as_ptr(&x);
1708 /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1709 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1710 /// ```
1711 #[must_use]
1712 #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1713 #[rustc_never_returns_null_ptr]
1714 pub fn as_ptr(this: &Self) -> *const T {
1715 let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1716
1717 // SAFETY: This cannot go through Deref::deref or ArcInnerPtr::inner because
1718 // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1719 // write through the pointer after the Arc is recovered through `from_raw`.
1720 unsafe { &raw mut (*ptr).data }
1721 }
1722
1723 /// Constructs an `Arc<T, A>` from a raw pointer.
1724 ///
1725 /// The raw pointer must have been previously returned by a call to [`Arc<U,
1726 /// A>::into_raw`][into_raw] with the following requirements:
1727 ///
1728 /// * If `U` is sized, it must have the same size and alignment as `T`. This
1729 /// is trivially true if `U` is `T`.
1730 /// * If `U` is unsized, its data pointer must have the same size and
1731 /// alignment as `T`. This is trivially true if `Arc<U>` was constructed
1732 /// through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1733 /// coercion].
1734 ///
1735 /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1736 /// and alignment, this is basically like transmuting references of
1737 /// different types. See [`mem::transmute`][transmute] for more information
1738 /// on what restrictions apply in this case.
1739 ///
    /// The raw pointer must point to a block of memory allocated by `alloc`.
1741 ///
1742 /// The user of `from_raw` has to make sure a specific value of `T` is only
1743 /// dropped once.
1744 ///
1745 /// This function is unsafe because improper use may lead to memory unsafety,
1746 /// even if the returned `Arc<T>` is never accessed.
1747 ///
1748 /// [into_raw]: Arc::into_raw
1749 /// [transmute]: core::mem::transmute
1750 /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1751 ///
1752 /// # Examples
1753 ///
1754 /// ```
1755 /// #![feature(allocator_api)]
1756 ///
1757 /// use std::sync::Arc;
1758 /// use std::alloc::System;
1759 ///
1760 /// let x = Arc::new_in("hello".to_owned(), System);
1761 /// let (x_ptr, alloc) = Arc::into_raw_with_allocator(x);
1762 ///
1763 /// unsafe {
1764 /// // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw_in(x_ptr, alloc);
1766 /// assert_eq!(&*x, "hello");
1767 ///
    ///     // Further calls to `Arc::from_raw_in(x_ptr, System)` would be memory-unsafe.
1769 /// }
1770 ///
1771 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1772 /// ```
1773 ///
1774 /// Convert a slice back into its original array:
1775 ///
1776 /// ```
1777 /// #![feature(allocator_api)]
1778 ///
1779 /// use std::sync::Arc;
1780 /// use std::alloc::System;
1781 ///
1782 /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1783 /// let x_ptr: *const [u32] = Arc::into_raw_with_allocator(x).0;
1784 ///
1785 /// unsafe {
1786 /// let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1787 /// assert_eq!(&*x, &[1, 2, 3]);
1788 /// }
1789 /// ```
1790 #[inline]
1791 #[unstable(feature = "allocator_api", issue = "32838")]
1792 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1793 unsafe {
1794 let offset = data_offset(ptr);
1795
1796 // Reverse the offset to find the original ArcInner.
1797 let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1798
1799 Self::from_ptr_in(arc_ptr, alloc)
1800 }
1801 }
1802
1803 /// Creates a new [`Weak`] pointer to this allocation.
1804 ///
1805 /// # Examples
1806 ///
1807 /// ```
1808 /// use std::sync::Arc;
1809 ///
1810 /// let five = Arc::new(5);
1811 ///
1812 /// let weak_five = Arc::downgrade(&five);
1813 /// ```
1814 #[must_use = "this returns a new `Weak` pointer, \
1815 without modifying the original `Arc`"]
1816 #[stable(feature = "arc_weak", since = "1.4.0")]
1817 pub fn downgrade(this: &Self) -> Weak<T, A>
1818 where
1819 A: Clone,
1820 {
1821 // This Relaxed is OK because we're checking the value in the CAS
1822 // below.
1823 let mut cur = this.inner().weak.load(Relaxed);
1824
1825 loop {
1826 // check if the weak counter is currently "locked"; if so, spin.
1827 if cur == usize::MAX {
1828 hint::spin_loop();
1829 cur = this.inner().weak.load(Relaxed);
1830 continue;
1831 }
1832
1833 // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1834 assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1835
1836 // NOTE: this code currently ignores the possibility of overflow
1837 // into usize::MAX; in general both Rc and Arc need to be adjusted
1838 // to deal with overflow.
1839
1840 // Unlike with Clone(), we need this to be an Acquire read to
1841 // synchronize with the write coming from `is_unique`, so that the
1842 // events prior to that write happen before this read.
1843 match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1844 Ok(_) => {
1845 // Make sure we do not create a dangling Weak
1846 debug_assert!(!is_dangling(this.ptr.as_ptr()));
1847 return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1848 }
1849 Err(old) => cur = old,
1850 }
1851 }
1852 }
1853
1854 /// Gets the number of [`Weak`] pointers to this allocation.
1855 ///
1856 /// # Safety
1857 ///
1858 /// This method by itself is safe, but using it correctly requires extra care.
1859 /// Another thread can change the weak count at any time,
1860 /// including potentially between calling this method and acting on the result.
1861 ///
1862 /// # Examples
1863 ///
1864 /// ```
1865 /// use std::sync::Arc;
1866 ///
1867 /// let five = Arc::new(5);
1868 /// let _weak_five = Arc::downgrade(&five);
1869 ///
1870 /// // This assertion is deterministic because we haven't shared
1871 /// // the `Arc` or `Weak` between threads.
1872 /// assert_eq!(1, Arc::weak_count(&five));
1873 /// ```
1874 #[inline]
1875 #[must_use]
1876 #[stable(feature = "arc_counts", since = "1.15.0")]
1877 pub fn weak_count(this: &Self) -> usize {
1878 let cnt = this.inner().weak.load(Relaxed);
1879 // If the weak count is currently locked, the value of the
1880 // count was 0 just before taking the lock.
1881 if cnt == usize::MAX { 0 } else { cnt - 1 }
1882 }
1883
1884 /// Gets the number of strong (`Arc`) pointers to this allocation.
1885 ///
1886 /// # Safety
1887 ///
1888 /// This method by itself is safe, but using it correctly requires extra care.
1889 /// Another thread can change the strong count at any time,
1890 /// including potentially between calling this method and acting on the result.
1891 ///
1892 /// # Examples
1893 ///
1894 /// ```
1895 /// use std::sync::Arc;
1896 ///
1897 /// let five = Arc::new(5);
1898 /// let _also_five = Arc::clone(&five);
1899 ///
1900 /// // This assertion is deterministic because we haven't shared
1901 /// // the `Arc` between threads.
1902 /// assert_eq!(2, Arc::strong_count(&five));
1903 /// ```
1904 #[inline]
1905 #[must_use]
1906 #[stable(feature = "arc_counts", since = "1.15.0")]
1907 pub fn strong_count(this: &Self) -> usize {
1908 this.inner().strong.load(Relaxed)
1909 }
1910
1911 /// Increments the strong reference count on the `Arc<T>` associated with the
1912 /// provided pointer by one.
1913 ///
1914 /// # Safety
1915 ///
1916 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1917 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1918 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1919 /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1920 /// allocated by `alloc`.
1921 ///
1922 /// [from_raw_in]: Arc::from_raw_in
1923 ///
1924 /// # Examples
1925 ///
1926 /// ```
1927 /// #![feature(allocator_api)]
1928 ///
1929 /// use std::sync::Arc;
1930 /// use std::alloc::System;
1931 ///
1932 /// let five = Arc::new_in(5, System);
1933 ///
1934 /// unsafe {
1935 /// let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1936 /// Arc::increment_strong_count_in(ptr, System);
1937 ///
1938 /// // This assertion is deterministic because we haven't shared
1939 /// // the `Arc` between threads.
1940 /// let five = Arc::from_raw_in(ptr, System);
1941 /// assert_eq!(2, Arc::strong_count(&five));
1942 /// # // Prevent leaks for Miri.
1943 /// # Arc::decrement_strong_count_in(ptr, System);
1944 /// }
1945 /// ```
1946 #[inline]
1947 #[unstable(feature = "allocator_api", issue = "32838")]
1948 pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1949 where
1950 A: Clone,
1951 {
1952 // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1953 let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1954 // Now increase refcount, but don't drop new refcount either
1955 let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1956 }
1957
1958 /// Decrements the strong reference count on the `Arc<T>` associated with the
1959 /// provided pointer by one.
1960 ///
1961 /// # Safety
1962 ///
1963 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1964 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1965 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1966 /// least 1) when invoking this method, and `ptr` must point to a block of memory
1967 /// allocated by `alloc`. This method can be used to release the final
1968 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1969 /// released.
1970 ///
1971 /// [from_raw_in]: Arc::from_raw_in
1972 ///
1973 /// # Examples
1974 ///
1975 /// ```
1976 /// #![feature(allocator_api)]
1977 ///
1978 /// use std::sync::Arc;
1979 /// use std::alloc::System;
1980 ///
1981 /// let five = Arc::new_in(5, System);
1982 ///
1983 /// unsafe {
1984 /// let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1985 /// Arc::increment_strong_count_in(ptr, System);
1986 ///
    ///     // These assertions are deterministic because we haven't shared
1988 /// // the `Arc` between threads.
1989 /// let five = Arc::from_raw_in(ptr, System);
1990 /// assert_eq!(2, Arc::strong_count(&five));
1991 /// Arc::decrement_strong_count_in(ptr, System);
1992 /// assert_eq!(1, Arc::strong_count(&five));
1993 /// }
1994 /// ```
1995 #[inline]
1996 #[unstable(feature = "allocator_api", issue = "32838")]
1997 pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1998 unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1999 }
2000
2001 #[inline]
2002 fn inner(&self) -> &ArcInner<T> {
2003 // This unsafety is ok because while this arc is alive we're guaranteed
2004 // that the inner pointer is valid. Furthermore, we know that the
2005 // `ArcInner` structure itself is `Sync` because the inner data is
2006 // `Sync` as well, so we're ok loaning out an immutable pointer to these
2007 // contents.
2008 unsafe { self.ptr.as_ref() }
2009 }
2010
2011 // Non-inlined part of `drop`.
2012 #[inline(never)]
2013 unsafe fn drop_slow(&mut self) {
2014 // Drop the weak ref collectively held by all strong references when this
2015 // variable goes out of scope. This ensures that the memory is deallocated
2016 // even if the destructor of `T` panics.
        // Take a reference to `self.alloc` instead of cloning because (1) it'll last long
        // enough, and (2) you should be able to drop `Arc`s with unclonable allocators.
2019 let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
2020
2021 // Destroy the data at this time, even though we must not free the box
2022 // allocation itself (there might still be weak pointers lying around).
2023 // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
2024 unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
2025 }
2026
2027 /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
2028 /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
2029 ///
2030 /// # Examples
2031 ///
2032 /// ```
2033 /// use std::sync::Arc;
2034 ///
2035 /// let five = Arc::new(5);
2036 /// let same_five = Arc::clone(&five);
2037 /// let other_five = Arc::new(5);
2038 ///
2039 /// assert!(Arc::ptr_eq(&five, &same_five));
2040 /// assert!(!Arc::ptr_eq(&five, &other_five));
2041 /// ```
2042 ///
2043 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
2044 #[inline]
2045 #[must_use]
2046 #[stable(feature = "ptr_eq", since = "1.17.0")]
2047 pub fn ptr_eq(this: &Self, other: &Self) -> bool {
2048 ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
2049 }
2050}
2051
2052impl<T: ?Sized> Arc<T> {
2053 /// Allocates an `ArcInner<T>` with sufficient space for
2054 /// a possibly-unsized inner value where the value has the layout provided.
2055 ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
2058 #[cfg(not(no_global_oom_handling))]
2059 unsafe fn allocate_for_layout(
2060 value_layout: Layout,
2061 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
2062 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2063 ) -> *mut ArcInner<T> {
2064 let layout = arcinner_layout_for_value_layout(value_layout);
2065
2066 let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
2067
2068 unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
2069 }
2070
2071 /// Allocates an `ArcInner<T>` with sufficient space for
2072 /// a possibly-unsized inner value where the value has the layout provided,
2073 /// returning an error if allocation fails.
2074 ///
    /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
2077 unsafe fn try_allocate_for_layout(
2078 value_layout: Layout,
2079 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
2080 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2081 ) -> Result<*mut ArcInner<T>, AllocError> {
2082 let layout = arcinner_layout_for_value_layout(value_layout);
2083
2084 let ptr = allocate(layout)?;
2085
2086 let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
2087
2088 Ok(inner)
2089 }
2090
2091 unsafe fn initialize_arcinner(
2092 ptr: NonNull<[u8]>,
2093 layout: Layout,
2094 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2095 ) -> *mut ArcInner<T> {
2096 let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
2097 debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
2098
2099 unsafe {
2100 (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2101 (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2102 }
2103
2104 inner
2105 }
2106}
2107
2108impl<T: ?Sized, A: Allocator> Arc<T, A> {
2109 /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2110 #[inline]
2111 #[cfg(not(no_global_oom_handling))]
2112 unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2113 // Allocate for the `ArcInner<T>` using the given value.
2114 unsafe {
2115 Arc::allocate_for_layout(
2116 Layout::for_value_raw(ptr),
2117 |layout| alloc.allocate(layout),
2118 |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2119 )
2120 }
2121 }
2122
2123 #[cfg(not(no_global_oom_handling))]
2124 fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2125 unsafe {
2126 let value_size = size_of_val(&*src);
2127 let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2128
2129 // Copy value as bytes
2130 ptr::copy_nonoverlapping(
2131 (&raw const *src) as *const u8,
2132 (&raw mut (*ptr).data) as *mut u8,
2133 value_size,
2134 );
2135
2136 // Free the allocation without dropping its contents
2137 let (bptr, alloc) = Box::into_raw_with_allocator(src);
2138 let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
2139 drop(src);
2140
2141 Self::from_ptr_in(ptr, alloc)
2142 }
2143 }
2144}
2145
2146impl<T> Arc<[T]> {
2147 /// Allocates an `ArcInner<[T]>` with the given length.
2148 #[cfg(not(no_global_oom_handling))]
2149 unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2150 unsafe {
2151 Self::allocate_for_layout(
2152 Layout::array::<T>(len).unwrap(),
2153 |layout| Global.allocate(layout),
2154 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2155 )
2156 }
2157 }
2158
    /// Copies elements from a slice into a newly allocated `Arc<[T]>`.
    ///
    /// Unsafe because the caller must either take ownership, bind `T: Copy`, or
    /// bind `T: TrivialClone`.
2163 #[cfg(not(no_global_oom_handling))]
2164 unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2165 unsafe {
2166 let ptr = Self::allocate_for_slice(v.len());
2167
2168 ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2169
2170 Self::from_ptr(ptr)
2171 }
2172 }
2173
2174 /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2175 ///
2176 /// Behavior is undefined should the size be wrong.
2177 #[cfg(not(no_global_oom_handling))]
2178 unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2179 // Panic guard while cloning T elements.
2180 // In the event of a panic, elements that have been written
2181 // into the new ArcInner will be dropped, then the memory freed.
2182 struct Guard<T> {
2183 mem: NonNull<u8>,
2184 elems: *mut T,
2185 layout: Layout,
2186 n_elems: usize,
2187 }
2188
2189 impl<T> Drop for Guard<T> {
2190 fn drop(&mut self) {
2191 unsafe {
2192 let slice = from_raw_parts_mut(self.elems, self.n_elems);
2193 ptr::drop_in_place(slice);
2194
2195 Global.deallocate(self.mem, self.layout);
2196 }
2197 }
2198 }
2199
2200 unsafe {
2201 let ptr = Self::allocate_for_slice(len);
2202
2203 let mem = ptr as *mut _ as *mut u8;
2204 let layout = Layout::for_value_raw(ptr);
2205
2206 // Pointer to first element
2207 let elems = (&raw mut (*ptr).data) as *mut T;
2208
2209 let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2210
2211 for (i, item) in iter.enumerate() {
2212 ptr::write(elems.add(i), item);
2213 guard.n_elems += 1;
2214 }
2215
2216 // All clear. Forget the guard so it doesn't free the new ArcInner.
2217 mem::forget(guard);
2218
2219 Self::from_ptr(ptr)
2220 }
2221 }
2222}
2223
2224impl<T, A: Allocator> Arc<[T], A> {
2225 /// Allocates an `ArcInner<[T]>` with the given length.
2226 #[inline]
2227 #[cfg(not(no_global_oom_handling))]
2228 unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2229 unsafe {
2230 Arc::allocate_for_layout(
2231 Layout::array::<T>(len).unwrap(),
2232 |layout| alloc.allocate(layout),
2233 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2234 )
2235 }
2236 }
2237}
2238
2239/// Specialization trait used for `From<&[T]>`.
2240#[cfg(not(no_global_oom_handling))]
2241trait ArcFromSlice<T> {
2242 fn from_slice(slice: &[T]) -> Self;
2243}
2244
2245#[cfg(not(no_global_oom_handling))]
2246impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2247 #[inline]
2248 default fn from_slice(v: &[T]) -> Self {
2249 unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2250 }
2251}
2252
2253#[cfg(not(no_global_oom_handling))]
2254impl<T: TrivialClone> ArcFromSlice<T> for Arc<[T]> {
2255 #[inline]
2256 fn from_slice(v: &[T]) -> Self {
2257 // SAFETY: `T` implements `TrivialClone`, so this is sound and equivalent
2258 // to the above.
2259 unsafe { Arc::copy_from_slice(v) }
2260 }
2261}
2262
2263#[stable(feature = "rust1", since = "1.0.0")]
2264impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2265 /// Makes a clone of the `Arc` pointer.
2266 ///
2267 /// This creates another pointer to the same allocation, increasing the
2268 /// strong reference count.
2269 ///
2270 /// # Examples
2271 ///
2272 /// ```
2273 /// use std::sync::Arc;
2274 ///
2275 /// let five = Arc::new(5);
2276 ///
2277 /// let _ = Arc::clone(&five);
2278 /// ```
2279 #[inline]
2280 fn clone(&self) -> Arc<T, A> {
2281 // Using a relaxed ordering is alright here, as knowledge of the
2282 // original reference prevents other threads from erroneously deleting
2283 // the object.
2284 //
        // As explained in the [Boost documentation][1], increasing the
2286 // reference counter can always be done with memory_order_relaxed: New
2287 // references to an object can only be formed from an existing
2288 // reference, and passing an existing reference from one thread to
2289 // another must already provide any required synchronization.
2290 //
2291 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2292 let old_size = self.inner().strong.fetch_add(1, Relaxed);
2293
2294 // However we need to guard against massive refcounts in case someone is `mem::forget`ing
2295 // Arcs. If we don't do this the count can overflow and users will use-after free. This
2296 // branch will never be taken in any realistic program. We abort because such a program is
2297 // incredibly degenerate, and we don't care to support it.
2298 //
2299 // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2300 // But we do that check *after* having done the increment, so there is a chance here that
2301 // the worst already happened and we actually do overflow the `usize` counter. However, that
2302 // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2303 // above and the `abort` below, which seems exceedingly unlikely.
2304 //
2305 // This is a global invariant, and also applies when using a compare-exchange loop to increment
2306 // counters in other methods.
2307 // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2308 // and then overflow using a few `fetch_add`s.
2309 if old_size > MAX_REFCOUNT {
2310 abort();
2311 }
2312
2313 unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2314 }
2315}
2316
2317#[unstable(feature = "ergonomic_clones", issue = "132290")]
2318impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2319
2320#[stable(feature = "rust1", since = "1.0.0")]
2321impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2322 type Target = T;
2323
2324 #[inline]
2325 fn deref(&self) -> &T {
2326 &self.inner().data
2327 }
2328}
2329
2330#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2331unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2332
2333#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2334unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2335
2336#[unstable(feature = "deref_pure_trait", issue = "87121")]
2337unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2338
2339#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2340impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2341
2342#[cfg(not(no_global_oom_handling))]
2343impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2344 /// Makes a mutable reference into the given `Arc`.
2345 ///
2346 /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2347 /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
2348 /// referred to as clone-on-write.
2349 ///
2350 /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2351 /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2352 /// be cloned.
2353 ///
2354 /// See also [`get_mut`], which will fail rather than cloning the inner value
2355 /// or dissociating [`Weak`] pointers.
2356 ///
2357 /// [`clone`]: Clone::clone
2358 /// [`get_mut`]: Arc::get_mut
2359 ///
2360 /// # Examples
2361 ///
2362 /// ```
2363 /// use std::sync::Arc;
2364 ///
2365 /// let mut data = Arc::new(5);
2366 ///
2367 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2368 /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2369 /// *Arc::make_mut(&mut data) += 1; // Clones inner data
2370 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2371 /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
2372 ///
2373 /// // Now `data` and `other_data` point to different allocations.
2374 /// assert_eq!(*data, 8);
2375 /// assert_eq!(*other_data, 12);
2376 /// ```
2377 ///
2378 /// [`Weak`] pointers will be dissociated:
2379 ///
2380 /// ```
2381 /// use std::sync::Arc;
2382 ///
2383 /// let mut data = Arc::new(75);
2384 /// let weak = Arc::downgrade(&data);
2385 ///
2386 /// assert!(75 == *data);
2387 /// assert!(75 == *weak.upgrade().unwrap());
2388 ///
2389 /// *Arc::make_mut(&mut data) += 1;
2390 ///
2391 /// assert!(76 == *data);
2392 /// assert!(weak.upgrade().is_none());
2393 /// ```
2394 #[inline]
2395 #[stable(feature = "arc_unique", since = "1.4.0")]
2396 pub fn make_mut(this: &mut Self) -> &mut T {
2397 let size_of_val = size_of_val::<T>(&**this);
2398
2399 // Note that we hold both a strong reference and a weak reference.
2400 // Thus, releasing our strong reference only will not, by itself, cause
2401 // the memory to be deallocated.
2402 //
2403 // Use Acquire to ensure that we see any writes to `weak` that happen
2404 // before release writes (i.e., decrements) to `strong`. Since we hold a
2405 // weak count, there's no chance the ArcInner itself could be
2406 // deallocated.
2407 if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2408 // Another strong pointer exists, so we must clone.
2409
2410 let this_data_ref: &T = &**this;
2411 // `in_progress` drops the allocation if we panic before finishing initializing it.
2412 let mut in_progress: UniqueArcUninit<T, A> =
2413 UniqueArcUninit::new(this_data_ref, this.alloc.clone());
2414
2415 let initialized_clone = unsafe {
2416 // Clone. If the clone panics, `in_progress` will be dropped and clean up.
2417 this_data_ref.clone_to_uninit(in_progress.data_ptr().cast());
2418 // Cast type of pointer, now that it is initialized.
2419 in_progress.into_arc()
2420 };
2421 *this = initialized_clone;
2422 } else if this.inner().weak.load(Relaxed) != 1 {
2423 // Relaxed suffices in the above because this is fundamentally an
2424 // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2426
2427 // We removed the last strong ref, but there are additional weak
2428 // refs remaining. We'll move the contents to a new Arc, and
2429 // invalidate the other weak refs.
2430
2431 // Note that it is not possible for the read of `weak` to yield
2432 // usize::MAX (i.e., locked), since the weak count can only be
2433 // locked by a thread with a strong reference.
2434
2435 // Materialize our own implicit weak pointer, so that it can clean
2436 // up the ArcInner as needed.
2437 let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2438
2439 // Can just steal the data, all that's left is Weaks
2440 //
2441 // We don't need panic-protection like the above branch does, but we might as well
2442 // use the same mechanism.
2443 let mut in_progress: UniqueArcUninit<T, A> =
2444 UniqueArcUninit::new(&**this, this.alloc.clone());
2445 unsafe {
2446 // Initialize `in_progress` with move of **this.
2447 // We have to express this in terms of bytes because `T: ?Sized`; there is no
2448 // operation that just copies a value based on its `size_of_val()`.
2449 ptr::copy_nonoverlapping(
2450 ptr::from_ref(&**this).cast::<u8>(),
2451 in_progress.data_ptr().cast::<u8>(),
2452 size_of_val,
2453 );
2454
2455 ptr::write(this, in_progress.into_arc());
2456 }
2457 } else {
2458 // We were the sole reference of either kind; bump back up the
2459 // strong ref count.
2460 this.inner().strong.store(1, Release);
2461 }
2462
2463 // As with `get_mut()`, the unsafety is ok because our reference was
2464 // either unique to begin with, or became one upon cloning the contents.
2465 unsafe { Self::get_mut_unchecked(this) }
2466 }
2467}
2468
2469impl<T: Clone, A: Allocator> Arc<T, A> {
2470 /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2471 /// clone.
2472 ///
2473 /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2474 /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2475 ///
2476 /// # Examples
2477 ///
2478 /// ```
2479 /// # use std::{ptr, sync::Arc};
2480 /// let inner = String::from("test");
2481 /// let ptr = inner.as_ptr();
2482 ///
2483 /// let arc = Arc::new(inner);
2484 /// let inner = Arc::unwrap_or_clone(arc);
2485 /// // The inner value was not cloned
2486 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2487 ///
2488 /// let arc = Arc::new(inner);
2489 /// let arc2 = arc.clone();
2490 /// let inner = Arc::unwrap_or_clone(arc);
2491 /// // Because there were 2 references, we had to clone the inner value.
2492 /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2493 /// // `arc2` is the last reference, so when we unwrap it we get back
2494 /// // the original `String`.
2495 /// let inner = Arc::unwrap_or_clone(arc2);
2496 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2497 /// ```
2498 #[inline]
2499 #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2500 pub fn unwrap_or_clone(this: Self) -> T {
2501 Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2502 }
2503}
2504
2505impl<T: ?Sized, A: Allocator> Arc<T, A> {
2506 /// Returns a mutable reference into the given `Arc`, if there are
2507 /// no other `Arc` or [`Weak`] pointers to the same allocation.
2508 ///
2509 /// Returns [`None`] otherwise, because it is not safe to
2510 /// mutate a shared value.
2511 ///
2512 /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2513 /// the inner value when there are other `Arc` pointers.
2514 ///
2515 /// [make_mut]: Arc::make_mut
2516 /// [clone]: Clone::clone
2517 ///
2518 /// # Examples
2519 ///
2520 /// ```
2521 /// use std::sync::Arc;
2522 ///
2523 /// let mut x = Arc::new(3);
2524 /// *Arc::get_mut(&mut x).unwrap() = 4;
2525 /// assert_eq!(*x, 4);
2526 ///
2527 /// let _y = Arc::clone(&x);
2528 /// assert!(Arc::get_mut(&mut x).is_none());
2529 /// ```
2530 #[inline]
2531 #[stable(feature = "arc_unique", since = "1.4.0")]
2532 pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2533 if Self::is_unique(this) {
2534 // This unsafety is ok because we're guaranteed that the pointer
2535 // returned is the *only* pointer that will ever be returned to T. Our
2536 // reference count is guaranteed to be 1 at this point, and we required
2537 // the Arc itself to be `mut`, so we're returning the only possible
2538 // reference to the inner data.
2539 unsafe { Some(Arc::get_mut_unchecked(this)) }
2540 } else {
2541 None
2542 }
2543 }
2544
2545 /// Returns a mutable reference into the given `Arc`,
2546 /// without any check.
2547 ///
2548 /// See also [`get_mut`], which is safe and does appropriate checks.
2549 ///
2550 /// [`get_mut`]: Arc::get_mut
2551 ///
2552 /// # Safety
2553 ///
2554 /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2555 /// they must not be dereferenced or have active borrows for the duration
2556 /// of the returned borrow, and their inner type must be exactly the same as the
2557 /// inner type of this Arc (including lifetimes). This is trivially the case if no
2558 /// such pointers exist, for example immediately after `Arc::new`.
2559 ///
2560 /// # Examples
2561 ///
2562 /// ```
2563 /// #![feature(get_mut_unchecked)]
2564 ///
2565 /// use std::sync::Arc;
2566 ///
2567 /// let mut x = Arc::new(String::new());
2568 /// unsafe {
2569 /// Arc::get_mut_unchecked(&mut x).push_str("foo")
2570 /// }
2571 /// assert_eq!(*x, "foo");
2572 /// ```
2573 /// Other `Arc` pointers to the same allocation must be to the same type.
2574 /// ```no_run
2575 /// #![feature(get_mut_unchecked)]
2576 ///
2577 /// use std::sync::Arc;
2578 ///
2579 /// let x: Arc<str> = Arc::from("Hello, world!");
2580 /// let mut y: Arc<[u8]> = x.clone().into();
2581 /// unsafe {
2582 /// // this is Undefined Behavior, because x's inner type is str, not [u8]
2583 /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2584 /// }
2585 /// println!("{}", &*x); // Invalid UTF-8 in a str
2586 /// ```
2587 /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2588 /// ```no_run
2589 /// #![feature(get_mut_unchecked)]
2590 ///
2591 /// use std::sync::Arc;
2592 ///
2593 /// let x: Arc<&str> = Arc::new("Hello, world!");
2594 /// {
2595 /// let s = String::from("Oh, no!");
2596 /// let mut y: Arc<&str> = x.clone();
2597 /// unsafe {
2598 /// // this is Undefined Behavior, because x's inner type
2599 /// // is &'long str, not &'short str
2600 /// *Arc::get_mut_unchecked(&mut y) = &s;
2601 /// }
2602 /// }
2603 /// println!("{}", &*x); // Use-after-free
2604 /// ```
2605 #[inline]
2606 #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2607 pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2608 // We are careful to *not* create a reference covering the "count" fields, as
2609 // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2610 unsafe { &mut (*this.ptr.as_ptr()).data }
2611 }
2612
    /// Determines whether this is the unique reference to the underlying data.
2614 ///
2615 /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2616 /// returns `false` otherwise.
2617 ///
    /// If this function returns `true`, then it is guaranteed to be safe to call
    /// [`get_mut_unchecked`] on this `Arc`, so long as no clones occur in between.
2620 ///
2621 /// # Examples
2622 ///
2623 /// ```
2624 /// #![feature(arc_is_unique)]
2625 ///
2626 /// use std::sync::Arc;
2627 ///
2628 /// let x = Arc::new(3);
2629 /// assert!(Arc::is_unique(&x));
2630 ///
2631 /// let y = Arc::clone(&x);
2632 /// assert!(!Arc::is_unique(&x));
2633 /// drop(y);
2634 ///
2635 /// // Weak references also count, because they could be upgraded at any time.
2636 /// let z = Arc::downgrade(&x);
2637 /// assert!(!Arc::is_unique(&x));
2638 /// ```
2639 ///
2640 /// # Pointer invalidation
2641 ///
2642 /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2643 /// unlike that operation it does not produce any mutable references to the underlying data,
2644 /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2645 /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2646 ///
2647 /// ```
2648 /// #![feature(arc_is_unique)]
2649 ///
2650 /// use std::sync::Arc;
2651 ///
2652 /// let arc = Arc::new(5);
2653 /// let pointer: *const i32 = &*arc;
2654 /// assert!(Arc::is_unique(&arc));
2655 /// assert_eq!(unsafe { *pointer }, 5);
2656 /// ```
2657 ///
2658 /// # Atomic orderings
2659 ///
    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
2661 /// call - that is, this call performs an `Acquire` operation on the underlying strong and weak
2662 /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2663 ///
2664 /// Note that this operation requires locking the weak ref count, so concurrent calls to
2665 /// `downgrade` may spin-loop for a short period of time.
2666 ///
2667 /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2668 #[inline]
2669 #[unstable(feature = "arc_is_unique", issue = "138938")]
2670 pub fn is_unique(this: &Self) -> bool {
2671 // lock the weak pointer count if we appear to be the sole weak pointer
2672 // holder.
2673 //
2674 // The acquire label here ensures a happens-before relationship with any
2675 // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2676 // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2677 // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2678 if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2679 // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2680 // counter in `drop` -- the only access that happens when any but the last reference
2681 // is being dropped.
2682 let unique = this.inner().strong.load(Acquire) == 1;
2683
2684 // The release write here synchronizes with a read in `downgrade`,
2685 // effectively preventing the above read of `strong` from happening
2686 // after the write.
2687 this.inner().weak.store(1, Release); // release the lock
2688 unique
2689 } else {
2690 false
2691 }
2692 }
2693}
2694
2695#[stable(feature = "rust1", since = "1.0.0")]
2696unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2697 /// Drops the `Arc`.
2698 ///
2699 /// This will decrement the strong reference count. If the strong reference
2700 /// count reaches zero then the only other references (if any) are
2701 /// [`Weak`], so we `drop` the inner value.
2702 ///
2703 /// # Examples
2704 ///
2705 /// ```
2706 /// use std::sync::Arc;
2707 ///
2708 /// struct Foo;
2709 ///
2710 /// impl Drop for Foo {
2711 /// fn drop(&mut self) {
2712 /// println!("dropped!");
2713 /// }
2714 /// }
2715 ///
2716 /// let foo = Arc::new(Foo);
2717 /// let foo2 = Arc::clone(&foo);
2718 ///
2719 /// drop(foo); // Doesn't print anything
2720 /// drop(foo2); // Prints "dropped!"
2721 /// ```
2722 #[inline]
2723 fn drop(&mut self) {
2724 // Because `fetch_sub` is already atomic, we do not need to synchronize
2725 // with other threads unless we are going to delete the object. This
2726 // same logic applies to the below `fetch_sub` to the `weak` count.
2727 if self.inner().strong.fetch_sub(1, Release) != 1 {
2728 return;
2729 }
2730
2731 // This fence is needed to prevent reordering of use of the data and
2732 // deletion of the data. Because it is marked `Release`, the decreasing
2733 // of the reference count synchronizes with this `Acquire` fence. This
2734 // means that use of the data happens before decreasing the reference
2735 // count, which happens before this fence, which happens before the
2736 // deletion of the data.
2737 //
2738 // As explained in the [Boost documentation][1],
2739 //
2740 // > It is important to enforce any possible access to the object in one
2741 // > thread (through an existing reference) to *happen before* deleting
2742 // > the object in a different thread. This is achieved by a "release"
2743 // > operation after dropping a reference (any access to the object
2744 // > through this reference must obviously happened before), and an
2745 // > "acquire" operation before deleting the object.
2746 //
2747 // In particular, while the contents of an Arc are usually immutable, it's
2748 // possible to have interior writes to something like a Mutex<T>. Since a
2749 // Mutex is not acquired when it is deleted, we can't rely on its
2750 // synchronization logic to make writes in thread A visible to a destructor
2751 // running in thread B.
2752 //
2753 // Also note that the Acquire fence here could probably be replaced with an
2754 // Acquire load, which could improve performance in highly-contended
2755 // situations. See [2].
2756 //
2757 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2758 // [2]: (https://github.com/rust-lang/rust/pull/41714)
2759 acquire!(self.inner().strong);
2760
2761 // Make sure we aren't trying to "drop" the shared static for empty slices
2762 // used by Default::default.
2763 debug_assert!(
2764 !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2765 "Arcs backed by a static should never reach a strong count of 0. \
2766 Likely decrement_strong_count or from_raw were called too many times.",
2767 );
2768
2769 unsafe {
2770 self.drop_slow();
2771 }
2772 }
2773}
2774
2775impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2776 /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2777 ///
2778 /// # Examples
2779 ///
2780 /// ```
2781 /// use std::any::Any;
2782 /// use std::sync::Arc;
2783 ///
2784 /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2785 /// if let Ok(string) = value.downcast::<String>() {
2786 /// println!("String ({}): {}", string.len(), string);
2787 /// }
2788 /// }
2789 ///
2790 /// let my_string = "Hello World".to_string();
2791 /// print_if_string(Arc::new(my_string));
2792 /// print_if_string(Arc::new(0i8));
2793 /// ```
2794 #[inline]
2795 #[stable(feature = "rc_downcast", since = "1.29.0")]
2796 pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2797 where
2798 T: Any + Send + Sync,
2799 {
2800 if (*self).is::<T>() {
2801 unsafe {
2802 let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2803 Ok(Arc::from_inner_in(ptr.cast(), alloc))
2804 }
2805 } else {
2806 Err(self)
2807 }
2808 }
2809
2810 /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2811 ///
2812 /// For a safe alternative see [`downcast`].
2813 ///
2814 /// # Examples
2815 ///
2816 /// ```
2817 /// #![feature(downcast_unchecked)]
2818 ///
2819 /// use std::any::Any;
2820 /// use std::sync::Arc;
2821 ///
2822 /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2823 ///
2824 /// unsafe {
2825 /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2826 /// }
2827 /// ```
2828 ///
2829 /// # Safety
2830 ///
2831 /// The contained value must be of type `T`. Calling this method
2832 /// with the incorrect type is *undefined behavior*.
    ///
2835 /// [`downcast`]: Self::downcast
2836 #[inline]
2837 #[unstable(feature = "downcast_unchecked", issue = "90850")]
2838 pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2839 where
2840 T: Any + Send + Sync,
2841 {
2842 unsafe {
2843 let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2844 Arc::from_inner_in(ptr.cast(), alloc)
2845 }
2846 }
2847}
2848
2849impl<T> Weak<T> {
2850 /// Constructs a new `Weak<T>`, without allocating any memory.
2851 /// Calling [`upgrade`] on the return value always gives [`None`].
2852 ///
2853 /// [`upgrade`]: Weak::upgrade
2854 ///
2855 /// # Examples
2856 ///
2857 /// ```
2858 /// use std::sync::Weak;
2859 ///
2860 /// let empty: Weak<i64> = Weak::new();
2861 /// assert!(empty.upgrade().is_none());
2862 /// ```
2863 #[inline]
2864 #[stable(feature = "downgraded_weak", since = "1.10.0")]
2865 #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2866 #[must_use]
2867 pub const fn new() -> Weak<T> {
2868 Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2869 }
2870}
2871
2872impl<T, A: Allocator> Weak<T, A> {
    /// Constructs a new `Weak<T, A>` that is associated with the provided allocator, without
    /// allocating any memory.
2875 /// Calling [`upgrade`] on the return value always gives [`None`].
2876 ///
2877 /// [`upgrade`]: Weak::upgrade
2878 ///
2879 /// # Examples
2880 ///
2881 /// ```
2882 /// #![feature(allocator_api)]
2883 ///
2884 /// use std::sync::Weak;
2885 /// use std::alloc::System;
2886 ///
2887 /// let empty: Weak<i64, _> = Weak::new_in(System);
2888 /// assert!(empty.upgrade().is_none());
2889 /// ```
2890 #[inline]
2891 #[unstable(feature = "allocator_api", issue = "32838")]
2892 pub fn new_in(alloc: A) -> Weak<T, A> {
2893 Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2894 }
2895}
2896
2897/// Helper type to allow accessing the reference counts without
2898/// making any assertions about the data field.
2899struct WeakInner<'a> {
2900 weak: &'a Atomic<usize>,
2901 strong: &'a Atomic<usize>,
2902}
2903
2904impl<T: ?Sized> Weak<T> {
2905 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2906 ///
2907 /// This can be used to safely get a strong reference (by calling [`upgrade`]
2908 /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2909 ///
2910 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2911 /// as these don't own anything; the method still works on them).
2912 ///
2913 /// # Safety
2914 ///
    /// The pointer must have originated from [`into_raw`], must still own its potential
    /// weak reference, and must point to a block of memory allocated by the global allocator.
2917 ///
2918 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2919 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2920 /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
2922 /// # Examples
2923 ///
2924 /// ```
2925 /// use std::sync::{Arc, Weak};
2926 ///
2927 /// let strong = Arc::new("hello".to_owned());
2928 ///
2929 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2930 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2931 ///
2932 /// assert_eq!(2, Arc::weak_count(&strong));
2933 ///
2934 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2935 /// assert_eq!(1, Arc::weak_count(&strong));
2936 ///
2937 /// drop(strong);
2938 ///
2939 /// // Decrement the last weak count.
2940 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2941 /// ```
2942 ///
2943 /// [`new`]: Weak::new
2944 /// [`into_raw`]: Weak::into_raw
2945 /// [`upgrade`]: Weak::upgrade
2946 #[inline]
2947 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2948 pub unsafe fn from_raw(ptr: *const T) -> Self {
2949 unsafe { Weak::from_raw_in(ptr, Global) }
2950 }
2951
2952 /// Consumes the `Weak<T>` and turns it into a raw pointer.
2953 ///
2954 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2955 /// one weak reference (the weak count is not modified by this operation). It can be turned
2956 /// back into the `Weak<T>` with [`from_raw`].
2957 ///
2958 /// The same restrictions of accessing the target of the pointer as with
2959 /// [`as_ptr`] apply.
2960 ///
2961 /// # Examples
2962 ///
2963 /// ```
2964 /// use std::sync::{Arc, Weak};
2965 ///
2966 /// let strong = Arc::new("hello".to_owned());
2967 /// let weak = Arc::downgrade(&strong);
2968 /// let raw = weak.into_raw();
2969 ///
2970 /// assert_eq!(1, Arc::weak_count(&strong));
2971 /// assert_eq!("hello", unsafe { &*raw });
2972 ///
2973 /// drop(unsafe { Weak::from_raw(raw) });
2974 /// assert_eq!(0, Arc::weak_count(&strong));
2975 /// ```
2976 ///
2977 /// [`from_raw`]: Weak::from_raw
2978 /// [`as_ptr`]: Weak::as_ptr
2979 #[must_use = "losing the pointer will leak memory"]
2980 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2981 pub fn into_raw(self) -> *const T {
2982 ManuallyDrop::new(self).as_ptr()
2983 }
2984}
2985
2986impl<T: ?Sized, A: Allocator> Weak<T, A> {
2987 /// Returns a reference to the underlying allocator.
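    ///
    /// # Examples
    ///
    /// A minimal illustration (assumes the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let strong = Arc::new_in(5, System);
    /// let weak = Arc::downgrade(&strong);
    /// let _alloc: &System = weak.allocator();
    /// ```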
2988 #[inline]
2989 #[unstable(feature = "allocator_api", issue = "32838")]
2990 pub fn allocator(&self) -> &A {
2991 &self.alloc
2992 }
2993
2994 /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2995 ///
2996 /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2997 /// unaligned or even [`null`] otherwise.
2998 ///
2999 /// # Examples
3000 ///
3001 /// ```
3002 /// use std::sync::Arc;
3003 /// use std::ptr;
3004 ///
3005 /// let strong = Arc::new("hello".to_owned());
3006 /// let weak = Arc::downgrade(&strong);
3007 /// // Both point to the same object
3008 /// assert!(ptr::eq(&*strong, weak.as_ptr()));
3009 /// // The strong here keeps it alive, so we can still access the object.
3010 /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
3011 ///
3012 /// drop(strong);
3013 /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
3014 /// // undefined behavior.
3015 /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
3016 /// ```
3017 ///
3018 /// [`null`]: core::ptr::null "ptr::null"
3019 #[must_use]
3020 #[stable(feature = "weak_into_raw", since = "1.45.0")]
3021 pub fn as_ptr(&self) -> *const T {
3022 let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
3023
3024 if is_dangling(ptr) {
3025 // If the pointer is dangling, we return the sentinel directly. This cannot be
3026 // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
3027 ptr as *const T
3028 } else {
3029 // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
3030 // The payload may be dropped at this point, and we have to maintain provenance,
3031 // so use raw pointer manipulation.
3032 unsafe { &raw mut (*ptr).data }
3033 }
3034 }
3035
3036 /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
3037 ///
3038 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
3039 /// one weak reference (the weak count is not modified by this operation). It can be turned
3040 /// back into the `Weak<T>` with [`from_raw_in`].
3041 ///
3042 /// The same restrictions of accessing the target of the pointer as with
3043 /// [`as_ptr`] apply.
3044 ///
3045 /// # Examples
3046 ///
3047 /// ```
3048 /// #![feature(allocator_api)]
3049 /// use std::sync::{Arc, Weak};
3050 /// use std::alloc::System;
3051 ///
3052 /// let strong = Arc::new_in("hello".to_owned(), System);
3053 /// let weak = Arc::downgrade(&strong);
3054 /// let (raw, alloc) = weak.into_raw_with_allocator();
3055 ///
3056 /// assert_eq!(1, Arc::weak_count(&strong));
3057 /// assert_eq!("hello", unsafe { &*raw });
3058 ///
3059 /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
3060 /// assert_eq!(0, Arc::weak_count(&strong));
3061 /// ```
3062 ///
3063 /// [`from_raw_in`]: Weak::from_raw_in
3064 /// [`as_ptr`]: Weak::as_ptr
3065 #[must_use = "losing the pointer will leak memory"]
3066 #[unstable(feature = "allocator_api", issue = "32838")]
3067 pub fn into_raw_with_allocator(self) -> (*const T, A) {
3068 let this = mem::ManuallyDrop::new(self);
3069 let result = this.as_ptr();
3070 // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
3071 let alloc = unsafe { ptr::read(&this.alloc) };
3072 (result, alloc)
3073 }
3074
3075 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
3076 /// allocator.
3077 ///
3078 /// This can be used to safely get a strong reference (by calling [`upgrade`]
3079 /// later) or to deallocate the weak count by dropping the `Weak<T>`.
3080 ///
3081 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
3082 /// as these don't own anything; the method still works on them).
3083 ///
3084 /// # Safety
3085 ///
    /// The pointer must have originated from [`into_raw`], must still own its potential
    /// weak reference, and must point to a block of memory allocated by `alloc`.
3088 ///
3089 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
3090 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
3091 /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
3093 /// # Examples
3094 ///
3095 /// ```
3096 /// use std::sync::{Arc, Weak};
3097 ///
3098 /// let strong = Arc::new("hello".to_owned());
3099 ///
3100 /// let raw_1 = Arc::downgrade(&strong).into_raw();
3101 /// let raw_2 = Arc::downgrade(&strong).into_raw();
3102 ///
3103 /// assert_eq!(2, Arc::weak_count(&strong));
3104 ///
3105 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
3106 /// assert_eq!(1, Arc::weak_count(&strong));
3107 ///
3108 /// drop(strong);
3109 ///
3110 /// // Decrement the last weak count.
3111 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
3112 /// ```
3113 ///
3114 /// [`new`]: Weak::new
3115 /// [`into_raw`]: Weak::into_raw
3116 /// [`upgrade`]: Weak::upgrade
3117 #[inline]
3118 #[unstable(feature = "allocator_api", issue = "32838")]
3119 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3120 // See Weak::as_ptr for context on how the input pointer is derived.
3121
        let ptr = if is_dangling(ptr) {
            // This is a dangling Weak.
            ptr as *mut ArcInner<T>
        } else {
            // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
            // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
            let offset = unsafe { data_offset(ptr) };
            // Thus, we reverse the offset to get the whole ArcInner.
            // SAFETY: the pointer originated from a Weak, so this offset is safe.
            unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
        };

        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
    }
}

impl<T: ?Sized, A: Allocator> Weak<T, A> {
    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
    /// dropping of the inner value if successful.
    ///
    /// Returns [`None`] if the inner value has since been dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
    /// assert!(strong_five.is_some());
    ///
    /// // Destroy all strong pointers.
    /// drop(strong_five);
    /// drop(five);
    ///
    /// assert!(weak_five.upgrade().is_none());
    /// ```
    #[must_use = "this returns a new `Arc`, \
                  without modifying the original weak pointer"]
    #[stable(feature = "arc_weak", since = "1.4.0")]
    pub fn upgrade(&self) -> Option<Arc<T, A>>
    where
        A: Clone,
    {
        #[inline]
        fn checked_increment(n: usize) -> Option<usize> {
            // Any write of 0 we can observe leaves the field in a permanently zero state.
            if n == 0 {
                return None;
            }
            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
            Some(n + 1)
        }

        // We use a CAS loop to increment the strong count instead of a
        // fetch_add as this function should never take the reference count
        // from zero to one.
        //
        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
        // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
        // value can be initialized after `Weak` references have already been created. In that case, we
        // expect to observe the fully initialized value.
        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
            // SAFETY: pointer is not null, verified in checked_increment
            unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
        } else {
            None
        }
    }

    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
    ///
    /// If `self` was created using [`Weak::new`], this will return 0.
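    ///
    /// # Examples
    ///
    /// A minimal sketch of how the count follows the strong handles:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```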
    #[must_use]
    #[stable(feature = "weak_counts", since = "1.41.0")]
    pub fn strong_count(&self) -> usize {
        if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
    }

    /// Gets an approximation of the number of `Weak` pointers pointing to this
    /// allocation.
    ///
    /// If `self` was created using [`Weak::new`], or if there are no remaining
    /// strong pointers, this will return 0.
    ///
    /// # Accuracy
    ///
    /// Due to implementation details, the returned value can be off by 1 in
    /// either direction when other threads are manipulating any `Arc`s or
    /// `Weak`s pointing to the same allocation.
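    ///
    /// # Examples
    ///
    /// A minimal single-threaded sketch, where the count is exact:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// let also_weak = weak_five.clone();
    /// assert_eq!(2, also_weak.weak_count());
    /// ```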
    #[must_use]
    #[stable(feature = "weak_counts", since = "1.41.0")]
    pub fn weak_count(&self) -> usize {
        if let Some(inner) = self.inner() {
            let weak = inner.weak.load(Acquire);
            let strong = inner.strong.load(Relaxed);
            if strong == 0 {
                0
            } else {
                // Since we observed that there was at least one strong pointer
                // after reading the weak count, we know that the implicit weak
                // reference (present whenever any strong references are alive)
                // was still around when we observed the weak count, and can
                // therefore safely subtract it.
                weak - 1
            }
        } else {
            0
        }
    }

    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
    #[inline]
    fn inner(&self) -> Option<WeakInner<'_>> {
        let ptr = self.ptr.as_ptr();
        if is_dangling(ptr) {
            None
        } else {
            // We are careful to *not* create a reference covering the "data" field, as
            // the field may be mutated concurrently (for example, if the last `Arc`
            // is dropped, the data field will be dropped in-place).
            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
        }
    }

    /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if
    /// both don't point to any allocation (because they were created with `Weak::new()`). However,
    /// this function ignores the metadata of `dyn Trait` pointers.
    ///
    /// # Notes
    ///
    /// Since this compares pointers, two `Weak`s created with `Weak::new()` will
    /// compare equal to each other, even though they don't point to any allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let first_rc = Arc::new(5);
    /// let first = Arc::downgrade(&first_rc);
    /// let second = Arc::downgrade(&first_rc);
    ///
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(5);
    /// let third = Arc::downgrade(&third_rc);
    ///
    /// assert!(!first.ptr_eq(&third));
    /// ```
    ///
    /// Comparing `Weak::new`.
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let first = Weak::new();
    /// let second = Weak::new();
    /// assert!(first.ptr_eq(&second));
    ///
    /// let third_rc = Arc::new(());
    /// let third = Arc::downgrade(&third_rc);
    /// assert!(!first.ptr_eq(&third));
    /// ```
    ///
    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
    #[inline]
    #[must_use]
    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
    pub fn ptr_eq(&self, other: &Self) -> bool {
        ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
    /// Makes a clone of the `Weak` pointer that points to the same allocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let weak_five = Arc::downgrade(&Arc::new(5));
    ///
    /// let _ = Weak::clone(&weak_five);
    /// ```
    #[inline]
    fn clone(&self) -> Weak<T, A> {
        if let Some(inner) = self.inner() {
            // See comments in Arc::clone() for why this is relaxed. This can use a
            // fetch_add (ignoring the lock) because the weak count is only locked
            // when there are *no other* weak pointers in existence. (So we can't be
            // running this code in that case.)
            let old_size = inner.weak.fetch_add(1, Relaxed);

            // See comments in Arc::clone() for why we do this (for mem::forget).
            if old_size > MAX_REFCOUNT {
                abort();
            }
        }

        Weak { ptr: self.ptr, alloc: self.alloc.clone() }
    }
}

#[unstable(feature = "ergonomic_clones", issue = "132290")]
impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}

#[stable(feature = "downgraded_weak", since = "1.10.0")]
impl<T> Default for Weak<T> {
    /// Constructs a new `Weak<T>`, without allocating memory.
    /// Calling [`upgrade`] on the return value always
    /// gives [`None`].
    ///
    /// [`upgrade`]: Weak::upgrade
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Weak;
    ///
    /// let empty: Weak<i64> = Default::default();
    /// assert!(empty.upgrade().is_none());
    /// ```
    fn default() -> Weak<T> {
        Weak::new()
    }
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
    /// Drops the `Weak` pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Foo;
    ///
    /// impl Drop for Foo {
    ///     fn drop(&mut self) {
    ///         println!("dropped!");
    ///     }
    /// }
    ///
    /// let foo = Arc::new(Foo);
    /// let weak_foo = Arc::downgrade(&foo);
    /// let other_weak_foo = Weak::clone(&weak_foo);
    ///
    /// drop(weak_foo); // Doesn't print anything
    /// drop(foo); // Prints "dropped!"
    ///
    /// assert!(other_weak_foo.upgrade().is_none());
    /// ```
    fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
        //
        // It's not necessary to check for the locked state here, because the
        // weak count can only be locked if there was precisely one weak ref,
        // meaning that drop could only subsequently run ON that remaining weak
        // ref, which can only happen after the lock is released.
        let inner = if let Some(inner) = self.inner() { inner } else { return };

        if inner.weak.fetch_sub(1, Release) == 1 {
            acquire!(inner.weak);

            // Make sure we aren't trying to "deallocate" the shared static for empty slices
            // used by Default::default.
            debug_assert!(
                !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
                "Arc/Weaks backed by a static should never be deallocated. \
                Likely decrement_strong_count or from_raw were called too many times.",
            );

            unsafe {
                self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
            }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
    fn eq(&self, other: &Arc<T, A>) -> bool;
    fn ne(&self, other: &Arc<T, A>) -> bool;
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
    #[inline]
    default fn eq(&self, other: &Arc<T, A>) -> bool {
        **self == **other
    }
    #[inline]
    default fn ne(&self, other: &Arc<T, A>) -> bool {
        **self != **other
    }
}

/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone, but also heavy to check for equality, causing this
/// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
/// the same value than two `&T`s.
///
/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
    #[inline]
    fn eq(&self, other: &Arc<T, A>) -> bool {
        Arc::ptr_eq(self, other) || **self == **other
    }

    #[inline]
    fn ne(&self, other: &Arc<T, A>) -> bool {
        !Arc::ptr_eq(self, other) && **self != **other
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
    /// Equality for two `Arc`s.
    ///
    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are always equal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five == Arc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Arc<T, A>) -> bool {
        ArcEqIdent::eq(self, other)
    }

    /// Inequality for two `Arc`s.
    ///
    /// Two `Arc`s are not equal if their inner values are not equal.
    ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are never unequal.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five != Arc::new(6));
    /// ```
    #[inline]
    fn ne(&self, other: &Arc<T, A>) -> bool {
        ArcEqIdent::ne(self, other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
    /// Partial comparison for two `Arc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
    /// ```
    fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five < Arc::new(6));
    /// ```
    fn lt(&self, other: &Arc<T, A>) -> bool {
        *(*self) < *(*other)
    }

    /// 'Less than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five <= Arc::new(5));
    /// ```
    fn le(&self, other: &Arc<T, A>) -> bool {
        *(*self) <= *(*other)
    }

    /// Greater-than comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five > Arc::new(4));
    /// ```
    fn gt(&self, other: &Arc<T, A>) -> bool {
        *(*self) > *(*other)
    }

    /// 'Greater than or equal to' comparison for two `Arc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert!(five >= Arc::new(5));
    /// ```
    fn ge(&self, other: &Arc<T, A>) -> bool {
        *(*self) >= *(*other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&raw const **self), f)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```
    fn default() -> Arc<T> {
        unsafe {
            Self::from_inner(
                Box::leak(Box::write(
                    Box::new_uninit(),
                    ArcInner {
                        strong: atomic::AtomicUsize::new(1),
                        weak: atomic::AtomicUsize::new(1),
                        data: T::default(),
                    },
                ))
                .into(),
            )
        }
    }
}

/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
/// returned by `Default::default`.
///
/// Layout notes:
/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
#[repr(C, align(16))]
struct SliceArcInnerForStatic {
    inner: ArcInner<[u8; 1]>,
}
#[cfg(not(no_global_oom_handling))]
const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;

static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
    inner: ArcInner {
        strong: atomic::AtomicUsize::new(1),
        weak: atomic::AtomicUsize::new(1),
        data: [0],
    },
};

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
impl Default for Arc<str> {
    /// Creates an empty `str` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
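    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let s: Arc<str> = Default::default();
    /// assert_eq!("", &s[..]);
    /// ```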
    #[inline]
    fn default() -> Self {
        let arc: Arc<[u8]> = Default::default();
        debug_assert!(core::str::from_utf8(&*arc).is_ok());
        let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
        unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
impl Default for Arc<core::ffi::CStr> {
    /// Creates an empty `CStr` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
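    ///
    /// # Examples
    ///
    /// ```
    /// use std::ffi::CStr;
    /// use std::sync::Arc;
    ///
    /// let c: Arc<CStr> = Default::default();
    /// assert!(c.to_bytes().is_empty());
    /// ```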
    #[inline]
    fn default() -> Self {
        use core::ffi::CStr;
        let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
        let inner: NonNull<ArcInner<CStr>> =
            NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
        // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
        let this: mem::ManuallyDrop<Arc<CStr>> =
            unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
        (*this).clone()
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
impl<T> Default for Arc<[T]> {
    /// Creates an empty `[T]` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
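    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let s: Arc<[i32]> = Default::default();
    /// assert!(s.is_empty());
    /// ```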
    #[inline]
    fn default() -> Self {
        if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
            // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
            // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
            // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
            // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
            let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
            let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
            // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
            let this: mem::ManuallyDrop<Arc<[T; 0]>> =
                unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
            return (*this).clone();
        }

        // If T's alignment is too large for the static, make a new unique allocation.
        let arr: [T; 0] = [];
        Arc::from(arr)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "pin_default_impls", since = "1.91.0")]
impl<T> Default for Pin<Arc<T>>
where
    T: ?Sized,
    Arc<T>: Default,
{
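    /// Creates a pinned `Arc` from `Arc<T>`'s `Default` implementation.
    ///
    /// # Examples
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::sync::Arc;
    ///
    /// let p: Pin<Arc<i32>> = Default::default();
    /// assert_eq!(0, *p);
    /// ```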
    #[inline]
    fn default() -> Self {
        unsafe { Pin::new_unchecked(Arc::<T>::default()) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    /// Converts a `T` into an `Arc<T>`.
    ///
    /// The conversion moves the value into a
    /// newly allocated `Arc`. It is equivalent to
    /// calling `Arc::new(t)`.
    ///
    /// # Example
    /// ```rust
    /// # use std::sync::Arc;
    /// let x = 5;
    /// let arc = Arc::new(5);
    ///
    /// assert_eq!(Arc::from(x), arc);
    /// ```
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_array", since = "1.74.0")]
impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
    ///
    /// The conversion moves the array into a newly allocated `Arc`.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let original: [i32; 3] = [1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: [T; N]) -> Arc<[T]> {
        Arc::<[T; N]>::from(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let original: &[i32] = &[1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
impl<T: Clone> From<&mut [T]> for Arc<[T]> {
    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let mut original = [1, 2, 3];
    /// let original: &mut [i32] = &mut original;
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: &mut [T]) -> Arc<[T]> {
        Arc::from(&*v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<&str> for Arc<str> {
    /// Allocates a reference-counted `str` and copies `v` into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let shared: Arc<str> = Arc::from("eggplant");
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
impl From<&mut str> for Arc<str> {
    /// Allocates a reference-counted `str` and copies `v` into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let mut original = String::from("eggplant");
    /// let original: &mut str = &mut original;
    /// let shared: Arc<str> = Arc::from(original);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: &mut str) -> Arc<str> {
        Arc::from(&*v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    /// Allocates a reference-counted `str` and copies `v` into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: String = "eggplant".to_owned();
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
    /// Moves a boxed object to a new, reference-counted allocation.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: Box<str> = Box::from("eggplant");
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: Box<T, A>) -> Arc<T, A> {
        Arc::from_box_in(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
    /// Allocates a reference-counted slice and moves `v`'s items into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: Vec<i32> = vec![1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(unique);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: Vec<T, A>) -> Arc<[T], A> {
        unsafe {
            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();

            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
            ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);

            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
            // without dropping its contents or the allocator.
            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);

            Self::from_ptr_in(rc_ptr, alloc)
        }
    }
}

#[stable(feature = "shared_from_cow", since = "1.45.0")]
impl<'a, B> From<Cow<'a, B>> for Arc<B>
where
    B: ToOwned + ?Sized,
    Arc<B>: From<&'a B> + From<B::Owned>,
{
    /// Creates an atomically reference-counted pointer from a clone-on-write
    /// pointer by copying its content.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use std::borrow::Cow;
    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
    /// let shared: Arc<str> = Arc::from(cow);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(cow: Cow<'a, B>) -> Arc<B> {
        match cow {
            Cow::Borrowed(s) => Arc::from(s),
            Cow::Owned(s) => Arc::from(s),
        }
    }
}

#[stable(feature = "shared_from_str", since = "1.62.0")]
impl From<Arc<str>> for Arc<[u8]> {
    /// Converts an atomically reference-counted string slice into a byte slice.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let string: Arc<str> = Arc::from("eggplant");
    /// let bytes: Arc<[u8]> = Arc::from(string);
    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
    /// ```
    #[inline]
    fn from(rc: Arc<str>) -> Self {
        // SAFETY: `str` has the same layout as `[u8]`.
        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
    }
}

#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
    type Error = Arc<[T], A>;

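    /// Attempts to convert a reference-counted slice into a reference-counted array,
    /// returning the original `Arc` unchanged if the length does not match.
    ///
    /// A minimal illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```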
    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
        if boxed_slice.len() == N {
            let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
        } else {
            Err(boxed_slice)
        }
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> FromIterator<T> for Arc<[T]> {
    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
    ///
    /// # Performance characteristics
    ///
    /// ## The general case
    ///
    /// In the general case, collecting into `Arc<[T]>` is done by first
    /// collecting into a `Vec<T>`. That is, when writing the following:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// this behaves as if we wrote:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// This will allocate as many times as needed for constructing the `Vec<T>`
    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
    ///
    /// ## Iterators of known length
    ///
    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
    /// a single allocation will be made for the `Arc<[T]>`. For example:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
    /// ```
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        ToArcSlice::to_arc_slice(iter.into_iter())
    }
}

#[cfg(not(no_global_oom_handling))]
/// Specialization trait used for collecting into `Arc<[T]>`.
trait ToArcSlice<T>: Iterator<Item = T> + Sized {
    fn to_arc_slice(self) -> Arc<[T]>;
}

#[cfg(not(no_global_oom_handling))]
impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
    default fn to_arc_slice(self) -> Arc<[T]> {
        self.collect::<Vec<T>>().into()
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
    fn to_arc_slice(self) -> Arc<[T]> {
        // This is the case for a `TrustedLen` iterator.
        let (low, high) = self.size_hint();
        if let Some(high) = high {
            debug_assert_eq!(
                low,
                high,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );

            unsafe {
                // SAFETY: We need to ensure that the iterator has an exact length,
                // and we have checked that above.
                Arc::from_iter_exact(self, low)
            }
        } else {
            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
            // length exceeding `usize::MAX`.
            // The default implementation would collect into a vec which would panic.
            // Thus we panic here immediately without invoking `Vec` code.
            panic!("capacity overflow");
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}

/// Gets the offset within an `ArcInner` for the payload behind a pointer.
///
/// # Safety
///
/// The pointer must point to (and have valid metadata for) a previously
/// valid instance of T, but the T is allowed to be dropped.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
    // Align the unsized value to the end of the ArcInner.
    // Because ArcInner is repr(C), it will always be the last field in memory.
    // SAFETY: since the only unsized types possible are slices, trait objects,
    // and extern types, the input safety requirement is currently enough to
    // satisfy the requirements of align_of_val_raw; this is an implementation
    // detail of the language that must not be relied upon outside of std.
    unsafe { data_offset_align(align_of_val_raw(ptr)) }
}

#[inline]
fn data_offset_align(align: usize) -> usize {
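    // The payload is placed directly after the `ArcInner` header, padded up to the
    // payload's own alignment. Illustrative sketch (assuming a 64-bit target, where
    // `ArcInner<()>` is two `AtomicUsize`s, i.e. 16 bytes): for `align <= 16` no
    // padding is needed and the offset is 16; for `align == 32` it is padded to 32.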
    let layout = Layout::new::<ArcInner<()>>();
    layout.size() + layout.padding_needed_for(align)
}

/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
/// but will deallocate it (without dropping the value) when dropped.
///
/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
#[cfg(not(no_global_oom_handling))]
struct UniqueArcUninit<T: ?Sized, A: Allocator> {
    ptr: NonNull<ArcInner<T>>,
    layout_for_value: Layout,
    alloc: Option<A>,
}

#[cfg(not(no_global_oom_handling))]
impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
    fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
        let layout = Layout::for_value(for_value);
        let ptr = unsafe {
            Arc::allocate_for_layout(
                layout,
                |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
                |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
            )
        };
        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
    }

    /// Returns the pointer to be written into to initialize the [`Arc`].
    fn data_ptr(&mut self) -> *mut T {
        let offset = data_offset_align(self.layout_for_value.align());
        unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
    }

    /// Upgrade this into a normal [`Arc`].
    ///
    /// # Safety
    ///
    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
    unsafe fn into_arc(self) -> Arc<T, A> {
        let mut this = ManuallyDrop::new(self);
        let ptr = this.ptr.as_ptr();
        let alloc = this.alloc.take().unwrap();

        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
        // for having initialized the data.
        unsafe { Arc::from_ptr_in(ptr, alloc) }
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
    fn drop(&mut self) {
        // SAFETY:
        // * new() produced a pointer safe to deallocate.
        // * We own the pointer unless into_arc() was called, which forgets us.
        unsafe {
            self.alloc.take().unwrap().deallocate(
                self.ptr.cast(),
                arcinner_layout_for_value_layout(self.layout_for_value),
            );
        }
    }
}

#[stable(feature = "arc_error", since = "1.52.0")]
impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
    #[allow(deprecated)]
    fn cause(&self) -> Option<&dyn core::error::Error> {
        core::error::Error::cause(&**self)
    }

    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
        core::error::Error::source(&**self)
    }

    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
        core::error::Error::provide(&**self, req);
    }
}

/// A uniquely owned [`Arc`].
///
/// This represents an `Arc` that is known to be uniquely owned -- that is, to have exactly one
/// strong reference. Multiple weak pointers can be created, but attempts to upgrade those to strong
/// references will fail unless the `UniqueArc` they point to has been converted into a regular `Arc`.
///
/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
/// use case is to have an object be mutable during its initialization phase but then have it become
/// immutable and converted to a normal `Arc`.
///
/// This can be used as a flexible way to create cyclic data structures, as in the example below.
///
/// ```
/// #![feature(unique_rc_arc)]
/// use std::sync::{Arc, Weak, UniqueArc};
///
/// struct Gadget {
///     me: Weak<Gadget>,
/// }
///
/// fn create_gadget() -> Option<Arc<Gadget>> {
///     let mut rc = UniqueArc::new(Gadget {
///         me: Weak::new(),
///     });
///     rc.me = UniqueArc::downgrade(&rc);
///     Some(UniqueArc::into_arc(rc))
/// }
///
/// create_gadget().unwrap();
/// ```
///
/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is that
/// [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown in the
/// previous example, `UniqueArc` allows for more flexibility in the construction of cyclic data,
/// including fallible or async constructors.
#[unstable(feature = "unique_rc_arc", issue = "112566")]
pub struct UniqueArc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    // Define the ownership of `ArcInner<T>` for drop-check.
    _marker: PhantomData<ArcInner<T>>,
    // Invariance is necessary for soundness: once other `Weak`
    // references exist, we already have a form of shared mutability!
    _marker2: PhantomData<*mut T>,
    alloc: A,
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
    for UniqueArc<T, A>
{
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(&raw const **self), f)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
    fn as_mut(&mut self) -> &mut T {
        &mut **self
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
    /// Equality for two `UniqueArc`s.
    ///
    /// Two `UniqueArc`s are equal if their inner values are equal.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five == UniqueArc::new(5));
    /// ```
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&**self, &**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
    /// Partial comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `partial_cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
    /// ```
    #[inline(always)]
    fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    /// Less-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five < UniqueArc::new(6));
    /// ```
    #[inline(always)]
    fn lt(&self, other: &UniqueArc<T, A>) -> bool {
        **self < **other
    }

    /// 'Less than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `<=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five <= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn le(&self, other: &UniqueArc<T, A>) -> bool {
        **self <= **other
    }

    /// Greater-than comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five > UniqueArc::new(4));
    /// ```
    #[inline(always)]
    fn gt(&self, other: &UniqueArc<T, A>) -> bool {
        **self > **other
    }

    /// 'Greater than or equal to' comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `>=` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert!(five >= UniqueArc::new(5));
    /// ```
    #[inline(always)]
    fn ge(&self, other: &UniqueArc<T, A>) -> bool {
        **self >= **other
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
    /// Comparison for two `UniqueArc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    /// use std::cmp::Ordering;
    ///
    /// let five = UniqueArc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
    /// ```
    #[inline]
    fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state);
    }
}

impl<T> UniqueArc<T, Global> {
    /// Creates a new `UniqueArc`.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
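    ///
    /// # Examples
    ///
    /// A minimal sketch of the mutate-then-share pattern:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut five = UniqueArc::new(5);
    /// *five += 1; // mutate freely while ownership is unique
    /// let five = UniqueArc::into_arc(five);
    /// assert_eq!(*five, 6);
    /// ```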
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn new(value: T) -> Self {
        Self::new_in(value, Global)
    }

    /// Maps the value in a `UniqueArc`, reusing the allocation if possible.
    ///
    /// `f` is called with the value in the `UniqueArc`, and the result is returned,
    /// also in a `UniqueArc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `UniqueArc::map(u, f)` instead of `u.map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    /// #![feature(unique_rc_arc)]
    ///
    /// use std::sync::UniqueArc;
    ///
    /// let r = UniqueArc::new(7);
    /// let new = UniqueArc::map(r, |i| i + 7);
    /// assert_eq!(*new, 14);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn map<U>(this: Self, f: impl FnOnce(T) -> U) -> UniqueArc<U> {
        if size_of::<T>() == size_of::<U>()
            && align_of::<T>() == align_of::<U>()
            && UniqueArc::weak_count(&this) == 0
        {
            unsafe {
                let ptr = UniqueArc::into_raw(this);
                let value = ptr.read();
                let mut allocation = UniqueArc::from_raw(ptr.cast::<mem::MaybeUninit<U>>());

                allocation.write(f(value));
                allocation.assume_init()
            }
        } else {
            UniqueArc::new(f(UniqueArc::unwrap(this)))
        }
    }

    /// Attempts to map the value in a `UniqueArc`, reusing the allocation if possible.
    ///
    /// `f` is called with the value in the `UniqueArc`, and if the operation succeeds,
    /// the result is returned, also in a `UniqueArc`.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `UniqueArc::try_map(u, f)` instead of `u.try_map(f)`. This
    /// is so that there is no conflict with a method on the inner type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(smart_pointer_try_map)]
    /// #![feature(unique_rc_arc)]
    ///
    /// use std::sync::UniqueArc;
    ///
    /// let b = UniqueArc::new(7);
    /// let new = UniqueArc::try_map(b, u32::try_from).unwrap();
    /// assert_eq!(*new, 7);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "smart_pointer_try_map", issue = "144419")]
    pub fn try_map<R>(
        this: Self,
        f: impl FnOnce(T) -> R,
    ) -> <R::Residual as Residual<UniqueArc<R::Output>>>::TryType
    where
        R: Try,
        R::Residual: Residual<UniqueArc<R::Output>>,
    {
        if size_of::<T>() == size_of::<R::Output>()
            && align_of::<T>() == align_of::<R::Output>()
            && UniqueArc::weak_count(&this) == 0
        {
            unsafe {
                let ptr = UniqueArc::into_raw(this);
                let value = ptr.read();
                let mut allocation = UniqueArc::from_raw(ptr.cast::<mem::MaybeUninit<R::Output>>());

                allocation.write(f(value)?);
                try { allocation.assume_init() }
            }
        } else {
            try { UniqueArc::new(f(UniqueArc::unwrap(this))?) }
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn unwrap(this: Self) -> T {
        let this = ManuallyDrop::new(this);
        let val: T = unsafe { ptr::read(&**this) };

        let _weak = Weak { ptr: this.ptr, alloc: Global };

        val
    }
}

impl<T: ?Sized> UniqueArc<T> {
    #[cfg(not(no_global_oom_handling))]
    unsafe fn from_raw(ptr: *const T) -> Self {
        let offset = unsafe { data_offset(ptr) };

        // Reverse the offset to find the original ArcInner.
        let rc_ptr = unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> };

        Self {
            ptr: unsafe { NonNull::new_unchecked(rc_ptr) },
            _marker: PhantomData,
            _marker2: PhantomData,
            alloc: Global,
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        Self::as_ptr(&*this)
    }
}

impl<T, A: Allocator> UniqueArc<T, A> {
    /// Creates a new `UniqueArc` in the provided allocator.
    ///
    /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
    /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
    /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
    /// point to the new [`Arc`].
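    ///
    /// # Examples
    ///
    /// A minimal sketch using the `System` allocator:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// #![feature(allocator_api)]
    /// use std::alloc::System;
    /// use std::sync::UniqueArc;
    ///
    /// let five = UniqueArc::new_in(5, System);
    /// assert_eq!(*five, 5);
    /// ```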
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    // #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Self {
        let (ptr, alloc) = Box::into_unique(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                // Keep one weak reference so that, if all the weak pointers that are created
                // are dropped, the UniqueArc still stays valid.
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        ));
        Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
    /// Converts the `UniqueArc` into a regular [`Arc`].
    ///
    /// This consumes the `UniqueArc` and returns a regular [`Arc`] that contains the
    /// value owned by the `UniqueArc`.
    ///
    /// Any weak references created before this method is called can now be upgraded to strong
    /// references.
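    ///
    /// # Examples
    ///
    /// A minimal sketch of weak references becoming upgradable:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// assert!(weak.upgrade().is_none());
    ///
    /// let shared = UniqueArc::into_arc(unique);
    /// assert_eq!(*weak.upgrade().unwrap(), 5);
    /// ```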
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn into_arc(this: Self) -> Arc<T, A> {
        let this = ManuallyDrop::new(this);

        // Move the allocator out.
        // SAFETY: `this.alloc` will not be accessed again, nor dropped because it is in
        // a `ManuallyDrop`.
        let alloc: A = unsafe { ptr::read(&this.alloc) };

        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe {
            // Convert our weak reference into a strong reference.
            (*this.ptr.as_ptr()).strong.store(1, Release);
            Arc::from_inner_in(this.ptr, alloc)
        }
    }

    #[cfg(not(no_global_oom_handling))]
    fn weak_count(this: &Self) -> usize {
        this.inner().weak.load(Acquire) - 1
    }

    #[cfg(not(no_global_oom_handling))]
    fn inner(&self) -> &ArcInner<T> {
        // SAFETY: while this UniqueArc is alive we're guaranteed that the inner pointer is valid.
        unsafe { self.ptr.as_ref() }
    }

    #[cfg(not(no_global_oom_handling))]
    fn as_ptr(this: &Self) -> *const T {
        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);

        // SAFETY: This cannot go through Deref::deref or UniqueArc::inner because
        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
        // write through the pointer after the Rc is recovered through `from_raw`.
        unsafe { &raw mut (*ptr).data }
    }

    #[inline]
    #[cfg(not(no_global_oom_handling))]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    #[cfg(not(no_global_oom_handling))]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, _marker: PhantomData, _marker2: PhantomData, alloc }
    }
}

impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
    /// Creates a new weak reference to the `UniqueArc`.
    ///
    /// Attempting to upgrade this weak reference will fail before the `UniqueArc` has been
    /// converted to an [`Arc`] using [`UniqueArc::into_arc`].
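    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    ///
    /// // The value is still uniquely owned, so upgrading fails for now.
    /// assert!(weak.upgrade().is_none());
    /// ```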
    #[unstable(feature = "unique_rc_arc", issue = "112566")]
    #[must_use]
    pub fn downgrade(this: &Self) -> Weak<T, A> {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object or converting the object to a normal `Arc<T, A>`.
        //
        // Note that we don't need to test if the weak counter is locked because there
        // are no such operations like `Arc::get_mut` or `Arc::make_mut` that will lock
        // the weak counter.
        //
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };

        // See comments in Arc::clone() for why we do this (for mem::forget).
        if old_size > MAX_REFCOUNT {
            abort();
        }

        Weak { ptr: this.ptr, alloc: this.alloc.clone() }
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T, A: Allocator> UniqueArc<mem::MaybeUninit<T>, A> {
    unsafe fn assume_init(self) -> UniqueArc<T, A> {
        let (ptr, alloc) = UniqueArc::into_inner_with_allocator(self);
        unsafe { UniqueArc::from_inner_in(ptr.cast(), alloc) }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        unsafe { &self.ptr.as_ref().data }
    }
}

// #[unstable(feature = "unique_rc_arc", issue = "112566")]
#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
        // have unique ownership and therefore it's safe to make a mutable reference because
        // `UniqueArc` owns the only strong reference to itself.
        // We also need to be careful to only create a mutable reference to the `data` field,
        // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
        // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
        unsafe { &mut (*self.ptr.as_ptr()).data }
    }
}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
// #[unstable(feature = "deref_pure_trait", issue = "87121")]
unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}

#[unstable(feature = "unique_rc_arc", issue = "112566")]
unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
    fn drop(&mut self) {
        // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
        // SAFETY: This pointer was allocated at creation time so we know it is valid.
        let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };

        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
    }
}

#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<T: ?Sized + Allocator, A: Allocator> Allocator for Arc<T, A> {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate(layout)
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).allocate_zeroed(layout)
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).deallocate(ptr, layout) }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).grow(ptr, old_layout, new_layout) }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).shrink(ptr, old_layout, new_layout) }
    }
}