alloc/sync.rs
#![stable(feature = "rust1", since = "1.0.0")]

//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.
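//!
//! A minimal sketch of sharing a value between threads (illustrative only):
//!
//! ```
//! use std::sync::Arc;
//! use std::thread;
//!
//! let shared = Arc::new(String::from("hello"));
//! let clone = Arc::clone(&shared);
//! thread::spawn(move || assert_eq!(*clone, "hello")).join().unwrap();
//! ```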

use core::any::Any;
#[cfg(not(no_global_oom_handling))]
use core::clone::CloneToUninit;
use core::clone::UseCloned;
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unsize};
use core::mem::{self, ManuallyDrop, align_of_val_raw};
use core::num::NonZeroUsize;
use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::pin::{Pin, PinCoerceUnsized};
use core::ptr::{self, NonNull};
#[cfg(not(no_global_oom_handling))]
use core::slice::from_raw_parts_mut;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use core::sync::atomic::{self, Atomic};
use core::{borrow, fmt, hint};

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{AllocError, Allocator, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
/// Attempting to exceed it may instead cause a `panic` (even without actually
/// exceeding it).
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The error used when either counter goes above `MAX_REFCOUNT` and we can still `panic` safely.
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
    ($x:expr) => {
        atomic::fence(Acquire)
    };
}

// ThreadSanitizer does not support memory fences. To avoid false-positive
// reports in the `Arc` / `Weak` implementation, use atomic loads for
// synchronization instead.
#[cfg(sanitize = "thread")]
macro_rules! acquire {
    ($x:expr) => {
        $x.load(Acquire)
    };
}

/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
/// Reference Counted'.
///
/// The type `Arc<T>` provides shared ownership of a value of type `T`,
/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
/// a new `Arc` instance, which points to the same allocation on the heap as the
/// source `Arc`, while increasing a reference count. When the last `Arc`
/// pointer to a given allocation is destroyed, the value stored in that allocation (often
/// referred to as "inner value") is also dropped.
///
/// Shared references in Rust disallow mutation by default, and `Arc` is no
/// exception: you cannot generally obtain a mutable reference to something
/// inside an `Arc`. If you do need to mutate through an `Arc`, you have several options:
///
/// 1. Use interior mutability with synchronization primitives like [`Mutex`][mutex],
///    [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
///
/// 2. Use clone-on-write semantics with [`Arc::make_mut`] which provides efficient mutation
///    without requiring interior mutability. This approach clones the data only when
///    needed (when there are multiple references) and can be more efficient when mutations
///    are infrequent.
///
/// 3. Use [`Arc::get_mut`] when you know your `Arc` is not shared (has a reference count of 1),
///    which provides direct mutable access to the inner value without any cloning.
///
/// ```
/// use std::sync::Arc;
///
/// let mut data = Arc::new(vec![1, 2, 3]);
///
/// // This will clone the vector only if there are other references to it
/// Arc::make_mut(&mut data).push(4);
///
/// assert_eq!(*data, vec![1, 2, 3, 4]);
/// ```
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
/// counting. This means that it is thread-safe. The disadvantage is that
/// atomic operations are more expensive than ordinary memory accesses. If you
/// are not sharing reference-counted allocations between threads, consider using
/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
/// However, a library might choose `Arc<T>` in order to give library consumers
/// more flexibility.
///
/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
/// data, but it doesn't add thread safety to its data. Consider
/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
/// non-atomic operations.
///
/// In the end, this means that you may need to pair `Arc<T>` with some sort of
/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
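///
/// For example, a minimal sketch of pairing `Arc` with a [`Mutex`][mutex] to
/// mutate shared state from several threads:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
///
/// let handles: Vec<_> = (0..4)
///     .map(|_| {
///         let counter = Arc::clone(&counter);
///         thread::spawn(move || {
///             *counter.lock().unwrap() += 1;
///         })
///     })
///     .collect();
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```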
///
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
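///
/// A minimal sketch of such a parent/child relationship (the `Node` type and
/// its fields are illustrative):
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
/// }
///
/// let parent = Arc::new(Node { parent: Mutex::new(Weak::new()) });
/// let child = Arc::new(Node { parent: Mutex::new(Arc::downgrade(&parent)) });
///
/// // The weak back-pointer does not keep the parent's value alive on its own.
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// drop(parent);
/// assert!(child.parent.lock().unwrap().upgrade().is_none());
/// ```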
///
/// ## Cloning references
///
/// Creating a new reference from an existing reference-counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
/// // The two syntaxes below are equivalent.
/// let a = foo.clone();
/// let b = Arc::clone(&foo);
/// // a, b, and foo are all Arcs that point to the same memory location
/// ```
///
/// ## `Deref` behavior
///
/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
/// functions, called using [fully qualified syntax]:
///
/// ```
/// use std::sync::Arc;
///
/// let my_arc = Arc::new(());
/// let my_weak = Arc::downgrade(&my_arc);
/// ```
///
/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
/// fully qualified syntax. Some people prefer to use fully qualified syntax,
/// while others prefer using method-call syntax.
///
/// ```
/// use std::sync::Arc;
///
/// let arc = Arc::new(());
/// // Method-call syntax
/// let arc2 = arc.clone();
/// // Fully qualified syntax
/// let arc3 = Arc::clone(&arc);
/// ```
///
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [RefCell\<T>]: core::cell::RefCell
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: Arc::clone
/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
///
/// # Examples
///
/// Sharing some immutable data between threads:
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// let five = Arc::new(5);
///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
/// ```
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::thread;
///
/// let val = Arc::new(AtomicUsize::new(5));
///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::Relaxed);
///         println!("{v:?}");
///     });
/// }
/// ```
///
/// See the [`rc` documentation][rc_examples] for more examples of reference
/// counting in general.
///
/// [rc_examples]: crate::rc#examples
#[doc(search_unbox)]
#[rustc_diagnostic_item = "Arc"]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct Arc<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    ptr: NonNull<ArcInner<T>>,
    phantom: PhantomData<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}

#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}

#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}

impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    #[inline]
    fn into_inner_with_allocator(this: Self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(this);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }

    #[inline]
    unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
        Self { ptr, phantom: PhantomData, alloc }
    }

    #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
}

/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
/// managed allocation.
///
/// The allocation is accessed by calling [`upgrade`] on the `Weak`
/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
///
/// Since a `Weak` reference does not count towards ownership, it will not
/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
/// guarantees about the value still being present. Thus it may return [`None`]
/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
/// itself (the backing store) from being deallocated.
///
/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
/// prevent circular references between [`Arc`] pointers, since mutual owning references
/// would never allow either [`Arc`] to be dropped. For example, a tree could
/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
/// pointers from children back to their parents.
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
/// [`upgrade`]: Weak::upgrade
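///
/// # Examples
///
/// A minimal sketch of [`upgrade`] succeeding while a strong [`Arc`] exists
/// and failing afterwards:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = Arc::downgrade(&strong);
///
/// assert_eq!(weak.upgrade().map(|arc| *arc), Some(5));
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```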
#[stable(feature = "arc_weak", since = "1.4.0")]
#[rustc_diagnostic_item = "ArcWeak"]
pub struct Weak<
    T: ?Sized,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // This is a `NonNull` to allow optimizing the size of this type in enums,
    // but it is not necessarily a valid pointer.
    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
    // to allocate space on the heap. That's not a value a real pointer
    // will ever have because ArcInner has alignment at least 2.
    ptr: NonNull<ArcInner<T>>,
    alloc: A,
}

#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}

#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}

#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized, A: Allocator> fmt::Debug for Weak<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "(Weak)")
    }
}

// This is repr(C) to future-proof against possible field-reordering, which
// would interfere with otherwise safe [into|from]_raw() of transmutable
// inner types.
#[repr(C)]
struct ArcInner<T: ?Sized> {
    strong: Atomic<usize>,

    // the value usize::MAX acts as a sentinel for temporarily "locking" the
    // ability to upgrade weak pointers or downgrade strong ones; this is used
    // to avoid races in `make_mut` and `get_mut`.
    weak: Atomic<usize>,

    data: T,
}

/// Calculate layout for `ArcInner<T>` using the inner value's layout
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
    // Calculate layout using the given value layout.
    // Previously, layout was calculated on the expression
    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
    // reference (see #54908).
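    //
    // For example, on a typical 64-bit target the header `ArcInner<()>` is
    // 16 bytes (two `usize` counters) with alignment 8; extending it with the
    // value's layout and padding to alignment yields the final allocation
    // layout. (Illustrative only; exact sizes are target-dependent.)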
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new(data: T) -> Arc<T> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        });
        unsafe { Self::from_inner(Box::leak(x).into()) }
    }

    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
    /// then calls your closure, giving it a `Weak<T>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// ```
    /// # #![allow(dead_code)]
    /// use std::sync::{Arc, Weak};
    ///
    /// struct Gadget {
    ///     me: Weak<Gadget>,
    /// }
    ///
    /// impl Gadget {
    ///     /// Constructs a reference counted Gadget.
    ///     fn new() -> Arc<Self> {
    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
    ///         // `Arc` we're constructing.
    ///         Arc::new_cyclic(|me| {
    ///             // Create the actual struct here.
    ///             Gadget { me: me.clone() }
    ///         })
    ///     }
    ///
    ///     /// Returns a reference counted pointer to Self.
    ///     fn me(&self) -> Arc<Self> {
    ///         self.me.upgrade().unwrap()
    ///     }
    /// }
    /// ```
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
    where
        F: FnOnce(&Weak<T>) -> T,
    {
        Self::new_cyclic_in(data_fn, Global)
    }

    /// Constructs a new `Arc` with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::new_zeroed();
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "CURRENT_RUSTC_VERSION")]
    #[must_use]
    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            ))
        }
    }

    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
    /// `data` will be pinned in memory and unable to be moved.
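    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```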
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "pin", since = "1.33.0")]
    #[must_use]
    pub fn pin(data: T) -> Pin<Arc<T>> {
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }

    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
    }

    /// Constructs a new `Arc<T>`, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// use std::sync::Arc;
    ///
    /// let five = Arc::try_new(5)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x: Box<_> = Box::try_new(ArcInner {
            strong: atomic::AtomicUsize::new(1),
            weak: atomic::AtomicUsize::new(1),
            data,
        })?;
        unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
    }

    /// Constructs a new `Arc` with uninitialized contents, returning an error
    /// if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::try_new_uninit()?;
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate(layout),
                <*mut u8>::cast,
            )?))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, returning an error if allocation fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let zero = Arc::<u32>::try_new_zeroed()?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
                Layout::new::<T>(),
                |layout| Global.allocate_zeroed(layout),
                <*mut u8>::cast,
            )?))
        }
    }
}

impl<T, A: Allocator> Arc<T, A> {
    /// Constructs a new `Arc<T>` in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::new_in(5, System);
    /// ```
    #[inline]
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        );
        let (ptr, alloc) = Box::into_unique(x);
        unsafe { Self::from_inner_in(ptr.into(), alloc) }
    }

    /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::new_uninit_in(System);
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::new_zeroed_in(System);
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0)
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                ),
                alloc,
            )
        }
    }

    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
    /// to allow you to construct a `T` which holds a weak pointer to itself.
    ///
    /// Generally, a structure circularly referencing itself, either directly or
    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
    /// Using this function, you get access to the weak pointer during the
    /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
    /// clone and store it inside the `T`.
    ///
    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
    /// the `T` returned from your closure into the allocation.
    ///
    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
    /// returns, calling [`upgrade`] on the weak reference inside your closure will
    /// fail and result in a `None` value.
    ///
    /// # Panics
    ///
    /// If `data_fn` panics, the panic is propagated to the caller, and the
    /// temporary [`Weak<T>`] is dropped normally.
    ///
    /// # Example
    ///
    /// See [`new_cyclic`]
    ///
    /// [`new_cyclic`]: Arc::new_cyclic
    /// [`upgrade`]: Weak::upgrade
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
    where
        F: FnOnce(&Weak<T, A>) -> T,
    {
        // Construct the inner in the "uninitialized" state with a single
        // weak reference.
        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(0),
                weak: atomic::AtomicUsize::new(1),
                data: mem::MaybeUninit::<T>::uninit(),
            },
            alloc,
        ));
        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();

        let weak = Weak { ptr: init_ptr, alloc };

        // It's important we don't give up ownership of the weak pointer, or
        // else the memory might be freed by the time `data_fn` returns. If
        // we really wanted to pass ownership, we could create an additional
        // weak pointer for ourselves, but this would result in additional
        // updates to the weak reference count which might not be necessary
        // otherwise.
        let data = data_fn(&weak);

        // Now we can properly initialize the inner value and turn our weak
        // reference into a strong reference.
        let strong = unsafe {
            let inner = init_ptr.as_ptr();
            ptr::write(&raw mut (*inner).data, data);

            // The above write to the data field must be visible to any threads which
            // observe a non-zero strong count. Therefore we need at least "Release" ordering
            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
            //
            // "Acquire" ordering is not required. When considering the possible behaviors
            // of `data_fn` we only need to look at what it could do with a reference to a
            // non-upgradeable `Weak`:
            // - It can *clone* the `Weak`, increasing the weak reference count.
            // - It can drop those clones, decreasing the weak reference count (but never to zero).
            //
            // These side effects do not impact us in any way, and no other side effects are
            // possible with safe code alone.
            let prev_value = (*inner).strong.fetch_add(1, Release);
            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");

            // Strong references should collectively own a shared weak reference,
            // so don't run the destructor for our old weak reference.
            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
            // and forgetting the weak reference.
            let alloc = weak.into_raw_with_allocator().1;

            Arc::from_inner_in(init_ptr, alloc)
        };

        strong
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
    /// then `data` will be pinned in memory and unable to be moved.
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
    where
        A: 'static,
    {
        unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
    }

    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
    where
        A: 'static,
    {
        unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
    }

    /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::try_new_in(5, System)?;
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
        // Start the weak pointer count as 1 which is the weak pointer that's
        // held by all the strong pointers (kinda), see std/rc.rs for more info
        let x = Box::try_new_in(
            ArcInner {
                strong: atomic::AtomicUsize::new(1),
                weak: atomic::AtomicUsize::new(1),
                data,
            },
            alloc,
        )?;
        let (ptr, alloc) = Box::into_unique(x);
        Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
    }

    /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
    /// error if allocation fails.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    /// #![feature(get_mut_unchecked)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
    ///
    /// let five = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
    ///
    ///     five.assume_init()
    /// };
    ///
    /// assert_eq!(*five, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Constructs a new `Arc` with uninitialized contents, with the memory
    /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
    /// fails.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
    /// of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
    /// let zero = unsafe { zero.assume_init() };
    ///
    /// assert_eq!(*zero, 0);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
        unsafe {
            Ok(Arc::from_ptr_in(
                Arc::try_allocate_for_layout(
                    Layout::new::<T>(),
                    |layout| alloc.allocate_zeroed(layout),
                    <*mut u8>::cast,
                )?,
                alloc,
            ))
        }
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
    /// passed in.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
    /// keep the `Arc` in the [`Err`] case.
    /// Immediately dropping the [`Err`]-value, as the expression
    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
    /// drop to zero and the inner value of the `Arc` to be dropped.
    /// For instance, if two threads execute such an expression in parallel,
    /// there is a race condition without the possibility of unsafety:
    /// The threads could first both check whether they own the last instance
    /// in `Arc::try_unwrap`, determine that they both do not, and then both
    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
    /// In this scenario, the value inside the `Arc` is safely destroyed
    /// by exactly one of the threads, but neither thread will ever be able
    /// to use the value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
    ///
    /// let x = Arc::new(4);
    /// let _y = Arc::clone(&x);
    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
    /// ```
    #[inline]
    #[stable(feature = "arc_unique", since = "1.4.0")]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
            return Err(this);
        }

        acquire!(this.inner().strong);

        let this = ManuallyDrop::new(this);
        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
        let alloc: A = unsafe { ptr::read(&this.alloc) }; // copy the allocator

        // Make a weak pointer to clean up the implicit strong-weak reference
        let _weak = Weak { ptr: this.ptr, alloc };

        Ok(elem)
    }

    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    ///
    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
    ///
    /// This will succeed even if there are outstanding weak references.
    ///
    /// If `Arc::into_inner` is called on every clone of this `Arc`,
    /// it is guaranteed that exactly one of the calls returns the inner value.
    /// This means in particular that the inner value is not dropped.
    ///
    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
    /// is meant for different use-cases. If used as a direct replacement
    /// for `Arc::into_inner` anyway, such as with the expression
    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
    /// **not** give the same guarantee as described in the previous paragraph.
    /// For more information, see the examples below and read the documentation
    /// of [`Arc::try_unwrap`].
    ///
    /// # Examples
    ///
    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(3);
    /// let y = Arc::clone(&x);
    ///
    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
    ///
    /// let x_inner_value = x_thread.join().unwrap();
    /// let y_inner_value = y_thread.join().unwrap();
    ///
    /// // One of the threads is guaranteed to receive the inner value:
    /// assert!(matches!(
    ///     (x_inner_value, y_inner_value),
    ///     (None, Some(3)) | (Some(3), None)
    /// ));
    /// // The result could also be `(None, None)` if the threads called
    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
    /// ```
    ///
    /// A more practical example demonstrating the need for `Arc::into_inner`:
    /// ```
    /// use std::sync::Arc;
    ///
    /// // Definition of a simple singly linked list using `Arc`:
    /// #[derive(Clone)]
    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
    /// struct Node<T>(T, Option<Arc<Node<T>>>);
    ///
    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
    /// // can cause a stack overflow. To prevent this, we can provide a
    /// // manual `Drop` implementation that does the destruction in a loop:
    /// impl<T> Drop for LinkedList<T> {
    ///     fn drop(&mut self) {
    ///         let mut link = self.0.take();
    ///         while let Some(arc_node) = link.take() {
    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
    ///                 link = next;
    ///             }
    ///         }
    ///     }
    /// }
    ///
    /// // Implementation of `new` and `push` omitted
    /// impl<T> LinkedList<T> {
    ///     /* ... */
    ///     # fn new() -> Self {
    ///     #     LinkedList(None)
    ///     # }
    ///     # fn push(&mut self, x: T) {
    ///     #     self.0 = Some(Arc::new(Node(x, self.0.take())));
    ///     # }
    /// }
    ///
    /// // The following code could have still caused a stack overflow
    /// // despite the manual `Drop` impl if that `Drop` impl had used
    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
    ///
    /// // Create a long list and clone it
    /// let mut x = LinkedList::new();
    /// let size = 100000;
    /// # let size = if cfg!(miri) { 100 } else { size };
    /// for i in 0..size {
    ///     x.push(i); // Adds i to the front of x
    /// }
    /// let y = x.clone();
    ///
    /// // Drop the clones in parallel
    /// let x_thread = std::thread::spawn(|| drop(x));
    /// let y_thread = std::thread::spawn(|| drop(y));
    /// x_thread.join().unwrap();
    /// y_thread.join().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "arc_into_inner", since = "1.70.0")]
    pub fn into_inner(this: Self) -> Option<T> {
        // Make sure that the ordinary `Drop` implementation isn’t called as well
        let mut this = mem::ManuallyDrop::new(this);

        // Following the implementation of `drop` and `drop_slow`
        if this.inner().strong.fetch_sub(1, Release) != 1 {
            return None;
        }

        acquire!(this.inner().strong);

        // SAFETY: This mirrors the line
        //
        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
        //
        // in `drop_slow`. Instead of dropping the value behind the pointer,
        // it is read and eventually returned; `ptr::read` has the same
        // safety conditions as `ptr::drop_in_place`.

        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
        let alloc = unsafe { ptr::read(&this.alloc) };

        drop(Weak { ptr: this.ptr, alloc });

        Some(inner)
    }
}

impl<T> Arc<[T]> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use]
    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    #[stable(feature = "new_zeroed_alloc", since = "CURRENT_RUSTC_VERSION")]
    #[must_use]
    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
        unsafe {
            Arc::from_ptr(Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| Global.allocate_zeroed(layout),
                |mem| {
                    ptr::slice_from_raw_parts_mut(mem as *mut T, len)
                        as *mut ArcInner<[mem::MaybeUninit<T>]>
                },
            ))
        }
    }

    /// Converts the reference-counted slice into a reference-counted array.
    ///
    /// This operation does not reallocate; the underlying array of the slice is simply reinterpreted as an array type.
    ///
    /// If `N` is not exactly equal to the length of `self`, then this method returns `None`.
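    ///
    /// # Examples
    ///
    /// A minimal sketch (the unstable `slice_as_array` feature gate is assumed):
    ///
    /// ```
    /// #![feature(slice_as_array)]
    ///
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// let array: Arc<[u32; 3]> = slice.into_array().unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```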
    #[unstable(feature = "slice_as_array", issue = "133508")]
    #[inline]
    #[must_use]
    pub fn into_array<const N: usize>(self) -> Option<Arc<[T; N]>> {
        if self.len() == N {
            let ptr = Self::into_raw(self) as *const [T; N];

            // SAFETY: The underlying array of a slice has the exact same layout as an actual array `[T; N]` if `N` is equal to the slice's length.
            let me = unsafe { Arc::from_raw(ptr) };
            Some(me)
        } else {
            None
        }
    }
}

impl<T, A: Allocator> Arc<[T], A> {
    /// Constructs a new atomically reference-counted slice with uninitialized contents in the
    /// provided allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(get_mut_unchecked)]
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
    ///
    /// let values = unsafe {
    ///     // Deferred initialization:
    ///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
    ///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
    ///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
    ///
    ///     values.assume_init()
    /// };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
    }

    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
    /// filled with `0` bytes, in the provided allocator.
    ///
    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
    /// incorrect usage of this method.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [0, 0, 0])
    /// ```
    ///
    /// [zeroed]: mem::MaybeUninit::zeroed
    #[cfg(not(no_global_oom_handling))]
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
        unsafe {
            Arc::from_ptr_in(
                Arc::allocate_for_layout(
                    Layout::array::<T>(len).unwrap(),
                    |layout| alloc.allocate_zeroed(layout),
                    |mem| {
                        ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
                            as *mut ArcInner<[mem::MaybeUninit<T>]>
                    },
                ),
                alloc,
            )
        }
    }
}

impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
    /// Converts to `Arc<T>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut five = Arc::<u32>::new_uninit();
    ///
    /// // Deferred initialization:
    /// Arc::get_mut(&mut five).unwrap().write(5);
    ///
    /// let five = unsafe { five.assume_init() };
    ///
    /// assert_eq!(*five, 5)
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<T, A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
    }
}

impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
    /// Converts to `Arc<[T]>`.
    ///
    /// # Safety
    ///
    /// As with [`MaybeUninit::assume_init`],
    /// it is up to the caller to guarantee that the inner value
    /// really is in an initialized state.
    /// Calling this when the content is not yet fully initialized
    /// causes immediate undefined behavior.
    ///
    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
    ///
    /// // Deferred initialization:
    /// let data = Arc::get_mut(&mut values).unwrap();
    /// data[0].write(1);
    /// data[1].write(2);
    /// data[2].write(3);
    ///
    /// let values = unsafe { values.assume_init() };
    ///
    /// assert_eq!(*values, [1, 2, 3])
    /// ```
    #[stable(feature = "new_uninit", since = "1.82.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub unsafe fn assume_init(self) -> Arc<[T], A> {
        let (ptr, alloc) = Arc::into_inner_with_allocator(self);
        unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Constructs an `Arc<T>` from a raw pointer.
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
    ///
    /// * If `U` is sized, it must have the same size and alignment as `T`. This
    ///   is trivially true if `U` is `T`.
    /// * If `U` is unsized, its data pointer must have the same size and
    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
    ///   coercion].
    ///
    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
    /// and alignment, this is basically like transmuting references of
    /// different types. See [`mem::transmute`][transmute] for more information
    /// on what restrictions apply in this case.
    ///
    /// The raw pointer must point to a block of memory allocated by the global allocator.
    ///
    /// The user of `from_raw` has to make sure a specific value of `T` is only
    /// dropped once.
    ///
    /// This function is unsafe because improper use may lead to memory unsafety,
    /// even if the returned `Arc<T>` is never accessed.
    ///
    /// [into_raw]: Arc::into_raw
    /// [transmute]: core::mem::transmute
    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     // Convert back to an `Arc` to prevent leak.
    ///     let x = Arc::from_raw(x_ptr);
    ///     assert_eq!(&*x, "hello");
    ///
    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
    /// }
    ///
    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
    /// ```
    ///
    /// Convert a slice back into its original array:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
    /// let x_ptr: *const [u32] = Arc::into_raw(x);
    ///
    /// unsafe {
    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
    ///     assert_eq!(&*x, &[1, 2, 3]);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        unsafe { Arc::from_raw_in(ptr, Global) }
    }

    /// Consumes the `Arc`, returning the wrapped pointer.
    ///
    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
    /// [`Arc::from_raw`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new("hello".to_owned());
    /// let x_ptr = Arc::into_raw(x);
    /// assert_eq!(unsafe { &*x_ptr }, "hello");
    /// # // Prevent leaks for Miri.
    /// # drop(unsafe { Arc::from_raw(x_ptr) });
    /// ```
    #[must_use = "losing the pointer will leak memory"]
    #[stable(feature = "rc_raw", since = "1.17.0")]
    #[rustc_never_returns_null_ptr]
    pub fn into_raw(this: Self) -> *const T {
        let this = ManuallyDrop::new(this);
        Self::as_ptr(&*this)
    }

    /// Increments the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
    /// allocated by the global allocator.
    ///
    /// [from_raw_in]: Arc::from_raw_in
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // This assertion is deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    ///     # // Prevent leaks for Miri.
    ///     # Arc::decrement_strong_count(ptr);
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn increment_strong_count(ptr: *const T) {
        unsafe { Arc::increment_strong_count_in(ptr, Global) }
    }

    /// Decrements the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
    /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) when invoking this method, and `ptr` must point to a block of memory
    /// allocated by the global allocator. This method can be used to release the final
    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
    /// released.
    ///
    /// [from_raw_in]: Arc::from_raw_in
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    ///
    /// unsafe {
    ///     let ptr = Arc::into_raw(five);
    ///     Arc::increment_strong_count(ptr);
    ///
    ///     // Those assertions are deterministic because we haven't shared
    ///     // the `Arc` between threads.
    ///     let five = Arc::from_raw(ptr);
    ///     assert_eq!(2, Arc::strong_count(&five));
    ///     Arc::decrement_strong_count(ptr);
    ///     assert_eq!(1, Arc::strong_count(&five));
    /// }
    /// ```
    #[inline]
    #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
    pub unsafe fn decrement_strong_count(ptr: *const T) {
        unsafe { Arc::decrement_strong_count_in(ptr, Global) }
    }
}

impl<T: ?Sized, A: Allocator> Arc<T, A> {
    /// Returns a reference to the underlying allocator.
    ///
    /// Note: this is an associated function, which means that you have
    /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
    /// is so that there is no conflict with a method on the inner type.
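    ///
    /// # Examples
    ///
    /// A minimal sketch (the unstable `allocator_api` feature gate is assumed):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let arc = Arc::new_in(5, System);
    /// let _alloc: &System = Arc::allocator(&arc);
    /// ```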
1560 #[inline]
1561 #[unstable(feature = "allocator_api", issue = "32838")]
1562 pub fn allocator(this: &Self) -> &A {
1563 &this.alloc
1564 }
1565
1566 /// Consumes the `Arc`, returning the wrapped pointer and allocator.
1567 ///
1568 /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1569 /// [`Arc::from_raw_in`].
1570 ///
1571 /// # Examples
1572 ///
1573 /// ```
1574 /// #![feature(allocator_api)]
1575 /// use std::sync::Arc;
1576 /// use std::alloc::System;
1577 ///
1578 /// let x = Arc::new_in("hello".to_owned(), System);
1579 /// let (ptr, alloc) = Arc::into_raw_with_allocator(x);
1580 /// assert_eq!(unsafe { &*ptr }, "hello");
1581 /// let x = unsafe { Arc::from_raw_in(ptr, alloc) };
1582 /// assert_eq!(&*x, "hello");
1583 /// ```
1584 #[must_use = "losing the pointer will leak memory"]
1585 #[unstable(feature = "allocator_api", issue = "32838")]
1586 pub fn into_raw_with_allocator(this: Self) -> (*const T, A) {
1587 let this = mem::ManuallyDrop::new(this);
1588 let ptr = Self::as_ptr(&this);
1589 // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
1590 let alloc = unsafe { ptr::read(&this.alloc) };
1591 (ptr, alloc)
1592 }
1593
1594 /// Provides a raw pointer to the data.
1595 ///
1596 /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
    /// as long as the strong count is nonzero.
1598 ///
1599 /// # Examples
1600 ///
1601 /// ```
1602 /// use std::sync::Arc;
1603 ///
1604 /// let x = Arc::new("hello".to_owned());
1605 /// let y = Arc::clone(&x);
1606 /// let x_ptr = Arc::as_ptr(&x);
1607 /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1608 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1609 /// ```
1610 #[must_use]
1611 #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1612 #[rustc_never_returns_null_ptr]
1613 pub fn as_ptr(this: &Self) -> *const T {
1614 let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1615
1616 // SAFETY: This cannot go through Deref::deref or RcInnerPtr::inner because
1617 // this is required to retain raw/mut provenance such that e.g. `get_mut` can
        // write through the pointer after the Arc is recovered through `from_raw`.
1619 unsafe { &raw mut (*ptr).data }
1620 }
1621
1622 /// Constructs an `Arc<T, A>` from a raw pointer.
1623 ///
1624 /// The raw pointer must have been previously returned by a call to [`Arc<U,
1625 /// A>::into_raw`][into_raw] with the following requirements:
1626 ///
1627 /// * If `U` is sized, it must have the same size and alignment as `T`. This
1628 /// is trivially true if `U` is `T`.
1629 /// * If `U` is unsized, its data pointer must have the same size and
1630 /// alignment as `T`. This is trivially true if `Arc<U>` was constructed
1631 /// through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1632 /// coercion].
1633 ///
1634 /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1635 /// and alignment, this is basically like transmuting references of
1636 /// different types. See [`mem::transmute`][transmute] for more information
1637 /// on what restrictions apply in this case.
1638 ///
1639 /// The raw pointer must point to a block of memory allocated by `alloc`
    /// The raw pointer must point to a block of memory allocated by `alloc`.
1641 /// The user of `from_raw` has to make sure a specific value of `T` is only
1642 /// dropped once.
1643 ///
1644 /// This function is unsafe because improper use may lead to memory unsafety,
1645 /// even if the returned `Arc<T>` is never accessed.
1646 ///
1647 /// [into_raw]: Arc::into_raw
1648 /// [transmute]: core::mem::transmute
1649 /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1650 ///
1651 /// # Examples
1652 ///
1653 /// ```
1654 /// #![feature(allocator_api)]
1655 ///
1656 /// use std::sync::Arc;
1657 /// use std::alloc::System;
1658 ///
1659 /// let x = Arc::new_in("hello".to_owned(), System);
1660 /// let (x_ptr, alloc) = Arc::into_raw_with_allocator(x);
1661 ///
1662 /// unsafe {
1663 /// // Convert back to an `Arc` to prevent leak.
1664 /// let x = Arc::from_raw_in(x_ptr, System);
1665 /// assert_eq!(&*x, "hello");
1666 ///
1667 /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1668 /// }
1669 ///
1670 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1671 /// ```
1672 ///
1673 /// Convert a slice back into its original array:
1674 ///
1675 /// ```
1676 /// #![feature(allocator_api)]
1677 ///
1678 /// use std::sync::Arc;
1679 /// use std::alloc::System;
1680 ///
1681 /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1682 /// let x_ptr: *const [u32] = Arc::into_raw_with_allocator(x).0;
1683 ///
1684 /// unsafe {
1685 /// let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1686 /// assert_eq!(&*x, &[1, 2, 3]);
1687 /// }
1688 /// ```
1689 #[inline]
1690 #[unstable(feature = "allocator_api", issue = "32838")]
1691 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1692 unsafe {
1693 let offset = data_offset(ptr);
1694
1695 // Reverse the offset to find the original ArcInner.
1696 let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1697
1698 Self::from_ptr_in(arc_ptr, alloc)
1699 }
1700 }
1701
1702 /// Creates a new [`Weak`] pointer to this allocation.
1703 ///
1704 /// # Examples
1705 ///
1706 /// ```
1707 /// use std::sync::Arc;
1708 ///
1709 /// let five = Arc::new(5);
1710 ///
1711 /// let weak_five = Arc::downgrade(&five);
1712 /// ```
1713 #[must_use = "this returns a new `Weak` pointer, \
1714 without modifying the original `Arc`"]
1715 #[stable(feature = "arc_weak", since = "1.4.0")]
1716 pub fn downgrade(this: &Self) -> Weak<T, A>
1717 where
1718 A: Clone,
1719 {
1720 // This Relaxed is OK because we're checking the value in the CAS
1721 // below.
1722 let mut cur = this.inner().weak.load(Relaxed);
1723
1724 loop {
1725 // check if the weak counter is currently "locked"; if so, spin.
1726 if cur == usize::MAX {
1727 hint::spin_loop();
1728 cur = this.inner().weak.load(Relaxed);
1729 continue;
1730 }
1731
1732 // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1733 assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1734
1735 // NOTE: this code currently ignores the possibility of overflow
1736 // into usize::MAX; in general both Rc and Arc need to be adjusted
1737 // to deal with overflow.
1738
1739 // Unlike with Clone(), we need this to be an Acquire read to
1740 // synchronize with the write coming from `is_unique`, so that the
1741 // events prior to that write happen before this read.
1742 match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1743 Ok(_) => {
1744 // Make sure we do not create a dangling Weak
1745 debug_assert!(!is_dangling(this.ptr.as_ptr()));
1746 return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1747 }
1748 Err(old) => cur = old,
1749 }
1750 }
1751 }
1752
1753 /// Gets the number of [`Weak`] pointers to this allocation.
1754 ///
1755 /// # Safety
1756 ///
1757 /// This method by itself is safe, but using it correctly requires extra care.
1758 /// Another thread can change the weak count at any time,
1759 /// including potentially between calling this method and acting on the result.
1760 ///
1761 /// # Examples
1762 ///
1763 /// ```
1764 /// use std::sync::Arc;
1765 ///
1766 /// let five = Arc::new(5);
1767 /// let _weak_five = Arc::downgrade(&five);
1768 ///
1769 /// // This assertion is deterministic because we haven't shared
1770 /// // the `Arc` or `Weak` between threads.
1771 /// assert_eq!(1, Arc::weak_count(&five));
1772 /// ```
1773 #[inline]
1774 #[must_use]
1775 #[stable(feature = "arc_counts", since = "1.15.0")]
1776 pub fn weak_count(this: &Self) -> usize {
1777 let cnt = this.inner().weak.load(Relaxed);
1778 // If the weak count is currently locked, the value of the
1779 // count was 0 just before taking the lock.
1780 if cnt == usize::MAX { 0 } else { cnt - 1 }
1781 }
1782
1783 /// Gets the number of strong (`Arc`) pointers to this allocation.
1784 ///
1785 /// # Safety
1786 ///
1787 /// This method by itself is safe, but using it correctly requires extra care.
1788 /// Another thread can change the strong count at any time,
1789 /// including potentially between calling this method and acting on the result.
1790 ///
1791 /// # Examples
1792 ///
1793 /// ```
1794 /// use std::sync::Arc;
1795 ///
1796 /// let five = Arc::new(5);
1797 /// let _also_five = Arc::clone(&five);
1798 ///
1799 /// // This assertion is deterministic because we haven't shared
1800 /// // the `Arc` between threads.
1801 /// assert_eq!(2, Arc::strong_count(&five));
1802 /// ```
1803 #[inline]
1804 #[must_use]
1805 #[stable(feature = "arc_counts", since = "1.15.0")]
1806 pub fn strong_count(this: &Self) -> usize {
1807 this.inner().strong.load(Relaxed)
1808 }
1809
1810 /// Increments the strong reference count on the `Arc<T>` associated with the
1811 /// provided pointer by one.
1812 ///
1813 /// # Safety
1814 ///
1815 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1816 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1817 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1818 /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1819 /// allocated by `alloc`.
1820 ///
1821 /// [from_raw_in]: Arc::from_raw_in
1822 ///
1823 /// # Examples
1824 ///
1825 /// ```
1826 /// #![feature(allocator_api)]
1827 ///
1828 /// use std::sync::Arc;
1829 /// use std::alloc::System;
1830 ///
1831 /// let five = Arc::new_in(5, System);
1832 ///
1833 /// unsafe {
1834 /// let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1835 /// Arc::increment_strong_count_in(ptr, System);
1836 ///
1837 /// // This assertion is deterministic because we haven't shared
1838 /// // the `Arc` between threads.
1839 /// let five = Arc::from_raw_in(ptr, System);
1840 /// assert_eq!(2, Arc::strong_count(&five));
1841 /// # // Prevent leaks for Miri.
1842 /// # Arc::decrement_strong_count_in(ptr, System);
1843 /// }
1844 /// ```
1845 #[inline]
1846 #[unstable(feature = "allocator_api", issue = "32838")]
1847 pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1848 where
1849 A: Clone,
1850 {
1851 // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1852 let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1853 // Now increase refcount, but don't drop new refcount either
1854 let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1855 }
1856
1857 /// Decrements the strong reference count on the `Arc<T>` associated with the
1858 /// provided pointer by one.
1859 ///
1860 /// # Safety
1861 ///
1862 /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
1863 /// same layout requirements specified in [`Arc::from_raw_in`][from_raw_in].
1864 /// The associated `Arc` instance must be valid (i.e. the strong count must be at
1865 /// least 1) when invoking this method, and `ptr` must point to a block of memory
1866 /// allocated by `alloc`. This method can be used to release the final
1867 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1868 /// released.
1869 ///
1870 /// [from_raw_in]: Arc::from_raw_in
1871 ///
1872 /// # Examples
1873 ///
1874 /// ```
1875 /// #![feature(allocator_api)]
1876 ///
1877 /// use std::sync::Arc;
1878 /// use std::alloc::System;
1879 ///
1880 /// let five = Arc::new_in(5, System);
1881 ///
1882 /// unsafe {
1883 /// let (ptr, _alloc) = Arc::into_raw_with_allocator(five);
1884 /// Arc::increment_strong_count_in(ptr, System);
1885 ///
    ///     // These assertions are deterministic because we haven't shared
1887 /// // the `Arc` between threads.
1888 /// let five = Arc::from_raw_in(ptr, System);
1889 /// assert_eq!(2, Arc::strong_count(&five));
1890 /// Arc::decrement_strong_count_in(ptr, System);
1891 /// assert_eq!(1, Arc::strong_count(&five));
1892 /// }
1893 /// ```
1894 #[inline]
1895 #[unstable(feature = "allocator_api", issue = "32838")]
1896 pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1897 unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1898 }
1899
1900 #[inline]
1901 fn inner(&self) -> &ArcInner<T> {
1902 // This unsafety is ok because while this arc is alive we're guaranteed
1903 // that the inner pointer is valid. Furthermore, we know that the
1904 // `ArcInner` structure itself is `Sync` because the inner data is
1905 // `Sync` as well, so we're ok loaning out an immutable pointer to these
1906 // contents.
1907 unsafe { self.ptr.as_ref() }
1908 }
1909
1910 // Non-inlined part of `drop`.
1911 #[inline(never)]
1912 unsafe fn drop_slow(&mut self) {
1913 // Drop the weak ref collectively held by all strong references when this
1914 // variable goes out of scope. This ensures that the memory is deallocated
1915 // even if the destructor of `T` panics.
1916 // Take a reference to `self.alloc` instead of cloning because 1. it'll last long
        // enough, and 2. you should be able to drop `Arc`s with unclonable allocators.
1918 let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
1919
1920 // Destroy the data at this time, even though we must not free the box
1921 // allocation itself (there might still be weak pointers lying around).
1922 // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
1923 unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
1924 }
1925
1926 /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
1927 /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1928 ///
1929 /// # Examples
1930 ///
1931 /// ```
1932 /// use std::sync::Arc;
1933 ///
1934 /// let five = Arc::new(5);
1935 /// let same_five = Arc::clone(&five);
1936 /// let other_five = Arc::new(5);
1937 ///
1938 /// assert!(Arc::ptr_eq(&five, &same_five));
1939 /// assert!(!Arc::ptr_eq(&five, &other_five));
1940 /// ```
1941 ///
1942 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1943 #[inline]
1944 #[must_use]
1945 #[stable(feature = "ptr_eq", since = "1.17.0")]
1946 pub fn ptr_eq(this: &Self, other: &Self) -> bool {
1947 ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
1948 }
1949}
1950
1951impl<T: ?Sized> Arc<T> {
1952 /// Allocates an `ArcInner<T>` with sufficient space for
1953 /// a possibly-unsized inner value where the value has the layout provided.
1954 ///
1955 /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
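    ///
    /// For example, the slice allocation path further below passes a closure
    /// that attaches length metadata to the raw data pointer (shown here only
    /// as an illustration):
    ///
    /// ```ignore (illustrative fragment of a private API)
    /// |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>
    /// ```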
1957 #[cfg(not(no_global_oom_handling))]
1958 unsafe fn allocate_for_layout(
1959 value_layout: Layout,
1960 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1961 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1962 ) -> *mut ArcInner<T> {
1963 let layout = arcinner_layout_for_value_layout(value_layout);
1964
1965 let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
1966
1967 unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
1968 }
1969
1970 /// Allocates an `ArcInner<T>` with sufficient space for
1971 /// a possibly-unsized inner value where the value has the layout provided,
1972 /// returning an error if allocation fails.
1973 ///
1974 /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
1976 unsafe fn try_allocate_for_layout(
1977 value_layout: Layout,
1978 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1979 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1980 ) -> Result<*mut ArcInner<T>, AllocError> {
1981 let layout = arcinner_layout_for_value_layout(value_layout);
1982
1983 let ptr = allocate(layout)?;
1984
1985 let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
1986
1987 Ok(inner)
1988 }
1989
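    /// Writes the initial reference counts into the freshly allocated block and
    /// returns the `ArcInner` pointer. Both counts start at 1: one strong
    /// reference, plus the implicit weak reference collectively held by all
    /// strong references.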
1990 unsafe fn initialize_arcinner(
1991 ptr: NonNull<[u8]>,
1992 layout: Layout,
1993 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1994 ) -> *mut ArcInner<T> {
1995 let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
1996 debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
1997
1998 unsafe {
1999 (&raw mut (*inner).strong).write(atomic::AtomicUsize::new(1));
2000 (&raw mut (*inner).weak).write(atomic::AtomicUsize::new(1));
2001 }
2002
2003 inner
2004 }
2005}
2006
2007impl<T: ?Sized, A: Allocator> Arc<T, A> {
2008 /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
2009 #[inline]
2010 #[cfg(not(no_global_oom_handling))]
2011 unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
2012 // Allocate for the `ArcInner<T>` using the given value.
2013 unsafe {
2014 Arc::allocate_for_layout(
2015 Layout::for_value_raw(ptr),
2016 |layout| alloc.allocate(layout),
2017 |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
2018 )
2019 }
2020 }
2021
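    /// Moves the value out of a `Box<T, A>` into a new `Arc<T, A>` by copying
    /// it byte-wise, then frees the box allocation without dropping its
    /// contents.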
2022 #[cfg(not(no_global_oom_handling))]
2023 fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
2024 unsafe {
2025 let value_size = size_of_val(&*src);
2026 let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
2027
2028 // Copy value as bytes
2029 ptr::copy_nonoverlapping(
2030 (&raw const *src) as *const u8,
2031 (&raw mut (*ptr).data) as *mut u8,
2032 value_size,
2033 );
2034
2035 // Free the allocation without dropping its contents
2036 let (bptr, alloc) = Box::into_raw_with_allocator(src);
2037 let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
2038 drop(src);
2039
2040 Self::from_ptr_in(ptr, alloc)
2041 }
2042 }
2043}
2044
2045impl<T> Arc<[T]> {
2046 /// Allocates an `ArcInner<[T]>` with the given length.
2047 #[cfg(not(no_global_oom_handling))]
2048 unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
2049 unsafe {
2050 Self::allocate_for_layout(
2051 Layout::array::<T>(len).unwrap(),
2052 |layout| Global.allocate(layout),
2053 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2054 )
2055 }
2056 }
2057
    /// Copies elements from a slice into a newly allocated `Arc<[T]>`.
2059 ///
2060 /// Unsafe because the caller must either take ownership or bind `T: Copy`.
2061 #[cfg(not(no_global_oom_handling))]
2062 unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
2063 unsafe {
2064 let ptr = Self::allocate_for_slice(v.len());
2065
2066 ptr::copy_nonoverlapping(v.as_ptr(), (&raw mut (*ptr).data) as *mut T, v.len());
2067
2068 Self::from_ptr(ptr)
2069 }
2070 }
2071
2072 /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
2073 ///
2074 /// Behavior is undefined should the size be wrong.
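    ///
    /// A sketch of the intended call pattern (see the `ArcFromSlice` impl
    /// below; illustrative only):
    ///
    /// ```ignore (illustrative fragment of a private API)
    /// // SAFETY: the iterator yields exactly `v.len()` items.
    /// unsafe { Arc::from_iter_exact(v.iter().cloned(), v.len()) }
    /// ```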
2075 #[cfg(not(no_global_oom_handling))]
2076 unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
2077 // Panic guard while cloning T elements.
2078 // In the event of a panic, elements that have been written
2079 // into the new ArcInner will be dropped, then the memory freed.
2080 struct Guard<T> {
2081 mem: NonNull<u8>,
2082 elems: *mut T,
2083 layout: Layout,
2084 n_elems: usize,
2085 }
2086
2087 impl<T> Drop for Guard<T> {
2088 fn drop(&mut self) {
2089 unsafe {
2090 let slice = from_raw_parts_mut(self.elems, self.n_elems);
2091 ptr::drop_in_place(slice);
2092
2093 Global.deallocate(self.mem, self.layout);
2094 }
2095 }
2096 }
2097
2098 unsafe {
2099 let ptr = Self::allocate_for_slice(len);
2100
2101 let mem = ptr as *mut _ as *mut u8;
2102 let layout = Layout::for_value_raw(ptr);
2103
2104 // Pointer to first element
2105 let elems = (&raw mut (*ptr).data) as *mut T;
2106
2107 let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2108
2109 for (i, item) in iter.enumerate() {
2110 ptr::write(elems.add(i), item);
2111 guard.n_elems += 1;
2112 }
2113
2114 // All clear. Forget the guard so it doesn't free the new ArcInner.
2115 mem::forget(guard);
2116
2117 Self::from_ptr(ptr)
2118 }
2119 }
2120}
2121
2122impl<T, A: Allocator> Arc<[T], A> {
2123 /// Allocates an `ArcInner<[T]>` with the given length.
2124 #[inline]
2125 #[cfg(not(no_global_oom_handling))]
2126 unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2127 unsafe {
2128 Arc::allocate_for_layout(
2129 Layout::array::<T>(len).unwrap(),
2130 |layout| alloc.allocate(layout),
2131 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2132 )
2133 }
2134 }
2135}
2136
2137/// Specialization trait used for `From<&[T]>`.
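///
/// The `T: Copy` impl takes a byte-copy fast path via `copy_from_slice`, while
/// the default `T: Clone` impl clones element by element. A sketch of the
/// user-facing entry point that dispatches here:
///
/// ```
/// use std::sync::Arc;
///
/// let bytes: Arc<[u8]> = Arc::from(&b"bytes"[..]); // Copy fast path
/// let names: Arc<[String]> = Arc::from(&["a".to_owned()][..]); // Clone path
/// ```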
2138#[cfg(not(no_global_oom_handling))]
2139trait ArcFromSlice<T> {
2140 fn from_slice(slice: &[T]) -> Self;
2141}
2142
2143#[cfg(not(no_global_oom_handling))]
2144impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2145 #[inline]
2146 default fn from_slice(v: &[T]) -> Self {
2147 unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2148 }
2149}
2150
2151#[cfg(not(no_global_oom_handling))]
2152impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
2153 #[inline]
2154 fn from_slice(v: &[T]) -> Self {
2155 unsafe { Arc::copy_from_slice(v) }
2156 }
2157}
2158
2159#[stable(feature = "rust1", since = "1.0.0")]
2160impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2161 /// Makes a clone of the `Arc` pointer.
2162 ///
2163 /// This creates another pointer to the same allocation, increasing the
2164 /// strong reference count.
2165 ///
2166 /// # Examples
2167 ///
2168 /// ```
2169 /// use std::sync::Arc;
2170 ///
2171 /// let five = Arc::new(5);
2172 ///
2173 /// let _ = Arc::clone(&five);
2174 /// ```
2175 #[inline]
2176 fn clone(&self) -> Arc<T, A> {
2177 // Using a relaxed ordering is alright here, as knowledge of the
2178 // original reference prevents other threads from erroneously deleting
2179 // the object.
2180 //
        // As explained in the [Boost documentation][1], increasing the
        // reference counter can always be done with memory_order_relaxed: new
2183 // references to an object can only be formed from an existing
2184 // reference, and passing an existing reference from one thread to
2185 // another must already provide any required synchronization.
2186 //
2187 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2188 let old_size = self.inner().strong.fetch_add(1, Relaxed);
2189
2190 // However we need to guard against massive refcounts in case someone is `mem::forget`ing
        // Arcs. If we don't do this the count can overflow and users will use-after-free. This
2192 // branch will never be taken in any realistic program. We abort because such a program is
2193 // incredibly degenerate, and we don't care to support it.
2194 //
        // This check is not 100% watertight: we error when the refcount grows beyond `isize::MAX`.
2196 // But we do that check *after* having done the increment, so there is a chance here that
2197 // the worst already happened and we actually do overflow the `usize` counter. However, that
2198 // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2199 // above and the `abort` below, which seems exceedingly unlikely.
2200 //
2201 // This is a global invariant, and also applies when using a compare-exchange loop to increment
2202 // counters in other methods.
2203 // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2204 // and then overflow using a few `fetch_add`s.
2205 if old_size > MAX_REFCOUNT {
2206 abort();
2207 }
2208
2209 unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2210 }
2211}
2212
2213#[unstable(feature = "ergonomic_clones", issue = "132290")]
2214impl<T: ?Sized, A: Allocator + Clone> UseCloned for Arc<T, A> {}
2215
2216#[stable(feature = "rust1", since = "1.0.0")]
2217impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2218 type Target = T;
2219
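    /// Dereferences to the inner value. A quick illustration:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x = Arc::new(5);
    /// assert_eq!(*x, 5);
    /// ```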
2220 #[inline]
2221 fn deref(&self) -> &T {
2222 &self.inner().data
2223 }
2224}
2225
2226#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2227unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Arc<T, A> {}
2228
2229#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
2230unsafe impl<T: ?Sized, A: Allocator> PinCoerceUnsized for Weak<T, A> {}
2231
2232#[unstable(feature = "deref_pure_trait", issue = "87121")]
2233unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2234
2235#[unstable(feature = "legacy_receiver_trait", issue = "none")]
2236impl<T: ?Sized> LegacyReceiver for Arc<T> {}
2237
2238#[cfg(not(no_global_oom_handling))]
2239impl<T: ?Sized + CloneToUninit, A: Allocator + Clone> Arc<T, A> {
2240 /// Makes a mutable reference into the given `Arc`.
2241 ///
2242 /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2243 /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
2244 /// referred to as clone-on-write.
2245 ///
2246 /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2247 /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2248 /// be cloned.
2249 ///
2250 /// See also [`get_mut`], which will fail rather than cloning the inner value
2251 /// or dissociating [`Weak`] pointers.
2252 ///
2253 /// [`clone`]: Clone::clone
2254 /// [`get_mut`]: Arc::get_mut
2255 ///
2256 /// # Examples
2257 ///
2258 /// ```
2259 /// use std::sync::Arc;
2260 ///
2261 /// let mut data = Arc::new(5);
2262 ///
2263 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2264 /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2265 /// *Arc::make_mut(&mut data) += 1; // Clones inner data
2266 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2267 /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
2268 ///
2269 /// // Now `data` and `other_data` point to different allocations.
2270 /// assert_eq!(*data, 8);
2271 /// assert_eq!(*other_data, 12);
2272 /// ```
2273 ///
2274 /// [`Weak`] pointers will be dissociated:
2275 ///
2276 /// ```
2277 /// use std::sync::Arc;
2278 ///
2279 /// let mut data = Arc::new(75);
2280 /// let weak = Arc::downgrade(&data);
2281 ///
2282 /// assert!(75 == *data);
2283 /// assert!(75 == *weak.upgrade().unwrap());
2284 ///
2285 /// *Arc::make_mut(&mut data) += 1;
2286 ///
2287 /// assert!(76 == *data);
2288 /// assert!(weak.upgrade().is_none());
2289 /// ```
2290 #[inline]
2291 #[stable(feature = "arc_unique", since = "1.4.0")]
2292 pub fn make_mut(this: &mut Self) -> &mut T {
2293 let size_of_val = size_of_val::<T>(&**this);
2294
2295 // Note that we hold both a strong reference and a weak reference.
2296 // Thus, releasing our strong reference only will not, by itself, cause
2297 // the memory to be deallocated.
2298 //
2299 // Use Acquire to ensure that we see any writes to `weak` that happen
2300 // before release writes (i.e., decrements) to `strong`. Since we hold a
2301 // weak count, there's no chance the ArcInner itself could be
2302 // deallocated.
2303 if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2304 // Another strong pointer exists, so we must clone.
2305
2306 let this_data_ref: &T = &**this;
2307 // `in_progress` drops the allocation if we panic before finishing initializing it.
2308 let mut in_progress: UniqueArcUninit<T, A> =
2309 UniqueArcUninit::new(this_data_ref, this.alloc.clone());
2310
2311 let initialized_clone = unsafe {
2312 // Clone. If the clone panics, `in_progress` will be dropped and clean up.
2313 this_data_ref.clone_to_uninit(in_progress.data_ptr().cast());
2314 // Cast type of pointer, now that it is initialized.
2315 in_progress.into_arc()
2316 };
2317 *this = initialized_clone;
2318 } else if this.inner().weak.load(Relaxed) != 1 {
2319 // Relaxed suffices in the above because this is fundamentally an
2320 // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2322
2323 // We removed the last strong ref, but there are additional weak
2324 // refs remaining. We'll move the contents to a new Arc, and
2325 // invalidate the other weak refs.
2326
2327 // Note that it is not possible for the read of `weak` to yield
2328 // usize::MAX (i.e., locked), since the weak count can only be
2329 // locked by a thread with a strong reference.
2330
2331 // Materialize our own implicit weak pointer, so that it can clean
2332 // up the ArcInner as needed.
2333 let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2334
2335 // Can just steal the data, all that's left is Weaks
2336 //
2337 // We don't need panic-protection like the above branch does, but we might as well
2338 // use the same mechanism.
2339 let mut in_progress: UniqueArcUninit<T, A> =
2340 UniqueArcUninit::new(&**this, this.alloc.clone());
2341 unsafe {
2342 // Initialize `in_progress` with move of **this.
2343 // We have to express this in terms of bytes because `T: ?Sized`; there is no
2344 // operation that just copies a value based on its `size_of_val()`.
2345 ptr::copy_nonoverlapping(
2346 ptr::from_ref(&**this).cast::<u8>(),
2347 in_progress.data_ptr().cast::<u8>(),
2348 size_of_val,
2349 );
2350
2351 ptr::write(this, in_progress.into_arc());
2352 }
2353 } else {
2354 // We were the sole reference of either kind; bump back up the
2355 // strong ref count.
2356 this.inner().strong.store(1, Release);
2357 }
2358
2359 // As with `get_mut()`, the unsafety is ok because our reference was
2360 // either unique to begin with, or became one upon cloning the contents.
2361 unsafe { Self::get_mut_unchecked(this) }
2362 }
2363}
2364
2365impl<T: Clone, A: Allocator> Arc<T, A> {
2366 /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2367 /// clone.
2368 ///
2369 /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2370 /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2371 ///
2372 /// # Examples
2373 ///
2374 /// ```
2375 /// # use std::{ptr, sync::Arc};
2376 /// let inner = String::from("test");
2377 /// let ptr = inner.as_ptr();
2378 ///
2379 /// let arc = Arc::new(inner);
2380 /// let inner = Arc::unwrap_or_clone(arc);
2381 /// // The inner value was not cloned
2382 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2383 ///
2384 /// let arc = Arc::new(inner);
2385 /// let arc2 = arc.clone();
2386 /// let inner = Arc::unwrap_or_clone(arc);
2387 /// // Because there were 2 references, we had to clone the inner value.
2388 /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2389 /// // `arc2` is the last reference, so when we unwrap it we get back
2390 /// // the original `String`.
2391 /// let inner = Arc::unwrap_or_clone(arc2);
2392 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2393 /// ```
2394 #[inline]
2395 #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2396 pub fn unwrap_or_clone(this: Self) -> T {
2397 Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2398 }
2399}
2400
2401impl<T: ?Sized, A: Allocator> Arc<T, A> {
2402 /// Returns a mutable reference into the given `Arc`, if there are
2403 /// no other `Arc` or [`Weak`] pointers to the same allocation.
2404 ///
2405 /// Returns [`None`] otherwise, because it is not safe to
2406 /// mutate a shared value.
2407 ///
2408 /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2409 /// the inner value when there are other `Arc` pointers.
2410 ///
2411 /// [make_mut]: Arc::make_mut
2412 /// [clone]: Clone::clone
2413 ///
2414 /// # Examples
2415 ///
2416 /// ```
2417 /// use std::sync::Arc;
2418 ///
2419 /// let mut x = Arc::new(3);
2420 /// *Arc::get_mut(&mut x).unwrap() = 4;
2421 /// assert_eq!(*x, 4);
2422 ///
2423 /// let _y = Arc::clone(&x);
2424 /// assert!(Arc::get_mut(&mut x).is_none());
2425 /// ```
2426 #[inline]
2427 #[stable(feature = "arc_unique", since = "1.4.0")]
2428 pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2429 if Self::is_unique(this) {
2430 // This unsafety is ok because we're guaranteed that the pointer
2431 // returned is the *only* pointer that will ever be returned to T. Our
2432 // reference count is guaranteed to be 1 at this point, and we required
2433 // the Arc itself to be `mut`, so we're returning the only possible
2434 // reference to the inner data.
2435 unsafe { Some(Arc::get_mut_unchecked(this)) }
2436 } else {
2437 None
2438 }
2439 }
2440
2441 /// Returns a mutable reference into the given `Arc`,
2442 /// without any check.
2443 ///
2444 /// See also [`get_mut`], which is safe and does appropriate checks.
2445 ///
2446 /// [`get_mut`]: Arc::get_mut
2447 ///
2448 /// # Safety
2449 ///
2450 /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2451 /// they must not be dereferenced or have active borrows for the duration
2452 /// of the returned borrow, and their inner type must be exactly the same as the
    /// inner type of this `Arc` (including lifetimes). This is trivially the case if no
2454 /// such pointers exist, for example immediately after `Arc::new`.
2455 ///
2456 /// # Examples
2457 ///
2458 /// ```
2459 /// #![feature(get_mut_unchecked)]
2460 ///
2461 /// use std::sync::Arc;
2462 ///
2463 /// let mut x = Arc::new(String::new());
2464 /// unsafe {
2465 /// Arc::get_mut_unchecked(&mut x).push_str("foo")
2466 /// }
2467 /// assert_eq!(*x, "foo");
2468 /// ```
2469 /// Other `Arc` pointers to the same allocation must be to the same type.
2470 /// ```no_run
2471 /// #![feature(get_mut_unchecked)]
2472 ///
2473 /// use std::sync::Arc;
2474 ///
2475 /// let x: Arc<str> = Arc::from("Hello, world!");
2476 /// let mut y: Arc<[u8]> = x.clone().into();
2477 /// unsafe {
2478 /// // this is Undefined Behavior, because x's inner type is str, not [u8]
2479 /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2480 /// }
2481 /// println!("{}", &*x); // Invalid UTF-8 in a str
2482 /// ```
2483 /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2484 /// ```no_run
2485 /// #![feature(get_mut_unchecked)]
2486 ///
2487 /// use std::sync::Arc;
2488 ///
2489 /// let x: Arc<&str> = Arc::new("Hello, world!");
2490 /// {
2491 /// let s = String::from("Oh, no!");
2492 /// let mut y: Arc<&str> = x.clone();
2493 /// unsafe {
2494 /// // this is Undefined Behavior, because x's inner type
2495 /// // is &'long str, not &'short str
2496 /// *Arc::get_mut_unchecked(&mut y) = &s;
2497 /// }
2498 /// }
2499 /// println!("{}", &*x); // Use-after-free
2500 /// ```
2501 #[inline]
2502 #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2503 pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2504 // We are careful to *not* create a reference covering the "count" fields, as
2505 // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2506 unsafe { &mut (*this.ptr.as_ptr()).data }
2507 }
2508
    /// Determines whether this is the unique reference to the underlying data.
2510 ///
2511 /// Returns `true` if there are no other `Arc` or [`Weak`] pointers to the same allocation;
2512 /// returns `false` otherwise.
2513 ///
    /// If this function returns `true`, then it is guaranteed to be safe to call [`get_mut_unchecked`]
2515 /// on this `Arc`, so long as no clones occur in between.
2516 ///
2517 /// # Examples
2518 ///
2519 /// ```
2520 /// #![feature(arc_is_unique)]
2521 ///
2522 /// use std::sync::Arc;
2523 ///
2524 /// let x = Arc::new(3);
2525 /// assert!(Arc::is_unique(&x));
2526 ///
2527 /// let y = Arc::clone(&x);
2528 /// assert!(!Arc::is_unique(&x));
2529 /// drop(y);
2530 ///
2531 /// // Weak references also count, because they could be upgraded at any time.
2532 /// let z = Arc::downgrade(&x);
2533 /// assert!(!Arc::is_unique(&x));
2534 /// ```
2535 ///
2536 /// # Pointer invalidation
2537 ///
2538 /// This function will always return the same value as `Arc::get_mut(arc).is_some()`. However,
2539 /// unlike that operation it does not produce any mutable references to the underlying data,
2540 /// meaning no pointers to the data inside the `Arc` are invalidated by the call. Thus, the
2541 /// following code is valid, even though it would be UB if it used `Arc::get_mut`:
2542 ///
2543 /// ```
2544 /// #![feature(arc_is_unique)]
2545 ///
2546 /// use std::sync::Arc;
2547 ///
2548 /// let arc = Arc::new(5);
2549 /// let pointer: *const i32 = &*arc;
2550 /// assert!(Arc::is_unique(&arc));
2551 /// assert_eq!(unsafe { *pointer }, 5);
2552 /// ```
2553 ///
2554 /// # Atomic orderings
2555 ///
    /// Concurrent drops of other `Arc` pointers to the same allocation will synchronize with this
    /// call; that is, this call performs an `Acquire` operation on the underlying strong and weak
2558 /// ref counts. This ensures that calling `get_mut_unchecked` is safe.
2559 ///
2560 /// Note that this operation requires locking the weak ref count, so concurrent calls to
2561 /// `downgrade` may spin-loop for a short period of time.
2562 ///
2563 /// [`get_mut_unchecked`]: Self::get_mut_unchecked
2564 #[inline]
2565 #[unstable(feature = "arc_is_unique", issue = "138938")]
2566 pub fn is_unique(this: &Self) -> bool {
2567 // lock the weak pointer count if we appear to be the sole weak pointer
2568 // holder.
2569 //
2570 // The acquire label here ensures a happens-before relationship with any
2571 // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2572 // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2573 // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2574 if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2575 // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2576 // counter in `drop` -- the only access that happens when any but the last reference
2577 // is being dropped.
2578 let unique = this.inner().strong.load(Acquire) == 1;
2579
2580 // The release write here synchronizes with a read in `downgrade`,
2581 // effectively preventing the above read of `strong` from happening
2582 // after the write.
2583 this.inner().weak.store(1, Release); // release the lock
2584 unique
2585 } else {
2586 false
2587 }
2588 }
2589}
2590
2591#[stable(feature = "rust1", since = "1.0.0")]
2592unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2593 /// Drops the `Arc`.
2594 ///
2595 /// This will decrement the strong reference count. If the strong reference
2596 /// count reaches zero then the only other references (if any) are
2597 /// [`Weak`], so we `drop` the inner value.
2598 ///
2599 /// # Examples
2600 ///
2601 /// ```
2602 /// use std::sync::Arc;
2603 ///
2604 /// struct Foo;
2605 ///
2606 /// impl Drop for Foo {
2607 /// fn drop(&mut self) {
2608 /// println!("dropped!");
2609 /// }
2610 /// }
2611 ///
2612 /// let foo = Arc::new(Foo);
2613 /// let foo2 = Arc::clone(&foo);
2614 ///
2615 /// drop(foo); // Doesn't print anything
2616 /// drop(foo2); // Prints "dropped!"
2617 /// ```
2618 #[inline]
2619 fn drop(&mut self) {
2620 // Because `fetch_sub` is already atomic, we do not need to synchronize
2621 // with other threads unless we are going to delete the object. This
2622 // same logic applies to the below `fetch_sub` to the `weak` count.
2623 if self.inner().strong.fetch_sub(1, Release) != 1 {
2624 return;
2625 }
2626
2627 // This fence is needed to prevent reordering of use of the data and
2628 // deletion of the data. Because it is marked `Release`, the decreasing
2629 // of the reference count synchronizes with this `Acquire` fence. This
2630 // means that use of the data happens before decreasing the reference
2631 // count, which happens before this fence, which happens before the
2632 // deletion of the data.
2633 //
2634 // As explained in the [Boost documentation][1],
2635 //
2636 // > It is important to enforce any possible access to the object in one
2637 // > thread (through an existing reference) to *happen before* deleting
2638 // > the object in a different thread. This is achieved by a "release"
2639 // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happen before), and an
2641 // > "acquire" operation before deleting the object.
2642 //
2643 // In particular, while the contents of an Arc are usually immutable, it's
2644 // possible to have interior writes to something like a Mutex<T>. Since a
2645 // Mutex is not acquired when it is deleted, we can't rely on its
2646 // synchronization logic to make writes in thread A visible to a destructor
2647 // running in thread B.
2648 //
2649 // Also note that the Acquire fence here could probably be replaced with an
2650 // Acquire load, which could improve performance in highly-contended
2651 // situations. See [2].
2652 //
2653 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2654 // [2]: (https://github.com/rust-lang/rust/pull/41714)
2655 acquire!(self.inner().strong);
2656
2657 // Make sure we aren't trying to "drop" the shared static for empty slices
2658 // used by Default::default.
2659 debug_assert!(
2660 !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
2661 "Arcs backed by a static should never reach a strong count of 0. \
2662 Likely decrement_strong_count or from_raw were called too many times.",
2663 );
2664
2665 unsafe {
2666 self.drop_slow();
2667 }
2668 }
2669}
2670
2671impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2672 /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2673 ///
2674 /// # Examples
2675 ///
2676 /// ```
2677 /// use std::any::Any;
2678 /// use std::sync::Arc;
2679 ///
2680 /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2681 /// if let Ok(string) = value.downcast::<String>() {
2682 /// println!("String ({}): {}", string.len(), string);
2683 /// }
2684 /// }
2685 ///
2686 /// let my_string = "Hello World".to_string();
2687 /// print_if_string(Arc::new(my_string));
2688 /// print_if_string(Arc::new(0i8));
2689 /// ```
2690 #[inline]
2691 #[stable(feature = "rc_downcast", since = "1.29.0")]
2692 pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2693 where
2694 T: Any + Send + Sync,
2695 {
2696 if (*self).is::<T>() {
2697 unsafe {
2698 let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2699 Ok(Arc::from_inner_in(ptr.cast(), alloc))
2700 }
2701 } else {
2702 Err(self)
2703 }
2704 }
2705
2706 /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2707 ///
2708 /// For a safe alternative see [`downcast`].
2709 ///
2710 /// # Examples
2711 ///
2712 /// ```
2713 /// #![feature(downcast_unchecked)]
2714 ///
2715 /// use std::any::Any;
2716 /// use std::sync::Arc;
2717 ///
2718 /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2719 ///
2720 /// unsafe {
2721 /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2722 /// }
2723 /// ```
2724 ///
2725 /// # Safety
2726 ///
2727 /// The contained value must be of type `T`. Calling this method
2728 /// with the incorrect type is *undefined behavior*.
    ///
2731 /// [`downcast`]: Self::downcast
2732 #[inline]
2733 #[unstable(feature = "downcast_unchecked", issue = "90850")]
2734 pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2735 where
2736 T: Any + Send + Sync,
2737 {
2738 unsafe {
2739 let (ptr, alloc) = Arc::into_inner_with_allocator(self);
2740 Arc::from_inner_in(ptr.cast(), alloc)
2741 }
2742 }
2743}
2744
2745impl<T> Weak<T> {
2746 /// Constructs a new `Weak<T>`, without allocating any memory.
2747 /// Calling [`upgrade`] on the return value always gives [`None`].
2748 ///
2749 /// [`upgrade`]: Weak::upgrade
2750 ///
2751 /// # Examples
2752 ///
2753 /// ```
2754 /// use std::sync::Weak;
2755 ///
2756 /// let empty: Weak<i64> = Weak::new();
2757 /// assert!(empty.upgrade().is_none());
2758 /// ```
2759 #[inline]
2760 #[stable(feature = "downgraded_weak", since = "1.10.0")]
2761 #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2762 #[must_use]
2763 pub const fn new() -> Weak<T> {
2764 Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc: Global }
2765 }
2766}
2767
2768impl<T, A: Allocator> Weak<T, A> {
    /// Constructs a new `Weak<T, A>` associated with the given allocator, without
    /// allocating any memory.
2771 /// Calling [`upgrade`] on the return value always gives [`None`].
2772 ///
2773 /// [`upgrade`]: Weak::upgrade
2774 ///
2775 /// # Examples
2776 ///
2777 /// ```
2778 /// #![feature(allocator_api)]
2779 ///
2780 /// use std::sync::Weak;
2781 /// use std::alloc::System;
2782 ///
2783 /// let empty: Weak<i64, _> = Weak::new_in(System);
2784 /// assert!(empty.upgrade().is_none());
2785 /// ```
2786 #[inline]
2787 #[unstable(feature = "allocator_api", issue = "32838")]
2788 pub fn new_in(alloc: A) -> Weak<T, A> {
2789 Weak { ptr: NonNull::without_provenance(NonZeroUsize::MAX), alloc }
2790 }
2791}
2792
2793/// Helper type to allow accessing the reference counts without
2794/// making any assertions about the data field.
2795struct WeakInner<'a> {
2796 weak: &'a Atomic<usize>,
2797 strong: &'a Atomic<usize>,
2798}
2799
2800impl<T: ?Sized> Weak<T> {
2801 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2802 ///
2803 /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to release the weak reference by dropping the `Weak<T>`.
2805 ///
2806 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2807 /// as these don't own anything; the method still works on them).
2808 ///
2809 /// # Safety
2810 ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
    /// weak reference, and must point to a block of memory allocated by the global allocator.
2813 ///
2814 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2815 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2816 /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
2818 /// # Examples
2819 ///
2820 /// ```
2821 /// use std::sync::{Arc, Weak};
2822 ///
2823 /// let strong = Arc::new("hello".to_owned());
2824 ///
2825 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2826 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2827 ///
2828 /// assert_eq!(2, Arc::weak_count(&strong));
2829 ///
2830 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2831 /// assert_eq!(1, Arc::weak_count(&strong));
2832 ///
2833 /// drop(strong);
2834 ///
2835 /// // Decrement the last weak count.
2836 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2837 /// ```
2838 ///
2839 /// [`new`]: Weak::new
2840 /// [`into_raw`]: Weak::into_raw
2841 /// [`upgrade`]: Weak::upgrade
2842 #[inline]
2843 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2844 pub unsafe fn from_raw(ptr: *const T) -> Self {
2845 unsafe { Weak::from_raw_in(ptr, Global) }
2846 }
2847
2848 /// Consumes the `Weak<T>` and turns it into a raw pointer.
2849 ///
2850 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2851 /// one weak reference (the weak count is not modified by this operation). It can be turned
2852 /// back into the `Weak<T>` with [`from_raw`].
2853 ///
2854 /// The same restrictions of accessing the target of the pointer as with
2855 /// [`as_ptr`] apply.
2856 ///
2857 /// # Examples
2858 ///
2859 /// ```
2860 /// use std::sync::{Arc, Weak};
2861 ///
2862 /// let strong = Arc::new("hello".to_owned());
2863 /// let weak = Arc::downgrade(&strong);
2864 /// let raw = weak.into_raw();
2865 ///
2866 /// assert_eq!(1, Arc::weak_count(&strong));
2867 /// assert_eq!("hello", unsafe { &*raw });
2868 ///
2869 /// drop(unsafe { Weak::from_raw(raw) });
2870 /// assert_eq!(0, Arc::weak_count(&strong));
2871 /// ```
2872 ///
2873 /// [`from_raw`]: Weak::from_raw
2874 /// [`as_ptr`]: Weak::as_ptr
2875 #[must_use = "losing the pointer will leak memory"]
2876 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2877 pub fn into_raw(self) -> *const T {
2878 ManuallyDrop::new(self).as_ptr()
2879 }
2880}
2881
2882impl<T: ?Sized, A: Allocator> Weak<T, A> {
2883 /// Returns a reference to the underlying allocator.
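    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::sync::Arc;
    ///
    /// let strong = Arc::new_in(5, System);
    /// let weak = Arc::downgrade(&strong);
    /// let _allocator: &System = weak.allocator();
    /// ```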
2884 #[inline]
2885 #[unstable(feature = "allocator_api", issue = "32838")]
2886 pub fn allocator(&self) -> &A {
2887 &self.alloc
2888 }
2889
2890 /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2891 ///
2892 /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2893 /// unaligned or even [`null`] otherwise.
2894 ///
2895 /// # Examples
2896 ///
2897 /// ```
2898 /// use std::sync::Arc;
2899 /// use std::ptr;
2900 ///
2901 /// let strong = Arc::new("hello".to_owned());
2902 /// let weak = Arc::downgrade(&strong);
2903 /// // Both point to the same object
2904 /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2905 /// // The strong here keeps it alive, so we can still access the object.
2906 /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2907 ///
2908 /// drop(strong);
2909 /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2910 /// // undefined behavior.
2911 /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2912 /// ```
2913 ///
2914 /// [`null`]: core::ptr::null "ptr::null"
2915 #[must_use]
2916 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2917 pub fn as_ptr(&self) -> *const T {
2918 let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2919
2920 if is_dangling(ptr) {
2921 // If the pointer is dangling, we return the sentinel directly. This cannot be
2922 // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2923 ptr as *const T
2924 } else {
2925 // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2926 // The payload may be dropped at this point, and we have to maintain provenance,
2927 // so use raw pointer manipulation.
2928 unsafe { &raw mut (*ptr).data }
2929 }
2930 }
2931
2932 /// Consumes the `Weak<T>`, returning the wrapped pointer and allocator.
2933 ///
2934 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2935 /// one weak reference (the weak count is not modified by this operation). It can be turned
2936 /// back into the `Weak<T>` with [`from_raw_in`].
2937 ///
2938 /// The same restrictions of accessing the target of the pointer as with
2939 /// [`as_ptr`] apply.
2940 ///
2941 /// # Examples
2942 ///
2943 /// ```
2944 /// #![feature(allocator_api)]
2945 /// use std::sync::{Arc, Weak};
2946 /// use std::alloc::System;
2947 ///
2948 /// let strong = Arc::new_in("hello".to_owned(), System);
2949 /// let weak = Arc::downgrade(&strong);
2950 /// let (raw, alloc) = weak.into_raw_with_allocator();
2951 ///
2952 /// assert_eq!(1, Arc::weak_count(&strong));
2953 /// assert_eq!("hello", unsafe { &*raw });
2954 ///
2955 /// drop(unsafe { Weak::from_raw_in(raw, alloc) });
2956 /// assert_eq!(0, Arc::weak_count(&strong));
2957 /// ```
2958 ///
2959 /// [`from_raw_in`]: Weak::from_raw_in
2960 /// [`as_ptr`]: Weak::as_ptr
2961 #[must_use = "losing the pointer will leak memory"]
2962 #[unstable(feature = "allocator_api", issue = "32838")]
2963 pub fn into_raw_with_allocator(self) -> (*const T, A) {
2964 let this = mem::ManuallyDrop::new(self);
2965 let result = this.as_ptr();
2966 // Safety: `this` is ManuallyDrop so the allocator will not be double-dropped
2967 let alloc = unsafe { ptr::read(&this.alloc) };
2968 (result, alloc)
2969 }
2970
2971 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
2972 /// allocator.
2973 ///
2974 /// This can be used to safely get a strong reference (by calling [`upgrade`]
    /// later) or to release the weak reference by dropping the `Weak<T>`.
2976 ///
2977 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2978 /// as these don't own anything; the method still works on them).
2979 ///
2980 /// # Safety
2981 ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
2983 /// weak reference, and must point to a block of memory allocated by `alloc`.
2984 ///
2985 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2986 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2987 /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
2989 /// # Examples
2990 ///
2991 /// ```
2992 /// use std::sync::{Arc, Weak};
2993 ///
2994 /// let strong = Arc::new("hello".to_owned());
2995 ///
2996 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2997 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2998 ///
2999 /// assert_eq!(2, Arc::weak_count(&strong));
3000 ///
3001 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
3002 /// assert_eq!(1, Arc::weak_count(&strong));
3003 ///
3004 /// drop(strong);
3005 ///
3006 /// // Decrement the last weak count.
3007 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
3008 /// ```
3009 ///
3010 /// [`new`]: Weak::new
3011 /// [`into_raw`]: Weak::into_raw
3012 /// [`upgrade`]: Weak::upgrade
3013 #[inline]
3014 #[unstable(feature = "allocator_api", issue = "32838")]
3015 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
3016 // See Weak::as_ptr for context on how the input pointer is derived.
3017
3018 let ptr = if is_dangling(ptr) {
3019 // This is a dangling Weak.
3020 ptr as *mut ArcInner<T>
3021 } else {
3022 // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
3023 // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
3024 let offset = unsafe { data_offset(ptr) };
            // Thus, we reverse the offset to get the whole ArcInner.
3026 // SAFETY: the pointer originated from a Weak, so this offset is safe.
3027 unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
3028 };
3029
3030 // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
3031 Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
3032 }
3033}
3034
3035impl<T: ?Sized, A: Allocator> Weak<T, A> {
3036 /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
3037 /// dropping of the inner value if successful.
3038 ///
3039 /// Returns [`None`] if the inner value has since been dropped.
3040 ///
3041 /// # Examples
3042 ///
3043 /// ```
3044 /// use std::sync::Arc;
3045 ///
3046 /// let five = Arc::new(5);
3047 ///
3048 /// let weak_five = Arc::downgrade(&five);
3049 ///
3050 /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
3051 /// assert!(strong_five.is_some());
3052 ///
3053 /// // Destroy all strong pointers.
3054 /// drop(strong_five);
3055 /// drop(five);
3056 ///
3057 /// assert!(weak_five.upgrade().is_none());
3058 /// ```
3059 #[must_use = "this returns a new `Arc`, \
3060 without modifying the original weak pointer"]
3061 #[stable(feature = "arc_weak", since = "1.4.0")]
3062 pub fn upgrade(&self) -> Option<Arc<T, A>>
3063 where
3064 A: Clone,
3065 {
3066 #[inline]
3067 fn checked_increment(n: usize) -> Option<usize> {
            // Any write of 0 we can observe leaves the field in a permanently zero state.
3069 if n == 0 {
3070 return None;
3071 }
3072 // See comments in `Arc::clone` for why we do this (for `mem::forget`).
3073 assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
3074 Some(n + 1)
3075 }
3076
3077 // We use a CAS loop to increment the strong count instead of a
3078 // fetch_add as this function should never take the reference count
3079 // from zero to one.
3080 //
3081 // Relaxed is fine for the failure case because we don't have any expectations about the new state.
3082 // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
3083 // value can be initialized after `Weak` references have already been created. In that case, we
3084 // expect to observe the fully initialized value.
3085 if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
3086 // SAFETY: pointer is not null, verified in checked_increment
3087 unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
3088 } else {
3089 None
3090 }
3091 }
3092
3093 /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
3094 ///
3095 /// If `self` was created using [`Weak::new`], this will return 0.
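    ///
    /// # Examples
    ///
    /// A single-threaded illustration, so the assertions are deterministic:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```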
3096 #[must_use]
3097 #[stable(feature = "weak_counts", since = "1.41.0")]
3098 pub fn strong_count(&self) -> usize {
3099 if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
3100 }
3101
3102 /// Gets an approximation of the number of `Weak` pointers pointing to this
3103 /// allocation.
3104 ///
3105 /// If `self` was created using [`Weak::new`], or if there are no remaining
3106 /// strong pointers, this will return 0.
3107 ///
3108 /// # Accuracy
3109 ///
3110 /// Due to implementation details, the returned value can be off by 1 in
3111 /// either direction when other threads are manipulating any `Arc`s or
3112 /// `Weak`s pointing to the same allocation.
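    ///
    /// # Examples
    ///
    /// A single-threaded illustration of both behaviors described above:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let w1 = Arc::downgrade(&five);
    /// let w2 = Arc::downgrade(&five);
    /// assert_eq!(2, w1.weak_count());
    ///
    /// // With no strong pointers remaining, the count reads as 0.
    /// drop(five);
    /// assert_eq!(0, w2.weak_count());
    /// ```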
3113 #[must_use]
3114 #[stable(feature = "weak_counts", since = "1.41.0")]
3115 pub fn weak_count(&self) -> usize {
3116 if let Some(inner) = self.inner() {
3117 let weak = inner.weak.load(Acquire);
3118 let strong = inner.strong.load(Relaxed);
3119 if strong == 0 {
3120 0
3121 } else {
3122 // Since we observed that there was at least one strong pointer
3123 // after reading the weak count, we know that the implicit weak
3124 // reference (present whenever any strong references are alive)
3125 // was still around when we observed the weak count, and can
3126 // therefore safely subtract it.
3127 weak - 1
3128 }
3129 } else {
3130 0
3131 }
3132 }
3133
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
    /// (i.e., when this `Weak` was created by `Weak::new`).
3136 #[inline]
3137 fn inner(&self) -> Option<WeakInner<'_>> {
3138 let ptr = self.ptr.as_ptr();
3139 if is_dangling(ptr) {
3140 None
3141 } else {
3142 // We are careful to *not* create a reference covering the "data" field, as
3143 // the field may be mutated concurrently (for example, if the last `Arc`
3144 // is dropped, the data field will be dropped in-place).
3145 Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
3146 }
3147 }
3148
    /// Returns `true` if the two `Weak`s point to the same allocation (in a manner similar to
    /// [`ptr::eq`]), or if both don't point to any allocation (because they were created with
    /// `Weak::new()`). However, this function ignores the metadata of `dyn Trait` pointers.
3152 ///
3153 /// # Notes
3154 ///
    /// Since this compares pointers, two `Weak`s created by `Weak::new()` will
    /// compare equal to each other, even though they don't point to any allocation.
3157 ///
3158 /// # Examples
3159 ///
3160 /// ```
3161 /// use std::sync::Arc;
3162 ///
3163 /// let first_rc = Arc::new(5);
3164 /// let first = Arc::downgrade(&first_rc);
3165 /// let second = Arc::downgrade(&first_rc);
3166 ///
3167 /// assert!(first.ptr_eq(&second));
3168 ///
3169 /// let third_rc = Arc::new(5);
3170 /// let third = Arc::downgrade(&third_rc);
3171 ///
3172 /// assert!(!first.ptr_eq(&third));
3173 /// ```
3174 ///
3175 /// Comparing `Weak::new`.
3176 ///
3177 /// ```
3178 /// use std::sync::{Arc, Weak};
3179 ///
3180 /// let first = Weak::new();
3181 /// let second = Weak::new();
3182 /// assert!(first.ptr_eq(&second));
3183 ///
3184 /// let third_rc = Arc::new(());
3185 /// let third = Arc::downgrade(&third_rc);
3186 /// assert!(!first.ptr_eq(&third));
3187 /// ```
3188 ///
3189 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
3190 #[inline]
3191 #[must_use]
3192 #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
3193 pub fn ptr_eq(&self, other: &Self) -> bool {
3194 ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
3195 }
3196}
3197
3198#[stable(feature = "arc_weak", since = "1.4.0")]
3199impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
3200 /// Makes a clone of the `Weak` pointer that points to the same allocation.
3201 ///
3202 /// # Examples
3203 ///
3204 /// ```
3205 /// use std::sync::{Arc, Weak};
3206 ///
3207 /// let weak_five = Arc::downgrade(&Arc::new(5));
3208 ///
3209 /// let _ = Weak::clone(&weak_five);
3210 /// ```
3211 #[inline]
3212 fn clone(&self) -> Weak<T, A> {
3213 if let Some(inner) = self.inner() {
3214 // See comments in Arc::clone() for why this is relaxed. This can use a
3215 // fetch_add (ignoring the lock) because the weak count is only locked
            // when there are *no other* weak pointers in existence. (So we can't be
            // running this code in that case.)
3218 let old_size = inner.weak.fetch_add(1, Relaxed);
3219
3220 // See comments in Arc::clone() for why we do this (for mem::forget).
3221 if old_size > MAX_REFCOUNT {
3222 abort();
3223 }
3224 }
3225
3226 Weak { ptr: self.ptr, alloc: self.alloc.clone() }
3227 }
3228}
3229
3230#[unstable(feature = "ergonomic_clones", issue = "132290")]
3231impl<T: ?Sized, A: Allocator + Clone> UseCloned for Weak<T, A> {}
3232
3233#[stable(feature = "downgraded_weak", since = "1.10.0")]
3234impl<T> Default for Weak<T> {
3235 /// Constructs a new `Weak<T>`, without allocating memory.
3236 /// Calling [`upgrade`] on the return value always
3237 /// gives [`None`].
3238 ///
3239 /// [`upgrade`]: Weak::upgrade
3240 ///
3241 /// # Examples
3242 ///
3243 /// ```
3244 /// use std::sync::Weak;
3245 ///
3246 /// let empty: Weak<i64> = Default::default();
3247 /// assert!(empty.upgrade().is_none());
3248 /// ```
3249 fn default() -> Weak<T> {
3250 Weak::new()
3251 }
3252}
3253
3254#[stable(feature = "arc_weak", since = "1.4.0")]
3255unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3256 /// Drops the `Weak` pointer.
3257 ///
3258 /// # Examples
3259 ///
3260 /// ```
3261 /// use std::sync::{Arc, Weak};
3262 ///
3263 /// struct Foo;
3264 ///
3265 /// impl Drop for Foo {
3266 /// fn drop(&mut self) {
3267 /// println!("dropped!");
3268 /// }
3269 /// }
3270 ///
3271 /// let foo = Arc::new(Foo);
3272 /// let weak_foo = Arc::downgrade(&foo);
3273 /// let other_weak_foo = Weak::clone(&weak_foo);
3274 ///
3275 /// drop(weak_foo); // Doesn't print anything
3276 /// drop(foo); // Prints "dropped!"
3277 ///
3278 /// assert!(other_weak_foo.upgrade().is_none());
3279 /// ```
3280 fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
        // deallocate the data entirely. See the discussion in Arc::drop() about
        // the memory orderings.
3284 //
3285 // It's not necessary to check for the locked state here, because the
3286 // weak count can only be locked if there was precisely one weak ref,
3287 // meaning that drop could only subsequently run ON that remaining weak
3288 // ref, which can only happen after the lock is released.
3289 let inner = if let Some(inner) = self.inner() { inner } else { return };
3290
3291 if inner.weak.fetch_sub(1, Release) == 1 {
3292 acquire!(inner.weak);
3293
3294 // Make sure we aren't trying to "deallocate" the shared static for empty slices
3295 // used by Default::default.
3296 debug_assert!(
3297 !ptr::addr_eq(self.ptr.as_ptr(), &STATIC_INNER_SLICE.inner),
3298 "Arc/Weaks backed by a static should never be deallocated. \
3299 Likely decrement_strong_count or from_raw were called too many times.",
3300 );
3301
3302 unsafe {
3303 self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3304 }
3305 }
3306 }
3307}
3308
3309#[stable(feature = "rust1", since = "1.0.0")]
3310trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3311 fn eq(&self, other: &Arc<T, A>) -> bool;
3312 fn ne(&self, other: &Arc<T, A>) -> bool;
3313}
3314
3315#[stable(feature = "rust1", since = "1.0.0")]
3316impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3317 #[inline]
3318 default fn eq(&self, other: &Arc<T, A>) -> bool {
3319 **self == **other
3320 }
3321 #[inline]
3322 default fn ne(&self, other: &Arc<T, A>) -> bool {
3323 **self != **other
3324 }
3325}
3326
/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
/// store large values that are slow to clone but also expensive to check for equality, so this
/// cost pays off more easily. It's also more likely to have two `Arc` clones that point to
/// the same value than two `&T`s.
///
/// We can only do this when `T: Eq`, as a `PartialEq` might be deliberately irreflexive.
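///
/// A sketch of the fast path, using `str` (which is `Eq`): two clones of the same
/// `Arc` compare equal on pointer identity alone, without reading the contents.
///
/// ```
/// use std::sync::Arc;
///
/// let a: Arc<str> = Arc::from("a long string");
/// let b = Arc::clone(&a);
/// // `str: Eq`, so `a == b` can short-circuit via `Arc::ptr_eq`.
/// assert!(a == b);
/// ```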
3334#[stable(feature = "rust1", since = "1.0.0")]
3335impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3336 #[inline]
3337 fn eq(&self, other: &Arc<T, A>) -> bool {
3338 Arc::ptr_eq(self, other) || **self == **other
3339 }
3340
3341 #[inline]
3342 fn ne(&self, other: &Arc<T, A>) -> bool {
3343 !Arc::ptr_eq(self, other) && **self != **other
3344 }
3345}
3346
3347#[stable(feature = "rust1", since = "1.0.0")]
3348impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3349 /// Equality for two `Arc`s.
3350 ///
    /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
3353 ///
3354 /// If `T` also implements `Eq` (implying reflexivity of equality),
3355 /// two `Arc`s that point to the same allocation are always equal.
3356 ///
3357 /// # Examples
3358 ///
3359 /// ```
3360 /// use std::sync::Arc;
3361 ///
3362 /// let five = Arc::new(5);
3363 ///
3364 /// assert!(five == Arc::new(5));
3365 /// ```
3366 #[inline]
3367 fn eq(&self, other: &Arc<T, A>) -> bool {
3368 ArcEqIdent::eq(self, other)
3369 }
3370
3371 /// Inequality for two `Arc`s.
3372 ///
3373 /// Two `Arc`s are not equal if their inner values are not equal.
3374 ///
    /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are never unequal.
3377 ///
3378 /// # Examples
3379 ///
3380 /// ```
3381 /// use std::sync::Arc;
3382 ///
3383 /// let five = Arc::new(5);
3384 ///
3385 /// assert!(five != Arc::new(6));
3386 /// ```
3387 #[inline]
3388 fn ne(&self, other: &Arc<T, A>) -> bool {
3389 ArcEqIdent::ne(self, other)
3390 }
3391}
3392
3393#[stable(feature = "rust1", since = "1.0.0")]
3394impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3395 /// Partial comparison for two `Arc`s.
3396 ///
3397 /// The two are compared by calling `partial_cmp()` on their inner values.
3398 ///
3399 /// # Examples
3400 ///
3401 /// ```
3402 /// use std::sync::Arc;
3403 /// use std::cmp::Ordering;
3404 ///
3405 /// let five = Arc::new(5);
3406 ///
3407 /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3408 /// ```
3409 fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3410 (**self).partial_cmp(&**other)
3411 }
3412
3413 /// Less-than comparison for two `Arc`s.
3414 ///
3415 /// The two are compared by calling `<` on their inner values.
3416 ///
3417 /// # Examples
3418 ///
3419 /// ```
3420 /// use std::sync::Arc;
3421 ///
3422 /// let five = Arc::new(5);
3423 ///
3424 /// assert!(five < Arc::new(6));
3425 /// ```
3426 fn lt(&self, other: &Arc<T, A>) -> bool {
3427 *(*self) < *(*other)
3428 }
3429
3430 /// 'Less than or equal to' comparison for two `Arc`s.
3431 ///
3432 /// The two are compared by calling `<=` on their inner values.
3433 ///
3434 /// # Examples
3435 ///
3436 /// ```
3437 /// use std::sync::Arc;
3438 ///
3439 /// let five = Arc::new(5);
3440 ///
3441 /// assert!(five <= Arc::new(5));
3442 /// ```
3443 fn le(&self, other: &Arc<T, A>) -> bool {
3444 *(*self) <= *(*other)
3445 }
3446
3447 /// Greater-than comparison for two `Arc`s.
3448 ///
3449 /// The two are compared by calling `>` on their inner values.
3450 ///
3451 /// # Examples
3452 ///
3453 /// ```
3454 /// use std::sync::Arc;
3455 ///
3456 /// let five = Arc::new(5);
3457 ///
3458 /// assert!(five > Arc::new(4));
3459 /// ```
3460 fn gt(&self, other: &Arc<T, A>) -> bool {
3461 *(*self) > *(*other)
3462 }
3463
3464 /// 'Greater than or equal to' comparison for two `Arc`s.
3465 ///
3466 /// The two are compared by calling `>=` on their inner values.
3467 ///
3468 /// # Examples
3469 ///
3470 /// ```
3471 /// use std::sync::Arc;
3472 ///
3473 /// let five = Arc::new(5);
3474 ///
3475 /// assert!(five >= Arc::new(5));
3476 /// ```
3477 fn ge(&self, other: &Arc<T, A>) -> bool {
3478 *(*self) >= *(*other)
3479 }
}

3481#[stable(feature = "rust1", since = "1.0.0")]
3482impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3483 /// Comparison for two `Arc`s.
3484 ///
3485 /// The two are compared by calling `cmp()` on their inner values.
3486 ///
3487 /// # Examples
3488 ///
3489 /// ```
3490 /// use std::sync::Arc;
3491 /// use std::cmp::Ordering;
3492 ///
3493 /// let five = Arc::new(5);
3494 ///
3495 /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3496 /// ```
3497 fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3498 (**self).cmp(&**other)
3499 }
}

3501#[stable(feature = "rust1", since = "1.0.0")]
3502impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3503
3504#[stable(feature = "rust1", since = "1.0.0")]
3505impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3506 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3507 fmt::Display::fmt(&**self, f)
3508 }
3509}
3510
3511#[stable(feature = "rust1", since = "1.0.0")]
3512impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3513 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3514 fmt::Debug::fmt(&**self, f)
3515 }
3516}
3517
3518#[stable(feature = "rust1", since = "1.0.0")]
3519impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3520 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3521 fmt::Pointer::fmt(&(&raw const **self), f)
3522 }
3523}
3524
3525#[cfg(not(no_global_oom_handling))]
3526#[stable(feature = "rust1", since = "1.0.0")]
3527impl<T: Default> Default for Arc<T> {
3528 /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3529 ///
3530 /// # Examples
3531 ///
3532 /// ```
3533 /// use std::sync::Arc;
3534 ///
3535 /// let x: Arc<i32> = Default::default();
3536 /// assert_eq!(*x, 0);
3537 /// ```
3538 fn default() -> Arc<T> {
3539 unsafe {
3540 Self::from_inner(
3541 Box::leak(Box::write(
3542 Box::new_uninit(),
3543 ArcInner {
3544 strong: atomic::AtomicUsize::new(1),
3545 weak: atomic::AtomicUsize::new(1),
3546 data: T::default(),
3547 },
3548 ))
3549 .into(),
3550 )
3551 }
3552 }
3553}
3554
3555/// Struct to hold the static `ArcInner` used for empty `Arc<str/CStr/[T]>` as
3556/// returned by `Default::default`.
3557///
3558/// Layout notes:
3559/// * `repr(align(16))` so we can use it for `[T]` with `align_of::<T>() <= 16`.
3560/// * `repr(C)` so `inner` is at offset 0 (and thus guaranteed to actually be aligned to 16).
3561/// * `[u8; 1]` (to be initialized with 0) so it can be used for `Arc<CStr>`.
3562#[repr(C, align(16))]
3563struct SliceArcInnerForStatic {
3564 inner: ArcInner<[u8; 1]>,
3565}
3566#[cfg(not(no_global_oom_handling))]
3567const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16;
3568
3569static STATIC_INNER_SLICE: SliceArcInnerForStatic = SliceArcInnerForStatic {
3570 inner: ArcInner {
3571 strong: atomic::AtomicUsize::new(1),
3572 weak: atomic::AtomicUsize::new(1),
3573 data: [0],
3574 },
3575};
3576
3577#[cfg(not(no_global_oom_handling))]
3578#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3579impl Default for Arc<str> {
    /// Creates an empty `str` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
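    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<str> = Default::default();
    /// assert_eq!("", &empty[..]);
    /// ```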
3583 #[inline]
3584 fn default() -> Self {
3585 let arc: Arc<[u8]> = Default::default();
3586 debug_assert!(core::str::from_utf8(&*arc).is_ok());
3587 let (ptr, alloc) = Arc::into_inner_with_allocator(arc);
3588 unsafe { Arc::from_ptr_in(ptr.as_ptr() as *mut ArcInner<str>, alloc) }
3589 }
3590}
3591
3592#[cfg(not(no_global_oom_handling))]
3593#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3594impl Default for Arc<core::ffi::CStr> {
    /// Creates an empty `CStr` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
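    ///
    /// # Examples
    ///
    /// A default `Arc<CStr>` holds the empty C string:
    ///
    /// ```
    /// use std::ffi::CStr;
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<CStr> = Default::default();
    /// assert_eq!(empty.to_bytes(), b"");
    /// ```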
3598 #[inline]
3599 fn default() -> Self {
3600 use core::ffi::CStr;
3601 let inner: NonNull<ArcInner<[u8]>> = NonNull::from(&STATIC_INNER_SLICE.inner);
3602 let inner: NonNull<ArcInner<CStr>> =
3603 NonNull::new(inner.as_ptr() as *mut ArcInner<CStr>).unwrap();
3604 // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3605 let this: mem::ManuallyDrop<Arc<CStr>> =
3606 unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3607 (*this).clone()
3608 }
3609}
3610
3611#[cfg(not(no_global_oom_handling))]
3612#[stable(feature = "more_rc_default_impls", since = "1.80.0")]
3613impl<T> Default for Arc<[T]> {
    /// Creates an empty `[T]` inside an `Arc`.
    ///
    /// This may or may not share an allocation with other `Arc`s.
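    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let empty: Arc<[i32]> = Default::default();
    /// assert!(empty.is_empty());
    /// ```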
3617 #[inline]
3618 fn default() -> Self {
3619 if align_of::<T>() <= MAX_STATIC_INNER_SLICE_ALIGNMENT {
3620 // We take a reference to the whole struct instead of the ArcInner<[u8; 1]> inside it so
3621 // we don't shrink the range of bytes the ptr is allowed to access under Stacked Borrows.
3622 // (Miri complains on 32-bit targets with Arc<[Align16]> otherwise.)
3623 // (Note that NonNull::from(&STATIC_INNER_SLICE.inner) is fine under Tree Borrows.)
3624 let inner: NonNull<SliceArcInnerForStatic> = NonNull::from(&STATIC_INNER_SLICE);
3625 let inner: NonNull<ArcInner<[T; 0]>> = inner.cast();
3626 // `this` semantically is the Arc "owned" by the static, so make sure not to drop it.
3627 let this: mem::ManuallyDrop<Arc<[T; 0]>> =
3628 unsafe { mem::ManuallyDrop::new(Arc::from_inner(inner)) };
3629 return (*this).clone();
3630 }
3631
3632 // If T's alignment is too large for the static, make a new unique allocation.
3633 let arr: [T; 0] = [];
3634 Arc::from(arr)
3635 }
3636}
3637
3638#[cfg(not(no_global_oom_handling))]
3639#[stable(feature = "pin_default_impls", since = "CURRENT_RUSTC_VERSION")]
3640impl<T> Default for Pin<Arc<T>>
3641where
3642 T: ?Sized,
3643 Arc<T>: Default,
3644{
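    /// Creates a new `Pin<Arc<T>>`, with the `Default` value for `Arc<T>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::pin::Pin;
    /// use std::sync::Arc;
    ///
    /// let pinned: Pin<Arc<i32>> = Default::default();
    /// assert_eq!(*pinned, 0);
    /// ```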
3645 #[inline]
3646 fn default() -> Self {
3647 unsafe { Pin::new_unchecked(Arc::<T>::default()) }
3648 }
3649}
3650
3651#[stable(feature = "rust1", since = "1.0.0")]
3652impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
3653 fn hash<H: Hasher>(&self, state: &mut H) {
3654 (**self).hash(state)
3655 }
3656}
3657
3658#[cfg(not(no_global_oom_handling))]
3659#[stable(feature = "from_for_ptrs", since = "1.6.0")]
3660impl<T> From<T> for Arc<T> {
    /// Converts a `T` into an `Arc<T>`.
3662 ///
3663 /// The conversion moves the value into a
3664 /// newly allocated `Arc`. It is equivalent to
3665 /// calling `Arc::new(t)`.
3666 ///
3667 /// # Example
3668 /// ```rust
3669 /// # use std::sync::Arc;
3670 /// let x = 5;
3671 /// let arc = Arc::new(5);
3672 ///
3673 /// assert_eq!(Arc::from(x), arc);
3674 /// ```
3675 fn from(t: T) -> Self {
3676 Arc::new(t)
3677 }
3678}
3679
3680#[cfg(not(no_global_oom_handling))]
3681#[stable(feature = "shared_from_array", since = "1.74.0")]
3682impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
3683 /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
3684 ///
3685 /// The conversion moves the array into a newly allocated `Arc`.
3686 ///
3687 /// # Example
3688 ///
3689 /// ```
3690 /// # use std::sync::Arc;
3691 /// let original: [i32; 3] = [1, 2, 3];
3692 /// let shared: Arc<[i32]> = Arc::from(original);
3693 /// assert_eq!(&[1, 2, 3], &shared[..]);
3694 /// ```
3695 #[inline]
3696 fn from(v: [T; N]) -> Arc<[T]> {
3697 Arc::<[T; N]>::from(v)
3698 }
3699}
3700
3701#[cfg(not(no_global_oom_handling))]
3702#[stable(feature = "shared_from_slice", since = "1.21.0")]
3703impl<T: Clone> From<&[T]> for Arc<[T]> {
3704 /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3705 ///
3706 /// # Example
3707 ///
3708 /// ```
3709 /// # use std::sync::Arc;
3710 /// let original: &[i32] = &[1, 2, 3];
3711 /// let shared: Arc<[i32]> = Arc::from(original);
3712 /// assert_eq!(&[1, 2, 3], &shared[..]);
3713 /// ```
3714 #[inline]
3715 fn from(v: &[T]) -> Arc<[T]> {
3716 <Self as ArcFromSlice<T>>::from_slice(v)
3717 }
3718}
3719
3720#[cfg(not(no_global_oom_handling))]
3721#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3722impl<T: Clone> From<&mut [T]> for Arc<[T]> {
3723 /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
3724 ///
3725 /// # Example
3726 ///
3727 /// ```
3728 /// # use std::sync::Arc;
3729 /// let mut original = [1, 2, 3];
3730 /// let original: &mut [i32] = &mut original;
3731 /// let shared: Arc<[i32]> = Arc::from(original);
3732 /// assert_eq!(&[1, 2, 3], &shared[..]);
3733 /// ```
3734 #[inline]
3735 fn from(v: &mut [T]) -> Arc<[T]> {
3736 Arc::from(&*v)
3737 }
3738}
3739
3740#[cfg(not(no_global_oom_handling))]
3741#[stable(feature = "shared_from_slice", since = "1.21.0")]
3742impl From<&str> for Arc<str> {
3743 /// Allocates a reference-counted `str` and copies `v` into it.
3744 ///
3745 /// # Example
3746 ///
3747 /// ```
3748 /// # use std::sync::Arc;
3749 /// let shared: Arc<str> = Arc::from("eggplant");
3750 /// assert_eq!("eggplant", &shared[..]);
3751 /// ```
3752 #[inline]
3753 fn from(v: &str) -> Arc<str> {
3754 let arc = Arc::<[u8]>::from(v.as_bytes());
3755 unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
3756 }
3757}
3758
3759#[cfg(not(no_global_oom_handling))]
3760#[stable(feature = "shared_from_mut_slice", since = "1.84.0")]
3761impl From<&mut str> for Arc<str> {
3762 /// Allocates a reference-counted `str` and copies `v` into it.
3763 ///
3764 /// # Example
3765 ///
3766 /// ```
3767 /// # use std::sync::Arc;
3768 /// let mut original = String::from("eggplant");
3769 /// let original: &mut str = &mut original;
3770 /// let shared: Arc<str> = Arc::from(original);
3771 /// assert_eq!("eggplant", &shared[..]);
3772 /// ```
3773 #[inline]
3774 fn from(v: &mut str) -> Arc<str> {
3775 Arc::from(&*v)
3776 }
3777}
3778
3779#[cfg(not(no_global_oom_handling))]
3780#[stable(feature = "shared_from_slice", since = "1.21.0")]
3781impl From<String> for Arc<str> {
3782 /// Allocates a reference-counted `str` and copies `v` into it.
3783 ///
3784 /// # Example
3785 ///
3786 /// ```
3787 /// # use std::sync::Arc;
3788 /// let unique: String = "eggplant".to_owned();
3789 /// let shared: Arc<str> = Arc::from(unique);
3790 /// assert_eq!("eggplant", &shared[..]);
3791 /// ```
3792 #[inline]
3793 fn from(v: String) -> Arc<str> {
3794 Arc::from(&v[..])
3795 }
3796}
3797
3798#[cfg(not(no_global_oom_handling))]
3799#[stable(feature = "shared_from_slice", since = "1.21.0")]
3800impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
    /// Moves a boxed object to a new, reference-counted allocation.
3802 ///
3803 /// # Example
3804 ///
3805 /// ```
3806 /// # use std::sync::Arc;
3807 /// let unique: Box<str> = Box::from("eggplant");
3808 /// let shared: Arc<str> = Arc::from(unique);
3809 /// assert_eq!("eggplant", &shared[..]);
3810 /// ```
3811 #[inline]
3812 fn from(v: Box<T, A>) -> Arc<T, A> {
3813 Arc::from_box_in(v)
3814 }
3815}
3816
3817#[cfg(not(no_global_oom_handling))]
3818#[stable(feature = "shared_from_slice", since = "1.21.0")]
3819impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
3820 /// Allocates a reference-counted slice and moves `v`'s items into it.
3821 ///
3822 /// # Example
3823 ///
3824 /// ```
3825 /// # use std::sync::Arc;
3826 /// let unique: Vec<i32> = vec![1, 2, 3];
3827 /// let shared: Arc<[i32]> = Arc::from(unique);
3828 /// assert_eq!(&[1, 2, 3], &shared[..]);
3829 /// ```
3830 #[inline]
3831 fn from(v: Vec<T, A>) -> Arc<[T], A> {
3832 unsafe {
3833 let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
3834
3835 let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
3836 ptr::copy_nonoverlapping(vec_ptr, (&raw mut (*rc_ptr).data) as *mut T, len);
3837
3838 // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
3839 // without dropping its contents or the allocator
3840 let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
3841
3842 Self::from_ptr_in(rc_ptr, alloc)
3843 }
3844 }
3845}
3846
3847#[stable(feature = "shared_from_cow", since = "1.45.0")]
3848impl<'a, B> From<Cow<'a, B>> for Arc<B>
3849where
3850 B: ToOwned + ?Sized,
3851 Arc<B>: From<&'a B> + From<B::Owned>,
3852{
3853 /// Creates an atomically reference-counted pointer from a clone-on-write
3854 /// pointer by copying its content.
3855 ///
3856 /// # Example
3857 ///
3858 /// ```rust
3859 /// # use std::sync::Arc;
3860 /// # use std::borrow::Cow;
3861 /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
3862 /// let shared: Arc<str> = Arc::from(cow);
3863 /// assert_eq!("eggplant", &shared[..]);
3864 /// ```
3865 #[inline]
3866 fn from(cow: Cow<'a, B>) -> Arc<B> {
3867 match cow {
3868 Cow::Borrowed(s) => Arc::from(s),
3869 Cow::Owned(s) => Arc::from(s),
3870 }
3871 }
3872}
3873
3874#[stable(feature = "shared_from_str", since = "1.62.0")]
3875impl From<Arc<str>> for Arc<[u8]> {
3876 /// Converts an atomically reference-counted string slice into a byte slice.
3877 ///
3878 /// # Example
3879 ///
3880 /// ```
3881 /// # use std::sync::Arc;
3882 /// let string: Arc<str> = Arc::from("eggplant");
3883 /// let bytes: Arc<[u8]> = Arc::from(string);
3884 /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
3885 /// ```
3886 #[inline]
3887 fn from(rc: Arc<str>) -> Self {
3888 // SAFETY: `str` has the same layout as `[u8]`.
3889 unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
3890 }
3891}
3892
3893#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
3894impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
3895 type Error = Arc<[T], A>;
3896
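    /// Converts the `Arc<[T], A>` into an `Arc<[T; N], A>` if the slice's length
    /// matches `N`, returning the original `Arc<[T], A>` otherwise.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    ///
    /// // A length mismatch hands the original `Arc<[T]>` back as the error.
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// assert!(Arc::<[i32; 4]>::try_from(slice).is_err());
    /// ```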
3897 fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
3898 if boxed_slice.len() == N {
3899 let (ptr, alloc) = Arc::into_inner_with_allocator(boxed_slice);
3900 Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
3901 } else {
3902 Err(boxed_slice)
3903 }
3904 }
3905}
3906
3907#[cfg(not(no_global_oom_handling))]
3908#[stable(feature = "shared_from_iter", since = "1.37.0")]
3909impl<T> FromIterator<T> for Arc<[T]> {
3910 /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
3911 ///
3912 /// # Performance characteristics
3913 ///
3914 /// ## The general case
3915 ///
3916 /// In the general case, collecting into `Arc<[T]>` is done by first
3917 /// collecting into a `Vec<T>`. That is, when writing the following:
3918 ///
3919 /// ```rust
3920 /// # use std::sync::Arc;
3921 /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
3922 /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3923 /// ```
3924 ///
3925 /// this behaves as if we wrote:
3926 ///
3927 /// ```rust
3928 /// # use std::sync::Arc;
3929 /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
3930 /// .collect::<Vec<_>>() // The first set of allocations happens here.
3931 /// .into(); // A second allocation for `Arc<[T]>` happens here.
3932 /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
3933 /// ```
3934 ///
3935 /// This will allocate as many times as needed for constructing the `Vec<T>`
3936 /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
3937 ///
3938 /// ## Iterators of known length
3939 ///
3940 /// When your `Iterator` implements `TrustedLen` and is of an exact size,
3941 /// a single allocation will be made for the `Arc<[T]>`. For example:
3942 ///
3943 /// ```rust
3944 /// # use std::sync::Arc;
3945 /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
3946 /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
3947 /// ```
3948 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
3949 ToArcSlice::to_arc_slice(iter.into_iter())
3950 }
3951}
3952
3953#[cfg(not(no_global_oom_handling))]
3954/// Specialization trait used for collecting into `Arc<[T]>`.
3955trait ToArcSlice<T>: Iterator<Item = T> + Sized {
3956 fn to_arc_slice(self) -> Arc<[T]>;
3957}
3958
3959#[cfg(not(no_global_oom_handling))]
3960impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
3961 default fn to_arc_slice(self) -> Arc<[T]> {
3962 self.collect::<Vec<T>>().into()
3963 }
3964}
3965
3966#[cfg(not(no_global_oom_handling))]
3967impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
3968 fn to_arc_slice(self) -> Arc<[T]> {
3969 // This is the case for a `TrustedLen` iterator.
3970 let (low, high) = self.size_hint();
3971 if let Some(high) = high {
3972 debug_assert_eq!(
3973 low,
3974 high,
3975 "TrustedLen iterator's size hint is not exact: {:?}",
3976 (low, high)
3977 );
3978
3979 unsafe {
                // SAFETY: `TrustedLen` guarantees that the size hint is exact,
                // so `low` is the iterator's exact length.
3981 Arc::from_iter_exact(self, low)
3982 }
3983 } else {
3984 // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
3985 // length exceeding `usize::MAX`.
3986 // The default implementation would collect into a vec which would panic.
3987 // Thus we panic here immediately without invoking `Vec` code.
3988 panic!("capacity overflow");
3989 }
3990 }
3991}
3992
3993#[stable(feature = "rust1", since = "1.0.0")]
3994impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
3995 fn borrow(&self) -> &T {
3996 &**self
3997 }
3998}
3999
4000#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
4001impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
4002 fn as_ref(&self) -> &T {
4003 &**self
4004 }
4005}
4006
4007#[stable(feature = "pin", since = "1.33.0")]
4008impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}
4009
4010/// Gets the offset within an `ArcInner` for the payload behind a pointer.
4011///
4012/// # Safety
4013///
4014/// The pointer must point to (and have valid metadata for) a previously
4015/// valid instance of T, but the T is allowed to be dropped.
4016unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
    // Align the unsized value to the end of the ArcInner.
    // Because ArcInner is repr(C), the data field will always be the last field in memory.
4019 // SAFETY: since the only unsized types possible are slices, trait objects,
4020 // and extern types, the input safety requirement is currently enough to
4021 // satisfy the requirements of align_of_val_raw; this is an implementation
4022 // detail of the language that must not be relied upon outside of std.
4023 unsafe { data_offset_align(align_of_val_raw(ptr)) }
4024}
4025
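/// Computes the payload offset for a given payload alignment: the size of the
/// `ArcInner` header plus any padding needed to align the payload after it.
/// For example, on a typical 64-bit target the header (two `usize` counters)
/// occupies 16 bytes, so a payload with alignment 32 lands at offset
/// `16 + 16 = 32`.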
4026#[inline]
4027fn data_offset_align(align: usize) -> usize {
4028 let layout = Layout::new::<ArcInner<()>>();
4029 layout.size() + layout.padding_needed_for(align)
4030}
4031
4032/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
4033/// but will deallocate it (without dropping the value) when dropped.
4034///
4035/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
4036#[cfg(not(no_global_oom_handling))]
4037struct UniqueArcUninit<T: ?Sized, A: Allocator> {
4038 ptr: NonNull<ArcInner<T>>,
4039 layout_for_value: Layout,
4040 alloc: Option<A>,
4041}
4042
4043#[cfg(not(no_global_oom_handling))]
4044impl<T: ?Sized, A: Allocator> UniqueArcUninit<T, A> {
    /// Allocates an ArcInner with a layout suitable to contain `for_value` or a clone of it.
4046 fn new(for_value: &T, alloc: A) -> UniqueArcUninit<T, A> {
4047 let layout = Layout::for_value(for_value);
4048 let ptr = unsafe {
4049 Arc::allocate_for_layout(
4050 layout,
4051 |layout_for_arcinner| alloc.allocate(layout_for_arcinner),
4052 |mem| mem.with_metadata_of(ptr::from_ref(for_value) as *const ArcInner<T>),
4053 )
4054 };
4055 Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout, alloc: Some(alloc) }
4056 }
4057
4058 /// Returns the pointer to be written into to initialize the [`Arc`].
4059 fn data_ptr(&mut self) -> *mut T {
4060 let offset = data_offset_align(self.layout_for_value.align());
4061 unsafe { self.ptr.as_ptr().byte_add(offset) as *mut T }
4062 }
4063
4064 /// Upgrade this into a normal [`Arc`].
4065 ///
4066 /// # Safety
4067 ///
4068 /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
4069 unsafe fn into_arc(self) -> Arc<T, A> {
4070 let mut this = ManuallyDrop::new(self);
4071 let ptr = this.ptr.as_ptr();
4072 let alloc = this.alloc.take().unwrap();
4073
4074 // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
4075 // for having initialized the data.
4076 unsafe { Arc::from_ptr_in(ptr, alloc) }
4077 }
4078}
4079
4080#[cfg(not(no_global_oom_handling))]
4081impl<T: ?Sized, A: Allocator> Drop for UniqueArcUninit<T, A> {
4082 fn drop(&mut self) {
4083 // SAFETY:
4084 // * new() produced a pointer safe to deallocate.
4085 // * We own the pointer unless into_arc() was called, which forgets us.
4086 unsafe {
4087 self.alloc.take().unwrap().deallocate(
4088 self.ptr.cast(),
4089 arcinner_layout_for_value_layout(self.layout_for_value),
4090 );
4091 }
4092 }
4093}
4094
4095#[stable(feature = "arc_error", since = "1.52.0")]
4096impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
4097 #[allow(deprecated)]
4098 fn cause(&self) -> Option<&dyn core::error::Error> {
4099 core::error::Error::cause(&**self)
4100 }
4101
4102 fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
4103 core::error::Error::source(&**self)
4104 }
4105
4106 fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
4107 core::error::Error::provide(&**self, req);
4108 }
4109}
4110
4111/// A uniquely owned [`Arc`].
4112///
/// This represents an `Arc` that is known to be uniquely owned -- that is, to have exactly one strong
4114/// reference. Multiple weak pointers can be created, but attempts to upgrade those to strong
4115/// references will fail unless the `UniqueArc` they point to has been converted into a regular `Arc`.
4116///
/// Because it is uniquely owned, the contents of a `UniqueArc` can be freely mutated. A common
/// use case is to have an object be mutable during its initialization phase, and then have it
/// become immutable and be converted into a normal `Arc`.
4120///
4121/// This can be used as a flexible way to create cyclic data structures, as in the example below.
4122///
4123/// ```
4124/// #![feature(unique_rc_arc)]
4125/// use std::sync::{Arc, Weak, UniqueArc};
4126///
4127/// struct Gadget {
4128/// me: Weak<Gadget>,
4129/// }
4130///
4131/// fn create_gadget() -> Option<Arc<Gadget>> {
4132/// let mut rc = UniqueArc::new(Gadget {
4133/// me: Weak::new(),
4134/// });
4135/// rc.me = UniqueArc::downgrade(&rc);
4136/// Some(UniqueArc::into_arc(rc))
4137/// }
4138///
4139/// create_gadget().unwrap();
4140/// ```
4141///
4142/// An advantage of using `UniqueArc` over [`Arc::new_cyclic`] to build cyclic data structures is that
4143/// [`Arc::new_cyclic`]'s `data_fn` parameter cannot be async or return a [`Result`]. As shown in the
4144/// previous example, `UniqueArc` allows for more flexibility in the construction of cyclic data,
4145/// including fallible or async constructors.
4146#[unstable(feature = "unique_rc_arc", issue = "112566")]
4147pub struct UniqueArc<
4148 T: ?Sized,
4149 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
4150> {
4151 ptr: NonNull<ArcInner<T>>,
4152 // Define the ownership of `ArcInner<T>` for drop-check
4153 _marker: PhantomData<ArcInner<T>>,
4154 // Invariance is necessary for soundness: once other `Weak`
4155 // references exist, we already have a form of shared mutability!
4156 _marker2: PhantomData<*mut T>,
4157 alloc: A,
4158}
4159
4160#[unstable(feature = "unique_rc_arc", issue = "112566")]
4161unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for UniqueArc<T, A> {}
4162
4163#[unstable(feature = "unique_rc_arc", issue = "112566")]
4164unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for UniqueArc<T, A> {}
4165
4166#[unstable(feature = "unique_rc_arc", issue = "112566")]
4167// #[unstable(feature = "coerce_unsized", issue = "18598")]
4168impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<UniqueArc<U, A>>
4169 for UniqueArc<T, A>
4170{
4171}
4172
4173//#[unstable(feature = "unique_rc_arc", issue = "112566")]
4174#[unstable(feature = "dispatch_from_dyn", issue = "none")]
4175impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<UniqueArc<U>> for UniqueArc<T> {}
4176
4177#[unstable(feature = "unique_rc_arc", issue = "112566")]
4178impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for UniqueArc<T, A> {
4179 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4180 fmt::Display::fmt(&**self, f)
4181 }
4182}
4183
4184#[unstable(feature = "unique_rc_arc", issue = "112566")]
4185impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for UniqueArc<T, A> {
4186 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4187 fmt::Debug::fmt(&**self, f)
4188 }
4189}
4190
4191#[unstable(feature = "unique_rc_arc", issue = "112566")]
4192impl<T: ?Sized, A: Allocator> fmt::Pointer for UniqueArc<T, A> {
4193 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
4194 fmt::Pointer::fmt(&(&raw const **self), f)
4195 }
4196}
4197
4198#[unstable(feature = "unique_rc_arc", issue = "112566")]
4199impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for UniqueArc<T, A> {
4200 fn borrow(&self) -> &T {
4201 &**self
4202 }
4203}
4204
4205#[unstable(feature = "unique_rc_arc", issue = "112566")]
4206impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for UniqueArc<T, A> {
4207 fn borrow_mut(&mut self) -> &mut T {
4208 &mut **self
4209 }
4210}
4211
4212#[unstable(feature = "unique_rc_arc", issue = "112566")]
4213impl<T: ?Sized, A: Allocator> AsRef<T> for UniqueArc<T, A> {
4214 fn as_ref(&self) -> &T {
4215 &**self
4216 }
4217}
4218
4219#[unstable(feature = "unique_rc_arc", issue = "112566")]
4220impl<T: ?Sized, A: Allocator> AsMut<T> for UniqueArc<T, A> {
4221 fn as_mut(&mut self) -> &mut T {
4222 &mut **self
4223 }
4224}
4225
4226#[unstable(feature = "unique_rc_arc", issue = "112566")]
4227impl<T: ?Sized, A: Allocator> Unpin for UniqueArc<T, A> {}
4228
4229#[unstable(feature = "unique_rc_arc", issue = "112566")]
4230impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for UniqueArc<T, A> {
4231 /// Equality for two `UniqueArc`s.
4232 ///
4233 /// Two `UniqueArc`s are equal if their inner values are equal.
4234 ///
4235 /// # Examples
4236 ///
4237 /// ```
4238 /// #![feature(unique_rc_arc)]
4239 /// use std::sync::UniqueArc;
4240 ///
4241 /// let five = UniqueArc::new(5);
4242 ///
4243 /// assert!(five == UniqueArc::new(5));
4244 /// ```
4245 #[inline]
4246 fn eq(&self, other: &Self) -> bool {
4247 PartialEq::eq(&**self, &**other)
4248 }
4249}
4250
4251#[unstable(feature = "unique_rc_arc", issue = "112566")]
4252impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for UniqueArc<T, A> {
4253 /// Partial comparison for two `UniqueArc`s.
4254 ///
4255 /// The two are compared by calling `partial_cmp()` on their inner values.
4256 ///
4257 /// # Examples
4258 ///
4259 /// ```
4260 /// #![feature(unique_rc_arc)]
4261 /// use std::sync::UniqueArc;
4262 /// use std::cmp::Ordering;
4263 ///
4264 /// let five = UniqueArc::new(5);
4265 ///
4266 /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&UniqueArc::new(6)));
4267 /// ```
4268 #[inline(always)]
4269 fn partial_cmp(&self, other: &UniqueArc<T, A>) -> Option<Ordering> {
4270 (**self).partial_cmp(&**other)
4271 }
4272
4273 /// Less-than comparison for two `UniqueArc`s.
4274 ///
4275 /// The two are compared by calling `<` on their inner values.
4276 ///
4277 /// # Examples
4278 ///
4279 /// ```
4280 /// #![feature(unique_rc_arc)]
4281 /// use std::sync::UniqueArc;
4282 ///
4283 /// let five = UniqueArc::new(5);
4284 ///
4285 /// assert!(five < UniqueArc::new(6));
4286 /// ```
4287 #[inline(always)]
4288 fn lt(&self, other: &UniqueArc<T, A>) -> bool {
4289 **self < **other
4290 }
4291
4292 /// 'Less than or equal to' comparison for two `UniqueArc`s.
4293 ///
4294 /// The two are compared by calling `<=` on their inner values.
4295 ///
4296 /// # Examples
4297 ///
4298 /// ```
4299 /// #![feature(unique_rc_arc)]
4300 /// use std::sync::UniqueArc;
4301 ///
4302 /// let five = UniqueArc::new(5);
4303 ///
4304 /// assert!(five <= UniqueArc::new(5));
4305 /// ```
4306 #[inline(always)]
4307 fn le(&self, other: &UniqueArc<T, A>) -> bool {
4308 **self <= **other
4309 }
4310
4311 /// Greater-than comparison for two `UniqueArc`s.
4312 ///
4313 /// The two are compared by calling `>` on their inner values.
4314 ///
4315 /// # Examples
4316 ///
4317 /// ```
4318 /// #![feature(unique_rc_arc)]
4319 /// use std::sync::UniqueArc;
4320 ///
4321 /// let five = UniqueArc::new(5);
4322 ///
4323 /// assert!(five > UniqueArc::new(4));
4324 /// ```
4325 #[inline(always)]
4326 fn gt(&self, other: &UniqueArc<T, A>) -> bool {
4327 **self > **other
4328 }
4329
4330 /// 'Greater than or equal to' comparison for two `UniqueArc`s.
4331 ///
4332 /// The two are compared by calling `>=` on their inner values.
4333 ///
4334 /// # Examples
4335 ///
4336 /// ```
4337 /// #![feature(unique_rc_arc)]
4338 /// use std::sync::UniqueArc;
4339 ///
4340 /// let five = UniqueArc::new(5);
4341 ///
4342 /// assert!(five >= UniqueArc::new(5));
4343 /// ```
4344 #[inline(always)]
4345 fn ge(&self, other: &UniqueArc<T, A>) -> bool {
4346 **self >= **other
4347 }
4348}
4349
4350#[unstable(feature = "unique_rc_arc", issue = "112566")]
4351impl<T: ?Sized + Ord, A: Allocator> Ord for UniqueArc<T, A> {
4352 /// Comparison for two `UniqueArc`s.
4353 ///
4354 /// The two are compared by calling `cmp()` on their inner values.
4355 ///
4356 /// # Examples
4357 ///
4358 /// ```
4359 /// #![feature(unique_rc_arc)]
4360 /// use std::sync::UniqueArc;
4361 /// use std::cmp::Ordering;
4362 ///
4363 /// let five = UniqueArc::new(5);
4364 ///
4365 /// assert_eq!(Ordering::Less, five.cmp(&UniqueArc::new(6)));
4366 /// ```
4367 #[inline]
4368 fn cmp(&self, other: &UniqueArc<T, A>) -> Ordering {
4369 (**self).cmp(&**other)
4370 }
4371}
4372
4373#[unstable(feature = "unique_rc_arc", issue = "112566")]
4374impl<T: ?Sized + Eq, A: Allocator> Eq for UniqueArc<T, A> {}
4375
4376#[unstable(feature = "unique_rc_arc", issue = "112566")]
4377impl<T: ?Sized + Hash, A: Allocator> Hash for UniqueArc<T, A> {
4378 fn hash<H: Hasher>(&self, state: &mut H) {
4379 (**self).hash(state);
4380 }
4381}
4382
4383impl<T> UniqueArc<T, Global> {
4384 /// Creates a new `UniqueArc`.
4385 ///
4386 /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
4387 /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
4388 /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
4389 /// point to the new [`Arc`].
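    ///
    /// # Examples
    ///
    /// A minimal sketch of unique ownership allowing direct mutation:
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let mut five = UniqueArc::new(5);
    /// *five += 1;
    /// assert_eq!(*five, 6);
    /// ```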
4390 #[cfg(not(no_global_oom_handling))]
4391 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4392 #[must_use]
4393 pub fn new(value: T) -> Self {
4394 Self::new_in(value, Global)
4395 }
4396}
4397
4398impl<T, A: Allocator> UniqueArc<T, A> {
4399 /// Creates a new `UniqueArc` in the provided allocator.
4400 ///
4401 /// Weak references to this `UniqueArc` can be created with [`UniqueArc::downgrade`]. Upgrading
4402 /// these weak references will fail before the `UniqueArc` has been converted into an [`Arc`].
4403 /// After converting the `UniqueArc` into an [`Arc`], any weak references created beforehand will
4404 /// point to the new [`Arc`].
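    ///
    /// # Examples
    ///
    /// A sketch using the unstable `allocator_api` `Global` allocator:
    ///
    /// ```
    /// #![feature(unique_rc_arc, allocator_api)]
    /// use std::alloc::Global;
    /// use std::sync::UniqueArc;
    ///
    /// let mut five = UniqueArc::new_in(5, Global);
    /// *five += 1;
    /// assert_eq!(*five, 6);
    /// ```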
4405 #[cfg(not(no_global_oom_handling))]
4406 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4407 #[must_use]
4408 // #[unstable(feature = "allocator_api", issue = "32838")]
4409 pub fn new_in(data: T, alloc: A) -> Self {
4410 let (ptr, alloc) = Box::into_unique(Box::new_in(
4411 ArcInner {
4412 strong: atomic::AtomicUsize::new(0),
                // Keep one weak reference so that, if all the weak pointers that are
                // created are dropped, the `UniqueArc` still stays valid.
4415 weak: atomic::AtomicUsize::new(1),
4416 data,
4417 },
4418 alloc,
4419 ));
4420 Self { ptr: ptr.into(), _marker: PhantomData, _marker2: PhantomData, alloc }
4421 }
4422}
4423
4424impl<T: ?Sized, A: Allocator> UniqueArc<T, A> {
4425 /// Converts the `UniqueArc` into a regular [`Arc`].
4426 ///
    /// This consumes the `UniqueArc` and returns a regular [`Arc`] that contains the value
    /// previously stored in the `UniqueArc`.
4429 ///
4430 /// Any weak references created before this method is called can now be upgraded to strong
4431 /// references.
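    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // Weak references cannot be upgraded yet...
    /// assert!(weak.upgrade().is_none());
    ///
    /// let shared = UniqueArc::into_arc(unique);
    /// assert_eq!(*shared, 5);
    /// // ...but they can once the `UniqueArc` becomes a regular `Arc`.
    /// assert_eq!(weak.upgrade().as_deref(), Some(&5));
    /// ```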
4432 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4433 #[must_use]
4434 pub fn into_arc(this: Self) -> Arc<T, A> {
4435 let this = ManuallyDrop::new(this);
4436
4437 // Move the allocator out.
4438 // SAFETY: `this.alloc` will not be accessed again, nor dropped because it is in
4439 // a `ManuallyDrop`.
4440 let alloc: A = unsafe { ptr::read(&this.alloc) };
4441
4442 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4443 unsafe {
4444 // Convert our weak reference into a strong reference
4445 (*this.ptr.as_ptr()).strong.store(1, Release);
4446 Arc::from_inner_in(this.ptr, alloc)
4447 }
4448 }
4449}
4450
4451impl<T: ?Sized, A: Allocator + Clone> UniqueArc<T, A> {
4452 /// Creates a new weak reference to the `UniqueArc`.
4453 ///
    /// Attempting to upgrade this weak reference will fail before the `UniqueArc` has been
    /// converted into an [`Arc`] using [`UniqueArc::into_arc`].
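    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(unique_rc_arc)]
    /// use std::sync::UniqueArc;
    ///
    /// let unique = UniqueArc::new(5);
    /// let weak = UniqueArc::downgrade(&unique);
    /// // The `UniqueArc` holds no strong references yet.
    /// assert_eq!(weak.strong_count(), 0);
    /// assert!(weak.upgrade().is_none());
    /// ```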
4456 #[unstable(feature = "unique_rc_arc", issue = "112566")]
4457 #[must_use]
4458 pub fn downgrade(this: &Self) -> Weak<T, A> {
4459 // Using a relaxed ordering is alright here, as knowledge of the
4460 // original reference prevents other threads from erroneously deleting
4461 // the object or converting the object to a normal `Arc<T, A>`.
4462 //
4463 // Note that we don't need to test if the weak counter is locked because there
4464 // are no such operations like `Arc::get_mut` or `Arc::make_mut` that will lock
4465 // the weak counter.
4466 //
4467 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4468 let old_size = unsafe { (*this.ptr.as_ptr()).weak.fetch_add(1, Relaxed) };
4469
4470 // See comments in Arc::clone() for why we do this (for mem::forget).
4471 if old_size > MAX_REFCOUNT {
4472 abort();
4473 }
4474
4475 Weak { ptr: this.ptr, alloc: this.alloc.clone() }
4476 }
4477}
4478
4479#[unstable(feature = "unique_rc_arc", issue = "112566")]
4480impl<T: ?Sized, A: Allocator> Deref for UniqueArc<T, A> {
4481 type Target = T;
4482
4483 fn deref(&self) -> &T {
4484 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4485 unsafe { &self.ptr.as_ref().data }
4486 }
4487}
4488
4489// #[unstable(feature = "unique_rc_arc", issue = "112566")]
4490#[unstable(feature = "pin_coerce_unsized_trait", issue = "123430")]
4491unsafe impl<T: ?Sized> PinCoerceUnsized for UniqueArc<T> {}
4492
4493#[unstable(feature = "unique_rc_arc", issue = "112566")]
4494impl<T: ?Sized, A: Allocator> DerefMut for UniqueArc<T, A> {
4495 fn deref_mut(&mut self) -> &mut T {
4496 // SAFETY: This pointer was allocated at creation time so we know it is valid. We know we
4497 // have unique ownership and therefore it's safe to make a mutable reference because
4498 // `UniqueArc` owns the only strong reference to itself.
4499 // We also need to be careful to only create a mutable reference to the `data` field,
4500 // as a mutable reference to the entire `ArcInner` would assert uniqueness over the
4501 // ref count fields too, invalidating any attempt by `Weak`s to access the ref count.
4502 unsafe { &mut (*self.ptr.as_ptr()).data }
4503 }
4504}
4505
4506#[unstable(feature = "unique_rc_arc", issue = "112566")]
4507// #[unstable(feature = "deref_pure_trait", issue = "87121")]
4508unsafe impl<T: ?Sized, A: Allocator> DerefPure for UniqueArc<T, A> {}
4509
4510#[unstable(feature = "unique_rc_arc", issue = "112566")]
4511unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for UniqueArc<T, A> {
4512 fn drop(&mut self) {
4513 // See `Arc::drop_slow` which drops an `Arc` with a strong count of 0.
4514 // SAFETY: This pointer was allocated at creation time so we know it is valid.
4515 let _weak = Weak { ptr: self.ptr, alloc: &self.alloc };
4516
4517 unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
4518 }
4519}