Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

108
third-party/vendor/loom/src/alloc.rs vendored Normal file
View file

@ -0,0 +1,108 @@
//! Memory allocation APIs
use crate::rt;
pub use std::alloc::Layout;
/// Allocate memory with the global allocator.
///
/// This is equivalent to the standard library's [`std::alloc::alloc`], but with
/// the addition of leak tracking for allocated objects. Loom's leak tracking
/// will not function for allocations not performed via this method.
///
/// This function forwards calls to the [`GlobalAlloc::alloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc`].
///
/// [`GlobalAlloc::alloc`]: std::alloc::GlobalAlloc::alloc
#[track_caller]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
    // Perform the real allocation first, then register the returned pointer
    // with the runtime so leak tracking can observe it.
    let raw = std::alloc::alloc(layout);
    rt::alloc(raw, location!());
    raw
}
/// Allocate zero-initialized memory with the global allocator.
///
/// This is equivalent to the standard library's [`std::alloc::alloc_zeroed`],
/// but with the addition of leak tracking for allocated objects. Loom's leak
/// tracking will not function for allocations not performed via this method.
///
/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc_zeroed`].
///
/// [`GlobalAlloc::alloc_zeroed`]: std::alloc::GlobalAlloc::alloc_zeroed
#[track_caller]
pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
    // Same shape as `alloc`: allocate (zeroed), then register the pointer
    // with the runtime's leak tracker before handing it to the caller.
    let raw = std::alloc::alloc_zeroed(layout);
    rt::alloc(raw, location!());
    raw
}
/// Deallocate memory with the global allocator.
///
/// This is equivalent to the standard library's [`std::alloc::dealloc`],
/// but with the addition of leak tracking for allocated objects. Loom's leak
/// tracking may report false positives if allocations allocated with
/// [`loom::alloc::alloc`] or [`loom::alloc::alloc_zeroed`] are deallocated via
/// [`std::alloc::dealloc`] rather than by this function.
///
/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// # Safety
///
/// See [`GlobalAlloc::dealloc`].
///
/// [`GlobalAlloc::dealloc`]: std::alloc::GlobalAlloc::dealloc
/// [`loom::alloc::alloc`]: crate::alloc::alloc
/// [`loom::alloc::alloc_zeroed`]: crate::alloc::alloc_zeroed
#[track_caller]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
    // Untrack *before* freeing: once the memory is returned to the allocator
    // the pointer may be reused, so the tracker entry must already be gone.
    rt::dealloc(ptr, location!());
    std::alloc::dealloc(ptr, layout)
}
/// Track allocations, detecting leaks
#[derive(Debug)]
pub struct Track<T> {
    // The tracked value itself; accessible via `get_ref`/`get_mut`/`into_inner`.
    value: T,
    /// Drop guard tracking the allocation's lifetime.
    _obj: rt::Allocation,
}
impl<T> Track<T> {
    /// Begin leak-tracking `value`, recording the caller's location.
    #[track_caller]
    pub fn new(value: T) -> Track<T> {
        let _obj = rt::Allocation::new(location!());
        Track { value, _obj }
    }
    /// Borrow the tracked value immutably.
    pub fn get_ref(&self) -> &T {
        &self.value
    }
    /// Borrow the tracked value mutably.
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.value
    }
    /// Consume the tracker and return the value, ending leak tracking.
    pub fn into_inner(self) -> T {
        self.value
    }
}

111
third-party/vendor/loom/src/cell/cell.rs vendored Normal file
View file

@ -0,0 +1,111 @@
use super::UnsafeCell;
/// A checked version of [`std::cell::Cell`], implemented on top of
/// [`loom::cell::UnsafeCell`][unsafecell].
///
/// Unlike [`loom::cell::UnsafeCell`][unsafecell], this provides an API that's
/// largely compatible with the standard counterpart.
///
/// [unsafecell]: crate::cell::UnsafeCell
#[derive(Debug)]
pub struct Cell<T> {
    // Underlying storage: every read/write goes through the tracked
    // `UnsafeCell`, which is what lets Loom check the accesses.
    cell: UnsafeCell<T>,
}
// unsafe impl<T> Send for Cell<T> where T: Send {}
impl<T> Cell<T> {
    /// Wraps `v` in a new access-checked `Cell`.
    #[track_caller]
    pub fn new(v: T) -> Self {
        Self {
            cell: UnsafeCell::new(v),
        }
    }
    /// Stores `val` into the cell, dropping whatever was there before.
    #[track_caller]
    pub fn set(&self, val: T) {
        drop(self.replace(val));
    }
    /// Exchanges the contents of `self` and `other`.
    #[track_caller]
    pub fn swap(&self, other: &Self) {
        // Swapping a cell with itself is a no-op; bail out early so we never
        // take two overlapping mutable accesses on the same cell.
        if core::ptr::eq(self, other) {
            return;
        }
        self.cell.with_mut(|lhs| {
            other.cell.with_mut(|rhs| unsafe {
                core::ptr::swap(lhs, rhs);
            })
        })
    }
    /// Moves `val` into the cell and returns the value it displaced.
    #[track_caller]
    pub fn replace(&self, val: T) -> T {
        self.cell
            .with_mut(|ptr| unsafe { core::mem::replace(&mut *ptr, val) })
    }
    /// Returns a copy of the contained value.
    #[track_caller]
    pub fn get(&self) -> T
    where
        T: Copy,
    {
        self.cell.with(|ptr| unsafe { *ptr })
    }
    /// Replaces the contents with `T::default()`, returning the old value.
    #[track_caller]
    pub fn take(&self) -> T
    where
        T: Default,
    {
        self.replace(T::default())
    }
}
impl<T: Default> Default for Cell<T> {
    /// Creates a `Cell` containing `T::default()`.
    #[track_caller]
    fn default() -> Cell<T> {
        Cell::new(T::default())
    }
}
impl<T: Copy> Clone for Cell<T> {
    /// Clones by copying the current contents into a fresh `Cell`.
    #[track_caller]
    fn clone(&self) -> Cell<T> {
        Cell::new(self.get())
    }
}
impl<T> From<T> for Cell<T> {
    /// Wraps `src` in a new `Cell`.
    #[track_caller]
    fn from(src: T) -> Cell<T> {
        Cell::new(src)
    }
}
// The comparison impls read the values out via `get`, which is why they all
// require `T: Copy` in addition to the comparison bound.
impl<T: PartialEq + Copy> PartialEq for Cell<T> {
    fn eq(&self, other: &Self) -> bool {
        self.get() == other.get()
    }
}
impl<T: Eq + Copy> Eq for Cell<T> {}
impl<T: PartialOrd + Copy> PartialOrd for Cell<T> {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.get().partial_cmp(&other.get())
    }
}
impl<T: Ord + Copy> Ord for Cell<T> {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.get().cmp(&other.get())
    }
}

View file

@ -0,0 +1,8 @@
//! Shareable mutable containers.
#[allow(clippy::module_inception)]
mod cell;
mod unsafe_cell;
pub use self::cell::Cell;
pub use self::unsafe_cell::{ConstPtr, MutPtr, UnsafeCell};

View file

@ -0,0 +1,538 @@
use crate::rt;
/// A checked version of `std::cell::UnsafeCell`.
///
/// Instead of providing a `get()` API, this version of `UnsafeCell` provides
/// `with` and `with_mut`. Both functions take a closure in order to track the
/// start and end of the access to the underlying cell.
#[derive(Debug)]
pub struct UnsafeCell<T: ?Sized> {
    /// Causality associated with the cell
    state: rt::Cell,
    // The actual storage; `state` must be consulted before touching it.
    data: std::cell::UnsafeCell<T>,
}
/// A checked immutable raw pointer to an [`UnsafeCell`].
///
/// This type is essentially a [`*const T`], but with the added ability to
/// participate in Loom's [`UnsafeCell`] access tracking. While a `ConstPtr` to a
/// given [`UnsafeCell`] exists, Loom will track that the [`UnsafeCell`] is
/// being accessed immutably.
///
/// [`ConstPtr`]s are produced by the [`UnsafeCell::get`] method. The pointed
/// value can be accessed using [`ConstPtr::deref`].
///
/// Any number of [`ConstPtr`]s may concurrently access a given [`UnsafeCell`].
/// However, if the [`UnsafeCell`] is accessed mutably (by
/// [`UnsafeCell::with_mut`] or [`UnsafeCell::get_mut`]) while a [`ConstPtr`]
/// exists, Loom will detect the concurrent mutable and immutable accesses and
/// panic.
///
/// Note that the cell is considered to be immutably accessed for *the entire
/// lifespan of the `ConstPtr`*, not just when the `ConstPtr` is actively
/// dereferenced.
///
/// # Safety
///
/// Although the `ConstPtr` type is checked for concurrent access violations, it
/// is **still a raw pointer**. A `ConstPtr` is not bound to the lifetime of the
/// [`UnsafeCell`] from which it was produced, and may outlive the cell. Loom
/// does *not* currently check for dangling pointers. Therefore, the user is
/// responsible for ensuring that a `ConstPtr` does not dangle. However, unlike
/// a normal `*const T`, `ConstPtr`s may only be produced from a valid
/// [`UnsafeCell`], and therefore can be assumed to never be null.
///
/// Additionally, it is possible to write code in which raw pointers to an
/// [`UnsafeCell`] are constructed that are *not* checked by Loom. If a raw
/// pointer "escapes" Loom's tracking, invalid accesses may not be detected,
/// resulting in tests passing when they should have failed. See [here] for
/// details on how to avoid accidentally escaping the model.
///
/// [`*const T`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html
/// [here]: #correct-usage
#[derive(Debug)]
pub struct ConstPtr<T: ?Sized> {
    /// Drop guard representing the lifetime of the `ConstPtr`'s access.
    _guard: rt::cell::Reading,
    // Raw pointer into the originating `UnsafeCell`'s storage.
    ptr: *const T,
}
/// A checked mutable raw pointer to an [`UnsafeCell`].
///
/// This type is essentially a [`*mut T`], but with the added ability to
/// participate in Loom's [`UnsafeCell`] access tracking. While a `MutPtr` to a
/// given [`UnsafeCell`] exists, Loom will track that the [`UnsafeCell`] is
/// being accessed mutably.
///
/// [`MutPtr`]s are produced by the [`UnsafeCell::get_mut`] method. The pointed
/// value can be accessed using [`MutPtr::deref`].
///
/// If an [`UnsafeCell`] is accessed mutably (by [`UnsafeCell::with_mut`] or
/// [`UnsafeCell::get_mut`]) or immutably (by [`UnsafeCell::with`] or
/// [`UnsafeCell::get`]) while a [`MutPtr`] to that cell exists, Loom will
/// detect the invalid accesses and panic.
///
/// Note that the cell is considered to be mutably accessed for *the entire
/// lifespan of the `MutPtr`*, not just when the `MutPtr` is actively
/// dereferenced.
///
/// # Safety
///
/// Although the `MutPtr` type is checked for concurrent access violations, it
/// is **still a raw pointer**. A `MutPtr` is not bound to the lifetime of the
/// [`UnsafeCell`] from which it was produced, and may outlive the cell. Loom
/// does *not* currently check for dangling pointers. Therefore, the user is
/// responsible for ensuring that a `MutPtr` does not dangle. However, unlike
/// a normal `*mut T`, `MutPtr`s may only be produced from a valid
/// [`UnsafeCell`], and therefore can be assumed to never be null.
///
/// Additionally, it is possible to write code in which raw pointers to an
/// [`UnsafeCell`] are constructed that are *not* checked by Loom. If a raw
/// pointer "escapes" Loom's tracking, invalid accesses may not be detected,
/// resulting in tests passing when they should have failed. See [here] for
/// details on how to avoid accidentally escaping the model.
///
/// [`*mut T`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html
/// [here]: #correct-usage
#[derive(Debug)]
pub struct MutPtr<T: ?Sized> {
    /// Drop guard representing the lifetime of the `MutPtr`'s access.
    _guard: rt::cell::Writing,
    // Raw pointer into the originating `UnsafeCell`'s storage.
    ptr: *mut T,
}
impl<T> UnsafeCell<T> {
    /// Constructs a new access-tracked cell wrapping `data`.
    #[track_caller]
    pub fn new(data: T) -> UnsafeCell<T> {
        UnsafeCell {
            // Record the construction site so violations can be reported
            // against a source location.
            state: rt::Cell::new(location!()),
            data: std::cell::UnsafeCell::new(data),
        }
    }
    /// Consumes the cell and returns the wrapped value.
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}
impl<T: ?Sized> UnsafeCell<T> {
    /// Get an immutable pointer to the wrapped value.
    ///
    /// # Panics
    ///
    /// This function will panic if the access is not valid under the Rust memory
    /// model.
    #[track_caller]
    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*const T) -> R,
    {
        // The read guard is held across the entire call to `f`; when it drops
        // at the end of this scope, the tracked immutable access ends.
        let _reading = self.state.start_read(location!());
        f(self.data.get() as *const T)
    }
    /// Get a mutable pointer to the wrapped value.
    ///
    /// # Panics
    ///
    /// This function will panic if the access is not valid under the Rust memory
    /// model.
    #[track_caller]
    pub fn with_mut<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*mut T) -> R,
    {
        // As in `with`, the write guard spans the whole closure call.
        let _writing = self.state.start_write(location!());
        f(self.data.get())
    }
    /// Get an immutable pointer to the wrapped value.
    ///
    /// This function returns a [`ConstPtr`] guard, which is analogous to a
    /// `*const T`, but tracked by Loom. As long as the returned `ConstPtr`
    /// exists, Loom will consider the cell to be accessed immutably.
    ///
    /// This means that any mutable accesses (e.g. calls to [`with_mut`] or
    /// [`get_mut`]) while the returned guard is live will result in a panic.
    ///
    /// # Panics
    ///
    /// This function will panic if the access is not valid under the Rust memory
    /// model.
    ///
    /// [`with_mut`]: UnsafeCell::with_mut
    /// [`get_mut`]: UnsafeCell::get_mut
    #[track_caller]
    pub fn get(&self) -> ConstPtr<T> {
        ConstPtr {
            // The guard is stored in the returned pointer, so the tracked
            // immutable access lasts until the `ConstPtr` is dropped.
            _guard: self.state.start_read(location!()),
            ptr: self.data.get(),
        }
    }
    /// Get a mutable pointer to the wrapped value.
    ///
    /// This function returns a [`MutPtr`] guard, which is analogous to a
    /// `*mut T`, but tracked by Loom. As long as the returned `MutPtr`
    /// exists, Loom will consider the cell to be accessed mutably.
    ///
    /// This means that any concurrent mutable or immutable accesses (e.g. calls
    /// to [`with`], [`with_mut`], [`get`], or [`get_mut`]) while the returned
    /// guard is live will result in a panic.
    ///
    /// # Panics
    ///
    /// This function will panic if the access is not valid under the Rust memory
    /// model.
    ///
    /// [`with`]: UnsafeCell::with
    /// [`with_mut`]: UnsafeCell::with_mut
    /// [`get`]: UnsafeCell::get
    /// [`get_mut`]: UnsafeCell::get_mut
    #[track_caller]
    pub fn get_mut(&self) -> MutPtr<T> {
        MutPtr {
            // As in `get`: the write access lasts for the guard's lifetime.
            _guard: self.state.start_write(location!()),
            ptr: self.data.get(),
        }
    }
}
impl<T: Default> Default for UnsafeCell<T> {
    /// Creates a cell containing `T::default()`.
    fn default() -> UnsafeCell<T> {
        UnsafeCell::new(Default::default())
    }
}
impl<T> From<T> for UnsafeCell<T> {
    /// Wraps `src` in a new access-tracked cell.
    fn from(src: T) -> UnsafeCell<T> {
        UnsafeCell::new(src)
    }
}
impl<T: ?Sized> ConstPtr<T> {
    /// Dereference the raw pointer.
    ///
    /// # Safety
    ///
    /// This is equivalent to dereferencing a `*const T` pointer, so all the
    /// same safety considerations apply here.
    ///
    /// Because the `ConstPtr` type can only be created by calling
    /// [`UnsafeCell::get`] on a valid `UnsafeCell`, we know the pointer
    /// will never be null.
    ///
    /// Loom tracks whether the value contained in the [`UnsafeCell`] from which
    /// this pointer originated is being concurrently accessed, and will panic
    /// if a data race could occur. However, `loom` does _not_ track liveness
    /// --- the [`UnsafeCell`] this pointer points to may have been dropped.
    /// Therefore, the caller is responsible for ensuring this pointer is not
    /// dangling.
    pub unsafe fn deref(&self) -> &T {
        // SAFETY: the pointer came from a valid `UnsafeCell` so it is never
        // null; the caller guarantees it is not dangling (see above).
        &*self.ptr
    }
    /// Perform an operation with the actual value of the raw pointer.
    ///
    /// This may be used to call functions like [`ptr::read`] and [`ptr::eq`],
    /// which are not exposed by the `ConstPtr` type, cast the pointer to an
    /// integer, et cetera.
    ///
    /// # Correct Usage
    ///
    /// Note that the raw pointer passed into the closure *must not* be moved
    /// out of the closure, as doing so will allow it to "escape" Loom's ability
    /// to track accesses.
    ///
    /// Loom considers the [`UnsafeCell`] from which this pointer originated to
    /// be "accessed immutably" as long as the [`ConstPtr`] guard exists. When the
    /// guard is dropped, Loom considers the immutable access to have ended. This
    /// means that if the `*const T` passed to a `with` closure is moved _out_ of
    /// that closure, it may outlive the guard, and thus exist past the end of
    /// the access (as understood by Loom).
    ///
    /// For example, code like this is incorrect:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    /// let cell = UnsafeCell::new(1);
    ///
    /// let ptr = {
    ///     let tracked_ptr = cell.get(); // tracked immutable access begins here
    ///
    ///     // move the real pointer out of the simulated pointer
    ///     tracked_ptr.with(|real_ptr| real_ptr)
    ///
    /// }; // tracked immutable access *ends here* (when the tracked pointer is dropped)
    ///
    /// // now, another thread can mutate the value *without* loom knowing it is being
    /// // accessed concurrently by this thread! this is BAD NEWS --- loom would have
    /// // failed to detect a potential data race!
    /// unsafe { println!("{}", (*ptr)) }
    /// # })
    /// ```
    ///
    /// More subtly, if a *new* pointer is constructed from the original
    /// pointer, that pointer is not tracked by Loom, either. This might occur
    /// when constructing a pointer to a struct field or array index. For
    /// example, this is incorrect:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    ///
    /// struct MyStruct {
    ///     foo: usize,
    ///     bar: usize,
    /// }
    ///
    /// let my_struct = UnsafeCell::new(MyStruct { foo: 1, bar: 1});
    ///
    /// fn get_bar(cell: &UnsafeCell<MyStruct>) -> *const usize {
    ///     let tracked_ptr = cell.get(); // tracked immutable access begins here
    ///
    ///     tracked_ptr.with(|ptr| unsafe {
    ///         &(*ptr).bar as *const usize
    ///     })
    /// } // tracked access ends here, when `tracked_ptr` is dropped
    ///
    ///
    /// // now, a pointer to `mystruct.bar` exists that Loom is not aware of!
    /// // if another thread were to mutate `mystruct.bar` while we are holding this
    /// // pointer, Loom would fail to detect the data race!
    /// let ptr_to_bar = get_bar(&my_struct);
    /// # })
    /// ```
    ///
    /// Similarly, constructing pointers via pointer math (such as [`offset`])
    /// may also escape Loom's ability to track accesses.
    ///
    /// Furthermore, the raw pointer passed to the `with` closure may only be passed
    /// into function calls that don't take ownership of that pointer past the
    /// end of the function call. Therefore, code like this is okay:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    ///
    /// let cell = UnsafeCell::new(1);
    ///
    /// let ptr = cell.get();
    /// let value_in_cell = ptr.with(|ptr| unsafe {
    ///     // This is fine, because `ptr::read` does not retain ownership of
    ///     // the pointer after when the function call returns.
    ///     std::ptr::read(ptr)
    /// });
    /// # })
    /// ```
    ///
    /// But code like *this* is not okay:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    /// use std::ptr;
    ///
    /// struct ListNode<T> {
    ///     value: *const T,
    ///     next: *const ListNode<T>,
    /// }
    ///
    /// impl<T> ListNode<T> {
    ///     fn new(value: *const T) -> Box<Self> {
    ///         Box::new(ListNode {
    ///             value, // the pointer is moved into the `ListNode`, which will outlive this function!
    ///             next: ptr::null::<ListNode<T>>(),
    ///         })
    ///     }
    /// }
    ///
    /// let cell = UnsafeCell::new(1);
    ///
    /// let ptr = cell.get(); // immutable access begins here
    ///
    /// // the pointer passed into `ListNode::new` will outlive the function call
    /// let node = ptr.with(|ptr| ListNode::new(ptr));
    ///
    /// drop(ptr); // immutable access ends here
    ///
    /// // loom doesn't know that the cell can still be accessed via the `ListNode`!
    /// # })
    /// ```
    ///
    /// Finally, the `*const T` passed to `with` should *not* be cast to an
    /// `*mut T`. This will permit untracked mutable access, as Loom only tracks
    /// the existence of a `ConstPtr` as representing an immutable access.
    ///
    /// [`ptr::read`]: std::ptr::read
    /// [`ptr::eq`]: std::ptr::eq
    /// [`offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset
    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*const T) -> R,
    {
        f(self.ptr)
    }
}
impl<T: ?Sized> MutPtr<T> {
    /// Dereference the raw pointer.
    ///
    /// # Safety
    ///
    /// This is equivalent to dereferencing a `*mut T` pointer, so all the same
    /// safety considerations apply here.
    ///
    /// Because the `MutPtr` type can only be created by calling
    /// [`UnsafeCell::get_mut`] on a valid `UnsafeCell`, we know the pointer
    /// will never be null.
    ///
    /// Loom tracks whether the value contained in the [`UnsafeCell`] from which
    /// this pointer originated is being concurrently accessed, and will panic
    /// if a data race could occur. However, `loom` does _not_ track liveness
    /// --- the [`UnsafeCell`] this pointer points to may have been dropped.
    /// Therefore, the caller is responsible for ensuring this pointer is not
    /// dangling.
    ///
    // Clippy knows that it's Bad and Wrong to construct a mutable reference
    // from an immutable one...but this function is intended to simulate a raw
    // pointer, so we have to do that here.
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn deref(&self) -> &mut T {
        // SAFETY: non-null by construction; the caller guarantees the pointer
        // is not dangling (see above).
        &mut *self.ptr
    }
    /// Perform an operation with the actual value of the raw pointer.
    ///
    /// This may be used to call functions like [`ptr::write`], [`ptr::read`],
    /// and [`ptr::eq`], which are not exposed by the `MutPtr` type, cast the
    /// pointer to an integer, et cetera.
    ///
    /// # Correct Usage
    ///
    /// Note that the raw pointer passed into the closure *must not* be moved
    /// out of the closure, as doing so will allow it to "escape" Loom's ability
    /// to track accesses.
    ///
    /// Loom considers the [`UnsafeCell`] from which this pointer originated to
    /// be "accessed mutably" as long as the [`MutPtr`] guard exists. When the
    /// guard is dropped, Loom considers the mutable access to have ended. This
    /// means that if the `*mut T` passed to a `with` closure is moved _out_ of
    /// that closure, it may outlive the guard, and thus exist past the end of
    /// the mutable access (as understood by Loom).
    ///
    /// For example, code like this is incorrect:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    /// let cell = UnsafeCell::new(1);
    ///
    /// let ptr = {
    ///     let tracked_ptr = cell.get_mut(); // tracked mutable access begins here
    ///
    ///     // move the real pointer out of the simulated pointer
    ///     tracked_ptr.with(|real_ptr| real_ptr)
    ///
    /// }; // tracked mutable access *ends here* (when the tracked pointer is dropped)
    ///
    /// // now, we can mutate the value *without* loom knowing it is being mutably
    /// // accessed! this is BAD NEWS --- if the cell was being accessed concurrently,
    /// // loom would have failed to detect the error!
    /// unsafe { (*ptr) = 2 }
    /// # })
    /// ```
    ///
    /// More subtly, if a *new* pointer is constructed from the original
    /// pointer, that pointer is not tracked by Loom, either. This might occur
    /// when constructing a pointer to a struct field or array index. For
    /// example, this is incorrect:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    ///
    /// struct MyStruct {
    ///     foo: usize,
    ///     bar: usize,
    /// }
    ///
    /// let my_struct = UnsafeCell::new(MyStruct { foo: 1, bar: 1});
    ///
    /// fn get_bar(cell: &UnsafeCell<MyStruct>) -> *mut usize {
    ///     let tracked_ptr = cell.get_mut(); // tracked mutable access begins here
    ///
    ///     tracked_ptr.with(|ptr| unsafe {
    ///         &mut (*ptr).bar as *mut usize
    ///     })
    /// } // tracked mutable access ends here, when `tracked_ptr` is dropped
    ///
    ///
    /// // now, a pointer to `mystruct.bar` exists that Loom is not aware of!
    /// // if we were to mutate `mystruct.bar` through this pointer while another
    /// // thread was accessing `mystruct` concurrently, Loom would fail to detect
    /// // this.
    /// let ptr_to_bar = get_bar(&my_struct);
    /// # })
    /// ```
    ///
    /// Similarly, constructing pointers via pointer math (such as [`offset`])
    /// may also escape Loom's ability to track accesses.
    ///
    /// Finally, the raw pointer passed to the `with` closure may only be passed
    /// into function calls that don't take ownership of that pointer past the
    /// end of the function call. Therefore, code like this is okay:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    ///
    /// let cell = UnsafeCell::new(1);
    ///
    /// let ptr = cell.get_mut();
    /// let value_in_cell = ptr.with(|ptr| unsafe {
    ///     // This is fine, because `ptr::write` does not retain ownership of
    ///     // the pointer after when the function call returns.
    ///     std::ptr::write(ptr, 2)
    /// });
    /// # })
    /// ```
    ///
    /// But code like *this* is not okay:
    ///
    /// ```rust
    /// # loom::model(|| {
    /// use loom::cell::UnsafeCell;
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// static SOME_IMPORTANT_POINTER: AtomicPtr<usize> = AtomicPtr::new(std::ptr::null_mut());
    ///
    /// fn mess_with_important_pointer(cell: &UnsafeCell<usize>) {
    ///     cell.get_mut() // mutable access begins here
    ///         .with(|ptr| {
    ///             SOME_IMPORTANT_POINTER.store(ptr, Ordering::SeqCst);
    ///         })
    /// } // mutable access ends here
    ///
    /// // loom doesn't know that the cell can still be accessed via the `AtomicPtr`!
    /// # })
    /// ```
    ///
    /// [`ptr::write`]: std::ptr::write
    /// [`ptr::read`]: std::ptr::read
    /// [`ptr::eq`]: std::ptr::eq
    /// [`offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset
    pub fn with<F, R>(&self, f: F) -> R
    where
        F: FnOnce(*mut T) -> R,
    {
        f(self.ptr)
    }
}

View file

@ -0,0 +1,67 @@
use crate::rt;
use crate::thread;
use std::sync::Mutex;
use std::task::Waker;
/// Mock implementation of `tokio::sync::AtomicWaker`.
#[derive(Debug)]
pub struct AtomicWaker {
    // The most recently registered waker, if any.
    waker: Mutex<Option<Waker>>,
    // Model-level lock used to make waker register/take operations visible
    // to loom's exploration (the real std Mutex above is not modeled).
    object: rt::Mutex,
}
impl AtomicWaker {
    /// Create a new instance of `AtomicWaker` with no waker registered.
    pub fn new() -> AtomicWaker {
        AtomicWaker {
            waker: Mutex::new(None),
            object: rt::Mutex::new(false),
        }
    }
    /// Registers the current task to be notified on calls to `wake`.
    ///
    /// If the model lock is contended, the waker is woken immediately and the
    /// thread yields so the caller gets polled again and can re-register.
    #[track_caller]
    pub fn register(&self, waker: Waker) {
        // NOTE: the original code wrapped these lock operations in `dbg!`,
        // which printed to stderr on every call; the macros are removed here
        // (`dbg!` returns its argument, so behavior is otherwise unchanged).
        if !self.object.try_acquire_lock(location!()) {
            waker.wake();
            // yield the task and try again... this is a spin lock.
            thread::yield_now();
            return;
        }
        *self.waker.lock().unwrap() = Some(waker);
        self.object.release_lock();
    }
    /// Registers the current task to be woken without consuming the value.
    pub fn register_by_ref(&self, waker: &Waker) {
        self.register(waker.clone());
    }
    /// Notifies the task that last called `register`.
    pub fn wake(&self) {
        if let Some(waker) = self.take_waker() {
            waker.wake();
        }
    }
    /// Attempts to take the `Waker` value out of the `AtomicWaker` with the
    /// intention that the caller will wake the task later.
    #[track_caller]
    pub fn take_waker(&self) -> Option<Waker> {
        // Acquire/release the model lock around the take so loom sees the
        // synchronization between register and wake.
        self.object.acquire_lock(location!());
        let ret = self.waker.lock().unwrap().take();
        self.object.release_lock();
        ret
    }
}
impl Default for AtomicWaker {
    /// Equivalent to [`AtomicWaker::new`].
    fn default() -> Self {
        AtomicWaker::new()
    }
}

View file

@ -0,0 +1,78 @@
//! Future related synchronization primitives.
mod atomic_waker;
pub use self::atomic_waker::AtomicWaker;
use crate::rt;
use crate::sync::Arc;
use pin_utils::pin_mut;
use std::future::Future;
use std::mem;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
/// Block the current thread, driving `f` to completion.
#[track_caller]
pub fn block_on<F>(f: F) -> F::Output
where
    F: Future,
{
    pin_mut!(f);
    // `Notify` is the wake-up primitive: the raw-waker vtable below notifies
    // it, and this thread blocks on it between polls.
    let notify = Arc::new(rt::Notify::new(false, true));
    // ManuallyDrop: the RawWaker is built from `&*notify` without bumping the
    // Arc refcount, so dropping the Waker normally would over-release the Arc.
    let waker = unsafe {
        mem::ManuallyDrop::new(Waker::from_raw(RawWaker::new(
            &*notify as *const _ as *const (),
            waker_vtable(),
        )))
    };
    let mut cx = Context::from_waker(&waker);
    loop {
        match f.as_mut().poll(&mut cx) {
            Poll::Ready(val) => return val,
            Poll::Pending => {}
        }
        // Park this thread until the waker fires, then poll again.
        notify.wait(location!());
    }
}
// Vtable for wakers whose data pointer is a raw `Arc<rt::Notify>`.
// (The reference to a temporary is fine: the vtable is const-promoted to
// `'static`.)
pub(super) fn waker_vtable() -> &'static RawWakerVTable {
    &RawWakerVTable::new(
        clone_arc_raw,
        wake_arc_raw,
        wake_by_ref_arc_raw,
        drop_arc_raw,
    )
}
// SAFETY: `data` must be a pointer previously produced by `Arc::into_raw` /
// `Arc::as_ptr` for an `Arc<rt::Notify>` whose refcount is still live.
unsafe fn increase_refcount(data: *const ()) {
    // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
    let arc = mem::ManuallyDrop::new(Arc::<rt::Notify>::from_raw(data as *const _));
    // Now increase refcount, but don't drop new refcount either
    let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
}
// RawWaker clone hook: bump the Arc refcount and hand back an identical waker.
unsafe fn clone_arc_raw(data: *const ()) -> RawWaker {
    increase_refcount(data);
    RawWaker::new(data, waker_vtable())
}
// RawWaker wake-by-value hook: consumes one refcount (the Arc is dropped at
// the end of this function, matching `Waker::wake` semantics).
unsafe fn wake_arc_raw(data: *const ()) {
    let notify: Arc<rt::Notify> = Arc::from_raw(data as *const _);
    notify.notify(location!());
}
// RawWaker wake-by-reference hook: must NOT consume a refcount, hence the
// ManuallyDrop wrapper around the reconstructed Arc.
unsafe fn wake_by_ref_arc_raw(data: *const ()) {
    // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
    let arc = mem::ManuallyDrop::new(Arc::<rt::Notify>::from_raw(data as *const _));
    arc.notify(location!());
}
// RawWaker drop hook: releases the refcount held by the waker.
unsafe fn drop_arc_raw(data: *const ()) {
    drop(Arc::<rt::Notify>::from_raw(data as *const _))
}

28
third-party/vendor/loom/src/hint.rs vendored Normal file
View file

@ -0,0 +1,28 @@
//! Mocked versions of [`std::hint`] functions.
/// Signals the processor that it is entering a busy-wait spin-loop.
pub fn spin_loop() {
    // Forwards to loom's mocked `spin_loop_hint` rather than the real
    // processor hint.
    crate::sync::atomic::spin_loop_hint();
}
/// Informs the compiler that this point in the code is not reachable, enabling
/// further optimizations.
///
/// This is a mocked version of the standard library's
/// [`std::hint::unreachable_unchecked`]. Loom's wrapper of this function
/// unconditionally panics.
///
/// # Safety
///
/// Technically, this function is safe to call (unlike the standard library's
/// version), as it always panics rather than invoking UB. However, this
/// function is marked as `unsafe` because it's intended to be used as a
/// simulated version of [`std::hint::unreachable_unchecked`], which is unsafe.
///
/// See [the documentation for
/// `std::hint::unreachable_unchecked`](std::hint::unreachable_unchecked#safety)
/// for safety details.
#[track_caller]
pub unsafe fn unreachable_unchecked() -> ! {
    // Unlike the real `std::hint::unreachable_unchecked`, reaching this in a
    // loom model is reported as a panic (test failure) rather than UB.
    unreachable!("unreachable_unchecked was reached!");
}

View file

@ -0,0 +1,91 @@
//! Mock implementation of the `lazy_static` crate.
use crate::rt;
pub use crate::rt::thread::AccessError;
pub use crate::rt::yield_now;
use crate::sync::atomic::Ordering;
pub use std::thread::panicking;
use std::fmt;
use std::marker::PhantomData;
/// Mock implementation of `lazy_static::Lazy`.
pub struct Lazy<T> {
    // Sadly, these fields have to be public, since function pointers in const
    // fns are unstable. When fn pointer arguments to const fns stabilize, these
    // should be made private and replaced with a `const fn new`.
    //
    // User code should not rely on the existence of these fields.
    /// Initializer invoked on first access to produce the value.
    #[doc(hidden)]
    pub init: fn() -> T,
    /// Marker tying this container to `T`; no `T` is stored inline (the value
    /// lives in the runtime's per-execution state).
    #[doc(hidden)]
    pub _p: PhantomData<fn(T)>,
}
impl<T: 'static> Lazy<T> {
    /// Mock implementation of `lazy_static::Lazy::get`.
    pub fn get(&'static self) -> &'static T {
        // This is not great. Specifically, we're returning a 'static reference to a value that
        // only lives for the duration of the execution. Unfortunately, the semantics of lazy
        // static is that, well, you get a value that is in fact 'static. If we did not provide
        // that, then this replacement wouldn't actually work.
        //
        // The "upside" here is that _if_ the code compiled with `lazy_static::lazy_static!`,
        // _then_ this is safe. That's not super satisfying, but I'm not sure how we do better
        // without changing the API pretty drastically. We could perhaps here provide a
        // `with(closure)` like we do for `UnsafeCell`, and require people to wrap the "real"
        // `lazy_static` the same way, but that seems like its own kind of unfortunate as I'm sure
        // users sometimes _rely_ on the returned reference being 'static. If we provided something
        // that used a closure to give the user a non-`'static` reference, we wouldn't be all that
        // much further along.
        match unsafe { self.try_get() } {
            Some(v) => v,
            None => {
                // Init the value out of the `rt::execution`
                let sv = crate::rt::lazy_static::StaticValue::new((self.init)());
                // While calling init, we may have yielded to the scheduler, in which case some
                // _other_ thread may have initialized the static. The real lazy_static does not
                // have this issue, since it takes a lock before initializing the new value, and
                // readers wait on that lock if they encounter it. We could implement that here
                // too, but for simplicity's sake, we just do another try_get here for now.
                if let Some(v) = unsafe { self.try_get() } {
                    return v;
                }
                rt::execution(|execution| {
                    let sv = execution.lazy_statics.init_static(self, sv);
                    // lazy_static uses std::sync::Once, which does a swap(AcqRel) to set
                    sv.sync.sync_store(&mut execution.threads, Ordering::AcqRel);
                });
                unsafe { self.try_get() }.expect("bug")
            }
        }
    }
    // Returns the already-initialized value, if any, as a `'static` borrow.
    //
    // SAFETY: extends the borrow's lifetime to `'static` via transmute; this is
    // only sound under the assumption discussed in `get` (the value lives for
    // the remainder of the execution).
    unsafe fn try_get(&'static self) -> Option<&'static T> {
        // Confine the lifetime-extending transmute to a single helper.
        unsafe fn transmute_lt<'a, 'b, T>(t: &'a T) -> &'b T {
            std::mem::transmute::<&'a T, &'b T>(t)
        }
        let sv = rt::execution(|execution| {
            let sv = execution.lazy_statics.get_static(self)?;
            // lazy_static uses std::sync::Once, which does a load(Acquire) to get
            sv.sync.sync_load(&mut execution.threads, Ordering::Acquire);
            Some(transmute_lt(sv))
        })?;
        Some(sv.get::<T>())
    }
}
impl<T: 'static> fmt::Debug for Lazy<T> {
    // Opaque formatting: does not force initialization or expose the value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Lazy { .. }")
    }
}

473
third-party/vendor/loom/src/lib.rs vendored Normal file
View file

@ -0,0 +1,473 @@
#![deny(missing_debug_implementations, missing_docs, rust_2018_idioms)]
#![cfg_attr(docsrs, feature(doc_cfg))]
//! Loom is a tool for testing concurrent programs.
//!
//! At a high level, it runs tests many times, permuting the possible concurrent executions of each
//! test according to what constitutes valid executions under the [C11 memory model][spec]. It then
//! uses state reduction techniques to avoid combinatorial explosion of the number of possible
//! executions.
//!
//! # Background
//!
//! Testing concurrent programs is challenging; concurrent strands of execution can interleave in
//! all sorts of ways, and each such interleaving might expose a concurrency bug in the program.
//! Some bugs may be so rare that they only occur under a very small set of possible executions,
//! and may not surface even if you run the code millions or billions of times.
//!
//! Loom provides a way to deterministically explore the various possible execution permutations
//! without relying on random executions. This allows you to write tests that verify that your
//! concurrent code is correct under _all_ executions, not just "most of the time".
//!
//! Consider a simple example:
//!
//! ```no_run
//! use std::sync::Arc;
//! use std::sync::atomic::AtomicUsize;
//! use std::sync::atomic::Ordering::SeqCst;
//! use std::thread;
//!
//! # /*
//! #[test]
//! # */
//! fn test_concurrent_logic() {
//! let v1 = Arc::new(AtomicUsize::new(0));
//! let v2 = v1.clone();
//!
//! thread::spawn(move || {
//! v1.store(1, SeqCst);
//! });
//!
//! assert_eq!(0, v2.load(SeqCst));
//! }
//! ```
//!
//! This program is incorrect: the main thread might yield between spawning the thread that stores
//! to `v1` and loading from `v2`, during which time the spawned thread may get to run and store 1
//! into `v1`. **Most** of the time, the main thread will get to the assertion before the spawned
//! thread executes, so the assertion will succeed. But, every once in a while, the spawned thread
//! manages to run just in time and the assertion will fail! This is obviously a contrived example,
//! but in practice many concurrent programs exhibit similar behavior -- they operate correctly
//! under most executions, but _some_ executions end up producing buggy behavior.
//!
//! Historically, the strategy for testing concurrent code has been to run tests in loops and hope
//! that an execution fails. Or to place the testing host under load while running the test suite
//! in an attempt to produce less frequently exercised executions. However, this kind of testing is
//! not reliable, and, in the event an iteration should fail, debugging the cause is exceedingly
//! difficult.
//!
//! The problem is compounded when other memory orderings than `SeqCst` are considered, where bugs
//! may only occur on hardware with particular memory characteristics, and thus **no** amount of
//! iteration will demonstrate the bug on different hardware!
//!
//! # Solution
//!
//! Loom fixes the problem by simulating the operating system's scheduler and Rust's memory model
//! such that all possible valid behaviors are explored and tested. To see how this works out in
//! practice, the above example can be rewritten to use loom's concurrency types as:
//!
//! ```no_run
//! use loom::sync::atomic::AtomicUsize;
//! use loom::thread;
//!
//! use std::sync::Arc;
//! use std::sync::atomic::Ordering::SeqCst;
//!
//! # /*
//! #[test]
//! # */
//! fn test_concurrent_logic() {
//! loom::model(|| {
//! let v1 = Arc::new(AtomicUsize::new(0));
//! let v2 = v1.clone();
//!
//! thread::spawn(move || {
//! v1.store(1, SeqCst);
//! });
//!
//! assert_eq!(0, v2.load(SeqCst));
//! });
//! }
//! ```
//!
//! Loom will run the closure provided to `loom::model` many times over, and each time a different
//! thread scheduling will be used. That is, one execution will have the spawned thread run after
//! the load from `v2`, and another will have the spawned thread run before the store to `v2`.
//! Thus, the test is guaranteed to fail.
//!
//! # Writing tests
//!
//! Test cases using loom must be fully deterministic. All sources of non-determinism must be via loom
//! types so that loom can expose different possible values on each execution of the test closure.
//! Other sources of non-determinism like random number generation or system calls cannot be
//! modeled directly by loom, and must be mocked to be testable by loom.
//!
//! To model synchronization non-determinism, tests must use the loom synchronization types, such
//! as [`Atomic*`](sync::atomic), [`Mutex`](sync::Mutex), [`RwLock`](sync::RwLock),
//! [`Condvar`](sync::Condvar), as well as other concurrency primitives like [`thread::spawn`],
//! [`UnsafeCell`](cell::UnsafeCell), and [`lazy_static!`]. However, when **not** running loom
//! tests, the `std` should be used, since the loom runtime won't be active. This means that
//! library code will need to use conditional compilation to decide which types to use.
//!
//! It is recommended to use a `loom` cfg flag to signal using the loom types. You can do this by
//! passing `RUSTFLAGS="--cfg loom"` as part of the command when you want to run the loom tests.
//! Then modify your `Cargo.toml` to include loom like this:
//!
//! ```toml
//! [target.'cfg(loom)'.dependencies]
//! loom = "0.5"
//! ```
//!
//! One common strategy to use the right types with and without loom is to create a module in your
//! crate named `sync` or any other name of your choosing. In this module, list out the types that
//! need to be toggled between loom and `std`:
//!
//! ```
//! #[cfg(loom)]
//! pub(crate) use loom::sync::atomic::AtomicUsize;
//!
//! #[cfg(not(loom))]
//! pub(crate) use std::sync::atomic::AtomicUsize;
//! ```
//!
//! Then, elsewhere in the library:
//!
//! ```ignore
//! use crate::sync::AtomicUsize;
//! ```
//!
//! ## Handling Loom API differences.
//!
//! Most of loom's types are drop-in replacements for their counterparts in `std`, but sometimes
//! there are minor API differences that you must work around. If your library must use Loom APIs
//! that differ from `std` types, then the library will be required to implement those APIs for
//! `std`. For example, for `UnsafeCell`, in the library's source, add the following:
//!
//! ```
//! #![cfg(not(loom))]
//!
//! #[derive(Debug)]
//! pub(crate) struct UnsafeCell<T>(std::cell::UnsafeCell<T>);
//!
//! impl<T> UnsafeCell<T> {
//! pub(crate) fn new(data: T) -> UnsafeCell<T> {
//! UnsafeCell(std::cell::UnsafeCell::new(data))
//! }
//!
//! pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
//! f(self.0.get())
//! }
//!
//! pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
//! f(self.0.get())
//! }
//! }
//! ```
//!
//! ## Yielding
//!
//! Some concurrent algorithms assume a fair scheduler. For example, a spin lock assumes that, at
//! some point, another thread will make enough progress for the lock to become available. This
//! presents a challenge for loom as its scheduler is, by design, not fair. It is specifically
//! trying to emulate every _possible_ execution, which may mean that another thread does not get
//! to run for a very long time (see also [Spinlocks Considered Harmful]). In such cases, loops
//! must include calls to [`loom::thread::yield_now`](thread::yield_now). This tells loom that
//! another thread needs to be scheduled in order for the current one to make progress.
//!
//! # Running Loom Tests
//!
//! Loom tests must be run separately, with `RUSTFLAGS="--cfg loom"` specified (assuming you went
//! with the `cfg` approach suggested above). For example, if the library includes a test file:
//! `tests/loom_my_struct.rs` that includes tests with [`loom::model`](mod@model), then run the
//! following command:
//!
//! ```console
//! RUSTFLAGS="--cfg loom" cargo test --test loom_my_struct --release
//! ```
//!
//! Note that you will generally want to run loom tests with `--release` since loom must execute
//! each test closure a large number of times, at which point the speed win from optimized code
//! makes a big difference.
//!
//! # Debugging Loom Failures
//!
//! Loom's deterministic execution allows the specific chain of events leading to a test failure
//! to be isolated for debugging. When a loom test fails, the first step is to isolate the exact
//! execution path that resulted in the failure. To do this, Loom is able to output the execution
//! path to a file. Two environment variables are useful for this process:
//!
//! - `LOOM_CHECKPOINT_FILE`
//! - `LOOM_CHECKPOINT_INTERVAL`
//!
//! The first specifies the file to write to and read from. The second specifies how often to write
//! to the file. If the execution fails on the 10,000,000th permutation, it is faster to write to a
//! file every 10,000 iterations instead of every single one.
//!
//! To isolate the exact failing path, first run the following command to generate the checkpoint
//! for the failing scenario:
//!
//! ```console
//! LOOM_CHECKPOINT_FILE=my_test.json [other env vars] \
//! cargo test --test loom_my_struct --release [failing test]
//! ```
//!
//! Then this to check that the next permutation indeed triggers the fault:
//!
//! ```console
//! LOOM_CHECKPOINT_INTERVAL=1 LOOM_CHECKPOINT_FILE=my_test.json [other env vars] \
//! cargo test --test loom_my_struct --release [failing test]
//! ```
//!
//! The test should fail on the first permutation, effectively isolating the failure
//! scenario.
//!
//! The next step is to enable additional log output for just the failing permutation. Again, there
//! are some environment variables for this:
//!
//! - `LOOM_LOG`
//! - `LOOM_LOCATION`
//!
//! The first environment variable, `LOOM_LOG`, outputs a marker on every thread switch. This helps
//! with tracing the exact steps in a threaded environment that results in the test failure.
//!
//! The second, `LOOM_LOCATION`, enables location tracking. This includes additional information in
//! panic messages that helps identify which specific field resulted in the error.
//!
//! Put together, the command becomes (yes, we know this is not great... but it works):
//!
//! ```console
//! LOOM_LOG=trace \
//! LOOM_LOCATION=1 \
//! LOOM_CHECKPOINT_INTERVAL=1 \
//! LOOM_CHECKPOINT_FILE=my_test.json \
//! RUSTFLAGS="--cfg loom" \
//! [other env vars] \
//! cargo test --test loom_my_struct --release [failing test]
//! ```
//!
//! This should provide you with a trace of all the concurrency events leading up to the failure,
//! which should allow you to identify how the bug is triggered.
//!
//! # Limitations and Caveats
//!
//! ## Intrusive Implementation
//!
//! Loom works by intercepting all loads, stores, and other concurrency-sensitive operations (like
//! spawning threads) that may trigger concurrency bugs in an application. But this interception
//! is not automatic -- it requires that the code being tested specifically uses the loom
//! replacement types. Any code that does not use loom's replacement types is invisible to loom,
//! and thus won't be subject to the loom model's permutation.
//!
//! While it is relatively simple to utilize loom's types in a single crate through the root-level
//! `#[cfg(loom)] mod sync` approach suggested earlier, more complex use-cases may require the use
//! of a library that itself uses concurrent constructs like locks and channels. In such cases,
//! that library must _also_ be augmented to support loom to achieve complete execution coverage.
//!
//! Note that loom still works if some concurrent operations are hidden from it (for example, if
//! you use `std::sync::Arc` instead of `loom::sync::Arc`). It just means that loom won't be able
//! to reason about the interaction between those operations and the other concurrent operations in
//! your program, and thus certain executions that are possible in the real world won't be modeled.
//!
//! ## Large Models
//!
//! By default, loom runs an **exhaustive** check of your program's possible concurrent executions
//! where **all** possible interleavings are checked. Loom's state reduction algorithms (see
//! "Implementation" below) significantly reduce the state space that must be explored, but complex
//! models can still take **significant** time to complete.
//!
//! To handle such large models in a more reasonable amount of time, you may need to **not** run
//! an exhaustive check, and instead tell loom to prune out interleavings that are unlikely to
//! reveal additional bugs. You do this by providing loom with a _thread pre-emption bound_. If you
//! set such a bound, loom will check all possible executions that include **at most** `n` thread
//! pre-emptions (where one thread is forcibly stopped and another one runs in its place). **In
//! practice, setting the thread pre-emption bound to 2 or 3 is enough to catch most bugs** while
//! significantly reducing the number of possible executions.
//!
//! To set the thread pre-emption bound, set the `LOOM_MAX_PREEMPTIONS` environment
//! variable when running tests (or set
//! [`Builder::preemption_bound`](model::Builder::preemption_bound)). For example:
//!
//! ```console
//! LOOM_MAX_PREEMPTIONS=3 RUSTFLAGS="--cfg loom" cargo test --test loom_my_struct --release
//! ```
//!
//! ## Relaxed Memory Ordering
//!
//! The [`Relaxed` memory ordering](std::sync::atomic::Ordering::Relaxed) allows particularly
//! strange executions. For example, in the following code snippet, it is [completely
//! legal][spec-relaxed] for `r1 == r2 == 42`!
//!
//! ```rust,no_run
//! # use std::sync::atomic::{AtomicUsize, Ordering};
//! # use std::thread;
//! # let x: &'static _ = Box::leak(Box::new(AtomicUsize::new(0)));
//! # let y: &'static _ = Box::leak(Box::new(AtomicUsize::new(0)));
//! thread::spawn(move || {
//! let r1 = y.load(Ordering::Relaxed); // A
//! x.store(r1, Ordering::Relaxed); // B
//! });
//! thread::spawn(move || {
//! let r2 = x.load(Ordering::Relaxed); // C
//! y.store(42, Ordering::Relaxed); // D
//! });
//! ```
//!
//! Unfortunately, it is not possible for loom to completely model all the interleavings that
//! relaxed memory ordering allows. This is because the relaxed memory ordering allows memory
//! operations to be re-ordered within a single thread -- B can run *before* A -- which loom cannot
//! emulate. The same restriction applies to certain reorderings that are possible across different
//! atomic variables with other memory orderings, and means that there are certain concurrency bugs
//! that loom cannot catch.
//!
//! ## Combinatorial Explosion with Many Threads
//!
//! The number of possible execution interleavings grows exponentially with the number of threads,
//! as each possible execution of each additional thread must be taken into account for each
//! possible execution of the current threads. Loom mitigates this to an extent by reducing the
//! state space (see "Implementation" below) through _equivalent execution elimination_. For
//! example, if two threads **read** from the same atomic variable, loom does not attempt another
//! execution given that the order in which two threads read from the same atomic cannot impact the
//! execution.
//!
//! However, even with equivalent execution elimination, the number of possible executions grows
//! significantly with each new thread, to the point where checking becomes infeasible. Loom
//! therefore specifically limits the number of threads it will model (see [`MAX_THREADS`]), and
//! tailors its implementation to that limit.
//!
//! # Implementation
//!
//! Loom is an implementation of techniques described in [CDSChecker: Checking Concurrent Data
//! Structures Written with C/C++ Atomics][cdschecker]. Please see the paper for much more detail
//! on equivalent execution elimination and the other techniques loom uses to accurately model the
//! [C11 memory model][spec].
//!
//! [spec]: https://en.cppreference.com/w/cpp/atomic/memory_order
//! [spec-relaxed]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
//! [Spinlocks Considered Harmful]: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html
//! [cdschecker]: http://demsky.eecs.uci.edu/publications/c11modelcheck.pdf
// Expands the wrapped items only when the `futures` feature is enabled,
// tagging them for docs.rs so the feature requirement shows up in rustdoc.
macro_rules! if_futures {
    ($($t:tt)*) => {
        cfg_if::cfg_if! {
            if #[cfg(feature = "futures")] {
                #[cfg_attr(docsrs, doc(cfg(feature = "futures")))]
                $($t)*
            }
        }
    }
}
// Pass-through replacement for `std::dbg!` inside this crate: expands to the
// wrapped tokens unchanged, so internal `dbg!(..)` calls produce no output.
macro_rules! dbg {
    ($($t:tt)*) => {
        $($t)*
    };
}
#[macro_use]
mod rt;
// Expose for documentation purposes.
pub use rt::MAX_THREADS;
pub mod alloc;
pub mod cell;
pub mod hint;
pub mod lazy_static;
pub mod model;
pub mod sync;
pub mod thread;
#[doc(inline)]
pub use crate::model::model;
if_futures! {
pub mod future;
}
/// Mock version of `std::thread_local!`.
// This is defined *after* all other code in `loom`, since we use
// `scoped_thread_local!` internally, which uses the `std::thread_local!` macro
// without namespacing it. Defining this after all other `loom` modules
// prevents internal code from accidentally using the mock thread local instead
// of the real one.
#[macro_export]
macro_rules! thread_local {
    // empty (base case for the recursion)
    () => {};
    // process multiple declarations: expand the first, then recurse on the rest
    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => (
        $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
        $crate::thread_local!($($rest)*);
    );
    // handle a single declaration (no trailing semicolon)
    ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr) => (
        $crate::__thread_local_inner!($(#[$attr])* $vis $name, $t, $init);
    );
}
/// Mock version of `lazy_static::lazy_static!`.
#[macro_export]
macro_rules! lazy_static {
    // private static
    ($(#[$attr:meta])* static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => {
        // use `()` to explicitly forward the information about private items
        $crate::__lazy_static_internal!($(#[$attr])* () static ref $N : $T = $e; $($t)*);
    };
    // `pub` static
    ($(#[$attr:meta])* pub static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => {
        $crate::__lazy_static_internal!($(#[$attr])* (pub) static ref $N : $T = $e; $($t)*);
    };
    // restricted visibility, e.g. `pub(crate)`
    ($(#[$attr:meta])* pub ($($vis:tt)+) static ref $N:ident : $T:ty = $e:expr; $($t:tt)*) => {
        $crate::__lazy_static_internal!($(#[$attr])* (pub ($($vis)+)) static ref $N : $T = $e; $($t)*);
    };
    // base case: all declarations consumed
    () => ()
}
// Implementation detail of `thread_local!`: expands one declaration into a
// `loom::thread::LocalKey` static whose initializer is captured as a plain
// `fn() -> T` so it can be invoked again for each mock thread.
#[macro_export]
#[doc(hidden)]
macro_rules! __thread_local_inner {
    ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => {
        $(#[$attr])* $vis static $name: $crate::thread::LocalKey<$t> =
            $crate::thread::LocalKey {
                init: (|| { $init }) as fn() -> $t,
                _p: std::marker::PhantomData,
            };
    };
}
// Implementation detail of `lazy_static!`: mirrors the expansion of the real
// `lazy_static` crate, but backs each static with `loom::lazy_static::Lazy`.
#[macro_export]
#[doc(hidden)]
macro_rules! __lazy_static_internal {
    // optional visibility restrictions are wrapped in `()` to allow for
    // explicitly passing otherwise implicit information about private items
    ($(#[$attr:meta])* ($($vis:tt)*) static ref $N:ident : $T:ty = $init:expr; $($t:tt)*) => {
        // A unit struct stands in for the static; dereferencing it lazily
        // initializes (and returns) the underlying value.
        #[allow(missing_copy_implementations)]
        #[allow(non_camel_case_types)]
        #[allow(dead_code)]
        $(#[$attr])*
        $($vis)* struct $N {__private_field: ()}
        #[doc(hidden)]
        $($vis)* static $N: $N = $N {__private_field: ()};
        impl ::core::ops::Deref for $N {
            type Target = $T;
            // this and the two __ functions below should really also be #[track_caller]
            fn deref(&self) -> &$T {
                #[inline(always)]
                fn __static_ref_initialize() -> $T { $init }
                #[inline(always)]
                fn __stability() -> &'static $T {
                    // Each named static gets its own loom `Lazy` cell whose
                    // initializer is the captured `$init` expression.
                    static LAZY: $crate::lazy_static::Lazy<$T> =
                        $crate::lazy_static::Lazy {
                            init: __static_ref_initialize,
                            _p: std::marker::PhantomData,
                        };
                    LAZY.get()
                }
                __stability()
            }
        }
        // Recurse to expand any remaining declarations.
        $crate::lazy_static!($($t)*);
    };
    () => ()
}

270
third-party/vendor/loom/src/model.rs vendored Normal file
View file

@ -0,0 +1,270 @@
//! Model concurrent programs.
use crate::rt::{self, Execution, Scheduler};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tracing::{info, subscriber};
use tracing_subscriber::{fmt, EnvFilter};
// Fallback for `Builder::max_threads` (not overridable via environment).
const DEFAULT_MAX_THREADS: usize = 4;
// Fallback for `Builder::max_branches` when `LOOM_MAX_BRANCHES` is unset.
const DEFAULT_MAX_BRANCHES: usize = 1_000;
/// Configure a model
///
/// Defaults for most fields are read from the documented `LOOM_*`
/// environment variables by [`Builder::new`].
#[derive(Debug)]
#[non_exhaustive] // Support adding more fields in the future
pub struct Builder {
    /// Max number of threads to check as part of the execution.
    ///
    /// This should be set as low as possible and must be less than
    /// [`MAX_THREADS`](crate::MAX_THREADS).
    pub max_threads: usize,
    /// Maximum number of thread switches per permutation.
    ///
    /// Defaults to `LOOM_MAX_BRANCHES` environment variable.
    pub max_branches: usize,
    /// Maximum number of permutations to explore.
    ///
    /// Defaults to `LOOM_MAX_PERMUTATIONS` environment variable.
    pub max_permutations: Option<usize>,
    /// Maximum amount of time to spend on checking
    ///
    /// Defaults to `LOOM_MAX_DURATION` environment variable.
    pub max_duration: Option<Duration>,
    /// Maximum number of thread preemptions to explore
    ///
    /// Defaults to `LOOM_MAX_PREEMPTIONS` environment variable.
    pub preemption_bound: Option<usize>,
    /// When doing an exhaustive check, uses the file to store and load the
    /// check progress
    ///
    /// Defaults to `LOOM_CHECKPOINT_FILE` environment variable.
    pub checkpoint_file: Option<PathBuf>,
    /// How often to write the checkpoint file
    ///
    /// Defaults to `LOOM_CHECKPOINT_INTERVAL` environment variable.
    pub checkpoint_interval: usize,
    /// When `true`, locations are captured on each loom operation.
    ///
    /// Note that this is **very** expensive. It is recommended to first
    /// isolate a failing iteration using `LOOM_CHECKPOINT_FILE`, then enable
    /// location tracking.
    ///
    /// Defaults to `LOOM_LOCATION` environment variable.
    pub location: bool,
    /// Log execution output to stdout.
    ///
    /// Defaults to existence of `LOOM_LOG` environment variable.
    pub log: bool,
}
impl Builder {
    /// Create a new `Builder` instance with default values.
    ///
    /// Each field documenting a `LOOM_*` environment variable is initialized
    /// from that variable when set; a value that fails to parse panics with
    /// a message naming the offending variable.
    pub fn new() -> Builder {
        use std::env;
        let checkpoint_interval = env::var("LOOM_CHECKPOINT_INTERVAL")
            .map(|v| {
                v.parse()
                    .expect("invalid value for `LOOM_CHECKPOINT_INTERVAL`")
            })
            .unwrap_or(20_000);
        let max_branches = env::var("LOOM_MAX_BRANCHES")
            .map(|v| v.parse().expect("invalid value for `LOOM_MAX_BRANCHES`"))
            .unwrap_or(DEFAULT_MAX_BRANCHES);
        // Presence alone enables these two; their values are not inspected here.
        let location = env::var("LOOM_LOCATION").is_ok();
        let log = env::var("LOOM_LOG").is_ok();
        let max_duration = env::var("LOOM_MAX_DURATION")
            .map(|v| {
                let secs = v.parse().expect("invalid value for `LOOM_MAX_DURATION`");
                Duration::from_secs(secs)
            })
            .ok();
        let max_permutations = env::var("LOOM_MAX_PERMUTATIONS")
            .map(|v| {
                v.parse()
                    .expect("invalid value for `LOOM_MAX_PERMUTATIONS`")
            })
            .ok();
        let preemption_bound = env::var("LOOM_MAX_PREEMPTIONS")
            .map(|v| v.parse().expect("invalid value for `LOOM_MAX_PREEMPTIONS`"))
            .ok();
        let checkpoint_file = env::var("LOOM_CHECKPOINT_FILE")
            .map(|v| v.parse().expect("invalid value for `LOOM_CHECKPOINT_FILE`"))
            .ok();
        Builder {
            max_threads: DEFAULT_MAX_THREADS,
            max_branches,
            max_duration,
            max_permutations,
            preemption_bound,
            checkpoint_file,
            checkpoint_interval,
            location,
            log,
        }
    }
    /// Set the checkpoint file.
    pub fn checkpoint_file(&mut self, file: &str) -> &mut Self {
        self.checkpoint_file = Some(file.into());
        self
    }
    /// Check the provided model.
    ///
    /// Runs `f` once per explored execution until the state space is
    /// exhausted or one of the configured limits (`max_permutations`,
    /// `max_duration`) is hit. Leaked loom objects cause a panic after each
    /// iteration.
    pub fn check<F>(&self, f: F)
    where
        F: Fn() + Sync + Send + 'static,
    {
        // Iterations are counted from 1; `i` names the current permutation.
        let mut i = 1;
        let mut _span = tracing::info_span!("iter", message = i).entered();
        let mut execution =
            Execution::new(self.max_threads, self.max_branches, self.preemption_bound);
        let mut scheduler = Scheduler::new(self.max_threads);
        // Resume exploration from a previous run if a checkpoint exists.
        if let Some(ref path) = self.checkpoint_file {
            if path.exists() {
                execution.path = checkpoint::load_execution_path(path);
                execution.path.set_max_branches(self.max_branches);
            }
        }
        execution.log = self.log;
        execution.location = self.location;
        let f = Arc::new(f);
        let start = Instant::now();
        loop {
            // Housekeeping (checkpoint writes and limit checks) only runs
            // every `checkpoint_interval` iterations, keeping the hot loop
            // cheap.
            if i % self.checkpoint_interval == 0 {
                info!(parent: None, "");
                info!(
                    parent: None,
                    " ================== Iteration {} ==================", i
                );
                info!(parent: None, "");
                if let Some(ref path) = self.checkpoint_file {
                    checkpoint::store_execution_path(&execution.path, path);
                }
                if let Some(max_permutations) = self.max_permutations {
                    if i >= max_permutations {
                        return;
                    }
                }
                if let Some(max_duration) = self.max_duration {
                    if start.elapsed() >= max_duration {
                        return;
                    }
                }
            }
            let f = f.clone();
            scheduler.run(&mut execution, move || {
                f();
                // Tear down lazy statics registered during this iteration.
                let lazy_statics = rt::execution(|execution| execution.lazy_statics.drop());
                // drop outside of execution
                drop(lazy_statics);
                rt::thread_done();
            });
            execution.check_for_leaks();
            i += 1;
            // Create the next iteration's `tracing` span before trying to step to the next
            // execution, as the `Execution` will capture the current span when
            // it's reset.
            _span = tracing::info_span!(parent: None, "iter", message = i).entered();
            if let Some(next) = execution.step() {
                execution = next;
            } else {
                info!(parent: None, "Completed in {} iterations", i - 1);
                return;
            }
        }
    }
}
impl Default for Builder {
fn default() -> Self {
Self::new()
}
}
/// Run all concurrent permutations of the provided closure.
///
/// Uses a default [`Builder`](crate::model::Builder) which can be affected
/// by environment variables.
pub fn model<F>(f: F)
where
    F: Fn() + Sync + Send + 'static,
{
    // Route loom's tracing output through a test-friendly subscriber whose
    // verbosity is controlled by the `LOOM_LOG` environment variable.
    let filter = EnvFilter::from_env("LOOM_LOG");
    let subscriber = fmt::Subscriber::builder()
        .with_env_filter(filter)
        .with_test_writer()
        .without_time()
        .finish();
    // Install the subscriber only for the duration of this model check.
    subscriber::with_default(subscriber, || Builder::new().check(f));
}
#[cfg(feature = "checkpoint")]
mod checkpoint {
    use std::path::Path;

    /// Deserialize a previously-saved exploration path from `fs_path`.
    ///
    /// Panics if the file cannot be read or is not valid JSON.
    pub(crate) fn load_execution_path(fs_path: &Path) -> crate::rt::Path {
        let contents = std::fs::read_to_string(fs_path).unwrap();
        serde_json::from_str(&contents).unwrap()
    }

    /// Serialize the exploration path as JSON and write it to `fs_path`,
    /// replacing any existing file. Panics on I/O failure.
    pub(crate) fn store_execution_path(path: &crate::rt::Path, fs_path: &Path) {
        let serialized = serde_json::to_string(path).unwrap();
        std::fs::write(fs_path, serialized.as_bytes()).unwrap();
    }
}
// Stubs used when serialization support is compiled out; reaching either
// function means a checkpoint file was configured without the `checkpoint`
// feature enabled.
#[cfg(not(feature = "checkpoint"))]
mod checkpoint {
    use std::path::Path;
    pub(crate) fn load_execution_path(_fs_path: &Path) -> crate::rt::Path {
        panic!("not compiled with `checkpoint` feature")
    }
    pub(crate) fn store_execution_path(_path: &crate::rt::Path, _fs_path: &Path) {
        panic!("not compiled with `checkpoint` feature")
    }
}

View file

@ -0,0 +1,42 @@
use crate::rt::VersionVec;
// Records an access to a model object: where in the exploration path it
// happened, and the causality (version vector) observed at that point.
#[derive(Debug, Clone)]
pub(crate) struct Access {
    // Position in the exploration path at which the access occurred.
    path_id: usize,
    // Version vector captured at the time of the access (DPOR bookkeeping).
    dpor_vv: VersionVec,
}
impl Access {
    /// Record a fresh access at `path_id` with the given causality.
    pub(crate) fn new(path_id: usize, version: &VersionVec) -> Access {
        let dpor_vv = *version;
        Access { path_id, dpor_vv }
    }
    /// Overwrite this access in place with a new path location and causality.
    pub(crate) fn set(&mut self, path_id: usize, version: &VersionVec) {
        self.path_id = path_id;
        self.dpor_vv = *version;
    }
    /// Update `access` if already present; otherwise initialize it.
    pub(crate) fn set_or_create(access: &mut Option<Self>, path_id: usize, version: &VersionVec) {
        match access.as_mut() {
            Some(existing) => existing.set(path_id, version),
            None => *access = Some(Access::new(path_id, version)),
        }
    }
    /// Location in the path
    pub(crate) fn path_id(&self) -> usize {
        self.path_id
    }
    /// The version vector captured when the access was recorded.
    pub(crate) fn version(&self) -> &VersionVec {
        &self.dpor_vv
    }
    /// `true` when this access's causality is ordered before `version`.
    pub(crate) fn happens_before(&self, version: &VersionVec) -> bool {
        self.dpor_vv <= *version
    }
}

95
third-party/vendor/loom/src/rt/alloc.rs vendored Normal file
View file

@ -0,0 +1,95 @@
use crate::rt;
use crate::rt::{object, Location};
use tracing::trace;
/// Tracks an allocation
#[derive(Debug)]
pub(crate) struct Allocation {
    // Handle to this allocation's `State` in the execution's object store.
    state: object::Ref<State>,
}
#[derive(Debug)]
pub(super) struct State {
    // Set once the tracked allocation is freed; inspected by `check_for_leaks`.
    is_dropped: bool,
    // Source location of the allocation, used in leak reports.
    allocated: Location,
}
/// Track a raw allocation
///
/// Registers `ptr` with the current execution so a missing matching
/// `dealloc` call is reported as a leak. Panics if `ptr` is already tracked.
pub(crate) fn alloc(ptr: *mut u8, location: Location) {
    rt::execution(|execution| {
        let allocation = Allocation {
            state: execution.objects.insert(State {
                is_dropped: false,
                allocated: location,
            }),
        };
        trace!(?allocation.state, ?ptr, %location, "alloc");
        let prev = execution.raw_allocations.insert(ptr as usize, allocation);
        assert!(prev.is_none(), "pointer already tracked");
    });
}
/// Track a raw deallocation
///
/// Removes the tracking entry registered by [`alloc`]. Panics if `ptr` was
/// never tracked (or was already deallocated).
pub(crate) fn dealloc(ptr: *mut u8, location: Location) {
    let key = ptr as usize;
    let allocation = rt::execution(|execution| {
        match execution.raw_allocations.remove(&key) {
            None => panic!("pointer not tracked"),
            Some(allocation) => {
                trace!(state = ?allocation.state, ?ptr, %location, "dealloc");
                allocation
            }
        }
    });
    // Drop outside of the `rt::execution` block
    drop(allocation);
}
impl Allocation {
    /// Track an allocation owned by a loom object, recording where it was
    /// created so leaks can be reported with their allocation site.
    pub(crate) fn new(location: Location) -> Allocation {
        rt::execution(|execution| {
            let initial = State {
                is_dropped: false,
                allocated: location,
            };
            let state = execution.objects.insert(initial);
            trace!(?state, %location, "Allocation::new");
            Allocation { state }
        })
    }
}
impl Drop for Allocation {
    /// Mark the tracked allocation as freed so the leak check passes.
    #[track_caller]
    fn drop(&mut self) {
        let dropped_at = location!();
        rt::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            trace!(state = ?self.state, drop.location = %dropped_at, "Allocation::drop");
            state.is_dropped = true;
        });
    }
}
impl State {
    /// Panics if the tracked allocation was never freed, including the
    /// allocation site when location capture is enabled.
    pub(super) fn check_for_leaks(&self, index: usize) {
        // A freed allocation is not a leak.
        if self.is_dropped {
            return;
        }
        if self.allocated.is_captured() {
            panic!(
                "Allocation leaked.\n  Allocated: {}\n      Index: {}",
                self.allocated, index
            );
        }
        panic!("Allocation leaked.\n      Index: {}", index);
    }
}

210
third-party/vendor/loom/src/rt/arc.rs vendored Normal file
View file

@ -0,0 +1,210 @@
use crate::rt::object;
use crate::rt::{self, Access, Location, Synchronize, VersionVec};
use std::sync::atomic::Ordering::{Acquire, Release, SeqCst};
use tracing::trace;
// Mock of the shared reference-counting state behind `Arc`, tracked as an
// execution object.
#[derive(Debug)]
pub(crate) struct Arc {
    // Handle to this Arc's `State` in the execution's object store.
    state: object::Ref<State>,
}
#[derive(Debug)]
pub(super) struct State {
    /// Reference count
    ref_cnt: usize,
    /// Location where the arc was allocated
    allocated: Location,
    /// Causality transfers between threads
    ///
    /// Only updated on ref dec and acquired before drop
    synchronize: Synchronize,
    /// Tracks access to the arc object
    last_ref_inc: Option<Access>,
    last_ref_dec: Option<Access>,
    last_ref_inspect: Option<Access>,
    /// Kind of the most recent ref-count modification (inc or dec); used to
    /// decide which access an `Inspect` action depends on.
    last_ref_modification: Option<RefModify>,
}
/// Actions performed on the Arc
///
/// These are the branch points explored by the model (see `Arc::branch`).
///
/// Clones are only dependent with inspections. Drops are dependent between each
/// other.
#[derive(Debug, Copy, Clone, PartialEq)]
pub(super) enum Action {
    /// Clone the arc
    RefInc,
    /// Drop the Arc
    RefDec,
    /// Inspect internals (such as get ref count). This is done with SeqCst
    /// causality
    Inspect,
}
/// Actions which modify the Arc's reference count
///
/// Stored in `State::last_ref_modification`; this is used to ascertain
/// dependence for Action::Inspect
#[derive(Debug, Copy, Clone, PartialEq)]
enum RefModify {
    /// Corresponds to Action::RefInc
    RefInc,
    /// Corresponds to Action::RefDec
    RefDec,
}
impl Arc {
pub(crate) fn new(location: Location) -> Arc {
rt::execution(|execution| {
let state = execution.objects.insert(State {
ref_cnt: 1,
allocated: location,
synchronize: Synchronize::new(),
last_ref_inc: None,
last_ref_dec: None,
last_ref_inspect: None,
last_ref_modification: None,
});
trace!(?state, %location, "Arc::new");
Arc { state }
})
}
pub(crate) fn ref_inc(&self, location: Location) {
self.branch(Action::RefInc, location);
rt::execution(|execution| {
let state = self.state.get_mut(&mut execution.objects);
state.ref_cnt = state.ref_cnt.checked_add(1).expect("overflow");
trace!(state = ?self.state, ref_cnt = ?state.ref_cnt, %location, "Arc::ref_inc");
})
}
/// Validate a `get_mut` call
pub(crate) fn get_mut(&self, location: Location) -> bool {
self.branch(Action::RefDec, location);
rt::execution(|execution| {
let state = self.state.get_mut(&mut execution.objects);
assert!(state.ref_cnt >= 1, "Arc is released");
// Synchronize the threads
state.synchronize.sync_load(&mut execution.threads, Acquire);
let is_only_ref = state.ref_cnt == 1;
trace!(state = ?self.state, ?is_only_ref, %location, "Arc::get_mut");
is_only_ref
})
}
/// Returns true if the memory should be dropped.
pub(crate) fn ref_dec(&self, location: Location) -> bool {
self.branch(Action::RefDec, location);
rt::execution(|execution| {
let state = self.state.get_mut(&mut execution.objects);
assert!(state.ref_cnt >= 1, "Arc is already released");
// Decrement the ref count
state.ref_cnt -= 1;
trace!(state = ?self.state, ref_cnt = ?state.ref_cnt, %location, "Arc::ref_dec");
// Synchronize the threads.
state
.synchronize
.sync_store(&mut execution.threads, Release);
if state.ref_cnt == 0 {
// Final ref count, the arc will be dropped. This requires
// acquiring the causality
//
// In the real implementation, this is done with a fence.
state.synchronize.sync_load(&mut execution.threads, Acquire);
true
} else {
false
}
})
}
/// Returns the current simulated strong count, synchronizing with
/// `SeqCst` semantics as `std::sync::Arc::strong_count` effectively
/// observes the counter.
#[track_caller]
pub(crate) fn strong_count(&self) -> usize {
    self.branch(Action::Inspect, location!());
    rt::execution(|execution| {
        let s = self.state.get_mut(&mut execution.objects);
        assert!(s.ref_cnt > 0, "Arc is already released");
        // Synchronize the threads.
        s.synchronize.sync_load(&mut execution.threads, SeqCst);
        s.ref_cnt
    })
}
/// Registers `action` as a branch point with the path explorer, then
/// verifies the object reference was not corrupted while branching.
fn branch(&self, action: Action, location: Location) {
    let state_ref = self.state;
    state_ref.branch_action(action, location);
    assert!(
        state_ref.ref_eq(self.state),
        "Internal state mutated during branch. This is \
         usually due to a bug in the algorithm being tested writing in \
         an invalid memory location."
    );
}
}
impl State {
    /// Verifies the `Arc` was fully released by the end of the
    /// execution; panics (failing the loom test) if it leaked.
    pub(super) fn check_for_leaks(&self, index: usize) {
        if self.ref_cnt != 0 {
            if self.allocated.is_captured() {
                panic!(
                    "Arc leaked.\n Allocated: {}\n Index: {}",
                    self.allocated, index
                );
            } else {
                panic!("Arc leaked.\n Index: {}", index);
            }
        }
    }

    /// Returns the prior access that `action` is dependent on, for the
    /// DPOR exploration algorithm.
    pub(super) fn last_dependent_access(&self, action: Action) -> Option<&Access> {
        match action {
            // RefIncs are not dependent w/ RefDec, only inspections
            Action::RefInc => self.last_ref_inspect.as_ref(),
            Action::RefDec => self.last_ref_dec.as_ref(),
            // An inspection depends on whichever kind of count
            // modification happened most recently.
            Action::Inspect => match self.last_ref_modification {
                Some(RefModify::RefInc) => self.last_ref_inc.as_ref(),
                Some(RefModify::RefDec) => self.last_ref_dec.as_ref(),
                None => None,
            },
        }
    }

    /// Records the access for `action` so that later operations can
    /// find it via `last_dependent_access`.
    pub(super) fn set_last_access(&mut self, action: Action, path_id: usize, version: &VersionVec) {
        match action {
            Action::RefInc => {
                self.last_ref_modification = Some(RefModify::RefInc);
                Access::set_or_create(&mut self.last_ref_inc, path_id, version)
            }
            Action::RefDec => {
                self.last_ref_modification = Some(RefModify::RefDec);
                Access::set_or_create(&mut self.last_ref_dec, path_id, version)
            }
            Action::Inspect => Access::set_or_create(&mut self.last_ref_inspect, path_id, version),
        }
    }
}

931
third-party/vendor/loom/src/rt/atomic.rs vendored Normal file
View file

@ -0,0 +1,931 @@
//! An atomic cell
//!
//! See the CDSChecker paper for detailed explanation.
//!
//! # Modification order implications (figure 7)
//!
//! - Read-Read Coherence:
//!
//! On `load`, all stores are iterated, finding stores that were read by
//! actions in the current thread's causality. These loads happen-before the
//! current load. The `modification_order` of these happen-before loads are
//! joined into the current load's `modification_order`.
//!
//! - Write-Read Coherence:
//!
//! On `load`, all stores are iterated, finding stores that happens-before the
//! current thread's causality. The `modification_order` of these stores are
//! joined into the current load's `modification_order`.
//!
//! - Read-Write Coherence:
//!
//! On `store`, find all existing stores that were read in the current
//! thread's causality. Join these stores' `modification_order` into the new
//! store's modification order.
//!
//! - Write-Write Coherence:
//!
//! The `modification_order` is initialized to the thread's causality. Any
//! store that happened in the thread causality will be earlier in the
//! modification order.
//!
//! - Seq-cst/MO Consistency:
//!
//! - Seq-cst Write-Read Coherence:
//!
//! - RMW/MO Consistency: Subsumed by Write-Write Coherence?
//!
//! - RMW Atomicity:
//!
//!
//! # Fence modification order implications (figure 9)
//!
//! - SC Fences Restrict RF:
//! - SC Fences Restrict RF (Collapsed Store):
//! - SC Fences Restrict RF (Collapsed Load):
//! - SC Fences Impose MO:
//! - SC Fences Impose MO (Collapsed 1st Store):
//! - SC Fences Impose MO (Collapsed 2nd Store):
//!
//!
//! # Fence Synchronization implications (figure 10)
//!
//! - Fence Synchronization
//! - Fence Synchronization (Collapsed Store)
//! - Fence Synchronization (Collapsed Load)
use crate::rt::execution::Execution;
use crate::rt::location::{self, Location, LocationSet};
use crate::rt::object;
use crate::rt::{
self, thread, Access, Numeric, Synchronize, VersionVec, MAX_ATOMIC_HISTORY, MAX_THREADS,
};
use std::cmp;
use std::marker::PhantomData;
use std::sync::atomic::Ordering;
use std::u16;
use tracing::trace;
/// A loom-tracked atomic cell holding a value of numeric type `T`.
#[derive(Debug)]
pub(crate) struct Atomic<T> {
    /// Handle to this cell's state in the execution's object store.
    state: object::Ref<State>,
    // `fn() -> T` ties the cell to `T` without owning one; presumably
    // chosen so the marker stays `Send`/`Sync` for all `T` — TODO confirm.
    _p: PhantomData<fn() -> T>,
}
/// Runtime tracking state for a single atomic cell.
#[derive(Debug)]
pub(super) struct State {
    /// Where the atomic was created
    created_location: Location,
    /// Transitive closure of all atomic loads from the cell.
    loaded_at: VersionVec,
    /// Location for the *last* time a thread atomically loaded from the cell.
    loaded_locations: LocationSet,
    /// Transitive closure of all **unsynchronized** loads from the cell.
    unsync_loaded_at: VersionVec,
    /// Location for the *last* time a thread read **unsynchronized** from the cell.
    unsync_loaded_locations: LocationSet,
    /// Transitive closure of all atomic stores to the cell.
    stored_at: VersionVec,
    /// Location for the *last* time a thread atomically stored to the cell.
    stored_locations: LocationSet,
    /// Version of the most recent **unsynchronized** mutable access to the
    /// cell.
    ///
    /// This includes the initialization of the cell as well as any calls to
    /// `get_mut`.
    unsync_mut_at: VersionVec,
    /// Location for the *last* time a thread `with_mut` from the cell.
    unsync_mut_locations: LocationSet,
    /// `true` when in a `with_mut` closure. If this is set, there can be no
    /// access to the cell.
    is_mutating: bool,
    /// Last time the atomic was accessed. This tracks the dependent access for
    /// the DPOR algorithm.
    last_access: Option<Access>,
    /// Last time the atomic was accessed for a store or rmw operation.
    last_non_load_access: Option<Access>,
    /// Currently tracked stored values. This is the `MAX_ATOMIC_HISTORY` most
    /// recent stores to the atomic cell in loom execution order.
    stores: [Store; MAX_ATOMIC_HISTORY],
    /// The total number of stores to the cell.
    cnt: u16,
}
/// The kinds of operations that can be performed on an atomic cell,
/// used to classify branch points for the DPOR algorithm.
#[derive(Debug, Copy, Clone, PartialEq)]
pub(super) enum Action {
    /// Atomic load
    Load,
    /// Atomic store
    Store,
    /// Atomic read-modify-write
    Rmw,
}
/// A single tracked store to an atomic cell.
#[derive(Debug)]
struct Store {
    /// The stored value. All atomic types can be converted to `u64`.
    value: u64,
    /// The causality of the thread when it stores the value.
    happens_before: VersionVec,
    /// Tracks the modification order. Order is tracked as a partially-ordered
    /// set.
    modification_order: VersionVec,
    /// Manages causality transfers between threads
    sync: Synchronize,
    /// Tracks when each thread first saw value
    first_seen: FirstSeen,
    /// True when the store was done with `SeqCst` ordering
    seq_cst: bool,
}
/// Per-thread record of the atomic version at which each thread first
/// observed a store; `u16::MAX` is the sentinel for "not seen yet".
#[derive(Debug)]
struct FirstSeen([u16; MAX_THREADS]);
/// Implements atomic fence behavior
///
/// Dispatches on `ordering`; a `Relaxed` fence is meaningless and
/// panics, matching the standard library's contract.
pub(crate) fn fence(ordering: Ordering) {
    rt::synchronize(|execution| {
        match ordering {
            Ordering::SeqCst => fence_seqcst(execution),
            Ordering::AcqRel => fence_acqrel(execution),
            Ordering::Acquire => fence_acq(execution),
            Ordering::Release => fence_rel(execution),
            Ordering::Relaxed => panic!("there is no such thing as a relaxed fence"),
            order => unimplemented!("unimplemented ordering {:?}", order),
        }
    });
}
/// An acquire fence: synchronizes-with every store (across all atomic
/// objects) that the current thread has already observed via a load.
fn fence_acq(execution: &mut Execution) {
    for state in execution.objects.iter_mut::<State>() {
        // Walk every tracked store of this atomic object.
        for store in state.stores_mut() {
            if store.first_seen.is_seen_by_current(&execution.threads) {
                store
                    .sync
                    .sync_load(&mut execution.threads, Ordering::Acquire);
            }
        }
    }
}
/// A release fence: snapshot the active thread's current causality as
/// its released view, to be transferred by subsequent stores.
fn fence_rel(execution: &mut Execution) {
    let thread = execution.threads.active_mut();
    thread.released = thread.causality;
}
/// An `AcqRel` fence is exactly an acquire fence followed by a release
/// fence.
fn fence_acqrel(execution: &mut Execution) {
    fence_acq(execution);
    fence_rel(execution);
}
/// A `SeqCst` fence behaves as an `AcqRel` fence and additionally
/// participates in the sequentially-consistent total order.
fn fence_seqcst(execution: &mut Execution) {
    fence_acqrel(execution);
    execution.threads.seq_cst_fence();
}
impl<T: Numeric> Atomic<T> {
    /// Create a new, atomic cell initialized with the provided value
    pub(crate) fn new(value: T, location: Location) -> Atomic<T> {
        rt::execution(|execution| {
            let state = State::new(&mut execution.threads, value.into_u64(), location);
            let state = execution.objects.insert(state);
            trace!(?state, "Atomic::new");
            Atomic {
                state,
                _p: PhantomData,
            }
        })
    }

    /// Loads a value from the atomic cell.
    ///
    /// Which store the load observes is itself a branch point: the path
    /// explorer permutes over every store the memory model permits this
    /// load to return.
    pub(crate) fn load(&self, location: Location, ordering: Ordering) -> T {
        self.branch(Action::Load, location);
        super::synchronize(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            // If necessary, generate the list of stores to permute through
            if execution.path.is_traversed() {
                let mut seed = [0; MAX_ATOMIC_HISTORY];
                let n = state.match_load_to_stores(&execution.threads, &mut seed[..], ordering);
                execution.path.push_load(&seed[..n]);
            }
            // Get the store to return from this load.
            let index = execution.path.branch_load();
            trace!(state = ?self.state, ?ordering, "Atomic::load");
            T::from_u64(state.load(&mut execution.threads, index, location, ordering))
        })
    }

    /// Loads a value from the atomic cell without performing synchronization
    ///
    /// Always returns the most recent store (in loom execution order),
    /// and is race-checked like a plain read of the cell.
    pub(crate) fn unsync_load(&self, location: Location) -> T {
        rt::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            state
                .unsync_loaded_locations
                .track(location, &execution.threads);
            // An unsync load counts as a "read" access
            state.track_unsync_load(&execution.threads);
            trace!(state = ?self.state, "Atomic::unsync_load");
            // Return the value
            let index = index(state.cnt - 1);
            T::from_u64(state.stores[index].value)
        })
    }

    /// Stores a value into the atomic cell.
    pub(crate) fn store(&self, location: Location, val: T, ordering: Ordering) {
        self.branch(Action::Store, location);
        super::synchronize(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            state.stored_locations.track(location, &execution.threads);
            // An atomic store counts as a read access to the underlying memory
            // cell.
            state.track_store(&execution.threads);
            trace!(state = ?self.state, ?ordering, "Atomic::store");
            // Do the store
            state.store(
                &mut execution.threads,
                Synchronize::new(),
                val.into_u64(),
                ordering,
            );
        })
    }

    /// Performs a read-modify-write on the cell.
    ///
    /// `f` receives the current value; `Ok(next)` commits `next` with
    /// the `success` ordering while `Err(_)` leaves the cell unchanged
    /// and synchronizes with the `failure` ordering. Returns the
    /// previous value on success.
    pub(crate) fn rmw<F, E>(
        &self,
        location: Location,
        success: Ordering,
        failure: Ordering,
        f: F,
    ) -> Result<T, E>
    where
        F: FnOnce(T) -> Result<T, E>,
    {
        self.branch(Action::Rmw, location);
        super::synchronize(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            // If necessary, generate the list of stores to permute through
            if execution.path.is_traversed() {
                let mut seed = [0; MAX_ATOMIC_HISTORY];
                let n = state.match_rmw_to_stores(&mut seed[..]);
                execution.path.push_load(&seed[..n]);
            }
            // Get the store to use for the read portion of the rmw operation.
            let index = execution.path.branch_load();
            trace!(state = ?self.state, ?success, ?failure, "Atomic::rmw");
            state
                .rmw(
                    &mut execution.threads,
                    index,
                    location,
                    success,
                    failure,
                    |num| f(T::from_u64(num)).map(T::into_u64),
                )
                .map(T::from_u64)
        })
    }

    /// Access a mutable reference to value most recently stored.
    ///
    /// `with_mut` must happen-after all stores to the cell.
    pub(crate) fn with_mut<R>(&mut self, location: Location, f: impl FnOnce(&mut T) -> R) -> R {
        let value = super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            state
                .unsync_mut_locations
                .track(location, &execution.threads);
            // Verify the mutation may happen
            state.track_unsync_mut(&execution.threads);
            state.is_mutating = true;
            trace!(state = ?self.state, "Atomic::with_mut");
            // Return the value of the most recent store
            let index = index(state.cnt - 1);
            T::from_u64(state.stores[index].value)
        });
        // Drop guard: writes the (possibly mutated) value back and
        // clears `is_mutating` even if `f` panics.
        struct Reset<T: Numeric>(T, object::Ref<State>);
        impl<T: Numeric> Drop for Reset<T> {
            fn drop(&mut self) {
                super::execution(|execution| {
                    let state = self.1.get_mut(&mut execution.objects);
                    // Make sure the state is as expected
                    assert!(state.is_mutating);
                    state.is_mutating = false;
                    // The value may have been mutated, so it must be placed
                    // back.
                    let index = index(state.cnt - 1);
                    state.stores[index].value = T::into_u64(self.0);
                    if !std::thread::panicking() {
                        state.track_unsync_mut(&execution.threads);
                    }
                });
            }
        }
        // Unset on exit
        let mut reset = Reset(value, self.state);
        f(&mut reset.0)
    }

    /// Registers `action` as a branch point and verifies the object
    /// reference was not corrupted while branching.
    fn branch(&self, action: Action, location: Location) {
        let r = self.state;
        r.branch_action(action, location);
        assert!(
            r.ref_eq(self.state),
            "Internal state mutated during branch. This is \
             usually due to a bug in the algorithm being tested writing in \
             an invalid memory location."
        );
    }
}
// ===== impl State =====
impl State {
    /// Creates tracking state for a new atomic cell, seeding it with an
    /// initial `Release` store of `value`.
    fn new(threads: &mut thread::Set, value: u64, location: Location) -> State {
        let mut state = State {
            created_location: location,
            loaded_at: VersionVec::new(),
            loaded_locations: LocationSet::new(),
            unsync_loaded_at: VersionVec::new(),
            unsync_loaded_locations: LocationSet::new(),
            stored_at: VersionVec::new(),
            stored_locations: LocationSet::new(),
            unsync_mut_at: VersionVec::new(),
            unsync_mut_locations: LocationSet::new(),
            is_mutating: false,
            last_access: None,
            last_non_load_access: None,
            stores: Default::default(),
            cnt: 0,
        };
        // All subsequent accesses must happen-after.
        state.track_unsync_mut(threads);
        // Store the initial thread
        //
        // The actual order shouldn't matter as operation on the atomic
        // **should** already include the thread causality resulting in the
        // creation of this atomic cell.
        //
        // This is verified using `cell`.
        state.store(threads, Synchronize::new(), value, Ordering::Release);
        state
    }

    /// Performs a load of the store at `index`, applying coherence and
    /// synchronization rules, and returns the stored value.
    fn load(
        &mut self,
        threads: &mut thread::Set,
        index: usize,
        location: Location,
        ordering: Ordering,
    ) -> u64 {
        self.loaded_locations.track(location, threads);
        // Validate memory safety
        self.track_load(threads);
        // Apply coherence rules
        self.apply_load_coherence(threads, index);
        let store = &mut self.stores[index];
        store.first_seen.touch(threads);
        store.sync.sync_load(threads, ordering);
        store.value
    }

    /// Records a new store of `value`, computing its modification order
    /// and performing the release-side synchronization.
    fn store(
        &mut self,
        threads: &mut thread::Set,
        mut sync: Synchronize,
        value: u64,
        ordering: Ordering,
    ) {
        let index = index(self.cnt);
        // Increment the count
        self.cnt += 1;
        // The modification order is initialized to the thread's current
        // causality. All reads / writes that happen before this store are
        // ordered before the store.
        let happens_before = threads.active().causality;
        // Starting with the thread's causality covers WRITE-WRITE coherence
        let mut modification_order = happens_before;
        // Apply coherence rules
        for i in 0..self.stores.len() {
            // READ-WRITE coherence
            if self.stores[i].first_seen.is_seen_by_current(threads) {
                let mo = self.stores[i].modification_order;
                modification_order.join(&mo);
            }
        }
        sync.sync_store(threads, ordering);
        let mut first_seen = FirstSeen::new();
        first_seen.touch(threads);
        // Track the store
        self.stores[index] = Store {
            value,
            happens_before,
            modification_order,
            sync,
            first_seen,
            seq_cst: is_seq_cst(ordering),
        };
    }

    /// Read-modify-write against the store at `index`.
    ///
    /// On `Ok`, commits the closure's value as a new store; on `Err`,
    /// only the failure-side load synchronization happens.
    fn rmw<E>(
        &mut self,
        threads: &mut thread::Set,
        index: usize,
        location: Location,
        success: Ordering,
        failure: Ordering,
        f: impl FnOnce(u64) -> Result<u64, E>,
    ) -> Result<u64, E> {
        self.loaded_locations.track(location, threads);
        // Track the load is happening in order to ensure correct
        // synchronization to the underlying cell.
        self.track_load(threads);
        // Apply coherence rules.
        self.apply_load_coherence(threads, index);
        self.stores[index].first_seen.touch(threads);
        let prev = self.stores[index].value;
        match f(prev) {
            Ok(next) => {
                self.stored_locations.track(location, threads);
                // Track a store operation happened
                self.track_store(threads);
                // Perform load synchronization using the `success` ordering.
                self.stores[index].sync.sync_load(threads, success);
                // Store the new value, initializing with the `sync` value from
                // the load. This is our (hacky) way to establish a release
                // sequence.
                let sync = self.stores[index].sync;
                self.store(threads, sync, next, success);
                Ok(prev)
            }
            Err(e) => {
                self.stores[index].sync.sync_load(threads, failure);
                Err(e)
            }
        }
    }

    /// Joins the modification orders required by READ-READ and
    /// WRITE-READ coherence into the store being loaded at `index`.
    fn apply_load_coherence(&mut self, threads: &mut thread::Set, index: usize) {
        for i in 0..self.stores.len() {
            // Skip the store that is being loaded.
            if index == i {
                continue;
            }
            // READ-READ coherence
            if self.stores[i].first_seen.is_seen_by_current(threads) {
                let mo = self.stores[i].modification_order;
                self.stores[index].modification_order.join(&mo);
            }
            // WRITE-READ coherence
            if self.stores[i].happens_before < threads.active().causality {
                let mo = self.stores[i].modification_order;
                self.stores[index].modification_order.join(&mo);
            }
        }
    }

    /// Track an atomic load
    ///
    /// Panics (reporting a causality violation) when the load races a
    /// `with_mut` access.
    fn track_load(&mut self, threads: &thread::Set) {
        assert!(!self.is_mutating, "atomic cell is in `with_mut` call");
        let current = &threads.active().causality;
        if let Some(mut_at) = current.ahead(&self.unsync_mut_at) {
            location::panic("Causality violation: Concurrent load and mut accesses.")
                .location("created", self.created_location)
                .thread("with_mut", mut_at, self.unsync_mut_locations[mut_at])
                .thread("load", threads.active_id(), self.loaded_locations[threads])
                .fire();
        }
        self.loaded_at.join(current);
    }

    /// Track an unsynchronized load
    ///
    /// Panics when the unsync load races a `with_mut` access or an
    /// atomic store.
    fn track_unsync_load(&mut self, threads: &thread::Set) {
        assert!(!self.is_mutating, "atomic cell is in `with_mut` call");
        let current = &threads.active().causality;
        if let Some(mut_at) = current.ahead(&self.unsync_mut_at) {
            location::panic("Causality violation: Concurrent `unsync_load` and mut accesses.")
                .location("created", self.created_location)
                .thread("with_mut", mut_at, self.unsync_mut_locations[mut_at])
                .thread(
                    "unsync_load",
                    threads.active_id(),
                    self.unsync_loaded_locations[threads],
                )
                .fire();
        }
        if let Some(stored) = current.ahead(&self.stored_at) {
            location::panic("Causality violation: Concurrent `unsync_load` and atomic store.")
                .location("created", self.created_location)
                .thread("atomic store", stored, self.stored_locations[stored])
                .thread(
                    "unsync_load",
                    threads.active_id(),
                    self.unsync_loaded_locations[threads],
                )
                .fire();
        }
        self.unsync_loaded_at.join(current);
    }

    /// Track an atomic store
    ///
    /// Panics when the store races a `with_mut` access or an
    /// unsynchronized load.
    fn track_store(&mut self, threads: &thread::Set) {
        assert!(!self.is_mutating, "atomic cell is in `with_mut` call");
        let current = &threads.active().causality;
        if let Some(mut_at) = current.ahead(&self.unsync_mut_at) {
            location::panic("Causality violation: Concurrent atomic store and mut accesses.")
                .location("created", self.created_location)
                .thread("with_mut", mut_at, self.unsync_mut_locations[mut_at])
                .thread(
                    "atomic store",
                    threads.active_id(),
                    self.stored_locations[threads],
                )
                .fire();
        }
        if let Some(loaded) = current.ahead(&self.unsync_loaded_at) {
            location::panic(
                "Causality violation: Concurrent atomic store and `unsync_load` accesses.",
            )
            .location("created", self.created_location)
            .thread("unsync_load", loaded, self.unsync_loaded_locations[loaded])
            .thread(
                "atomic store",
                threads.active_id(),
                self.stored_locations[threads],
            )
            .fire();
        }
        self.stored_at.join(current);
    }

    /// Track an unsynchronized mutation
    ///
    /// A `with_mut` access must happen-after *every* prior access of
    /// any kind, so all four access vectors are checked.
    fn track_unsync_mut(&mut self, threads: &thread::Set) {
        assert!(!self.is_mutating, "atomic cell is in `with_mut` call");
        let current = &threads.active().causality;
        if let Some(loaded) = current.ahead(&self.loaded_at) {
            location::panic("Causality violation: Concurrent atomic load and unsync mut accesses.")
                .location("created", self.created_location)
                .thread("atomic load", loaded, self.loaded_locations[loaded])
                .thread(
                    "with_mut",
                    threads.active_id(),
                    self.unsync_mut_locations[threads],
                )
                .fire();
        }
        if let Some(loaded) = current.ahead(&self.unsync_loaded_at) {
            location::panic(
                "Causality violation: Concurrent `unsync_load` and unsync mut accesses.",
            )
            .location("created", self.created_location)
            .thread("unsync_load", loaded, self.unsync_loaded_locations[loaded])
            .thread(
                "with_mut",
                threads.active_id(),
                self.unsync_mut_locations[threads],
            )
            .fire();
        }
        if let Some(stored) = current.ahead(&self.stored_at) {
            location::panic(
                "Causality violation: Concurrent atomic store and unsync mut accesses.",
            )
            .location("created", self.created_location)
            .thread("atomic store", stored, self.stored_locations[stored])
            .thread(
                "with_mut",
                threads.active_id(),
                self.unsync_mut_locations[threads],
            )
            .fire();
        }
        if let Some(mut_at) = current.ahead(&self.unsync_mut_at) {
            location::panic("Causality violation: Concurrent unsync mut accesses.")
                .location("created", self.created_location)
                .thread("with_mut one", mut_at, self.unsync_mut_locations[mut_at])
                .thread(
                    "with_mut two",
                    threads.active_id(),
                    self.unsync_mut_locations[threads],
                )
                .fire();
        }
        self.unsync_mut_at.join(current);
    }

    /// Find all stores that could be returned by an atomic load.
    ///
    /// Writes candidate store indices into `dst` and returns how many
    /// were written.
    fn match_load_to_stores(
        &self,
        threads: &thread::Set,
        dst: &mut [u8],
        ordering: Ordering,
    ) -> usize {
        let mut n = 0;
        let cnt = self.cnt as usize;
        // We only need to consider stores that are at least as recent as
        // the **most** recent store seen by each thread in the current
        // causality.
        //
        // This probably isn't the smartest way to implement this, but someone
        // else can figure out how to improve on it if it turns out to be a
        // bottleneck.
        //
        // Add all stores **unless** a newer store has already been seen by the
        // current thread's causality.
        'outer: for i in 0..self.stores.len() {
            let store_i = &self.stores[i];
            if i >= cnt {
                // Not a real store
                continue;
            }
            for j in 0..self.stores.len() {
                let store_j = &self.stores[j];
                if i == j || j >= cnt {
                    continue;
                }
                let mo_i = store_i.modification_order;
                let mo_j = store_j.modification_order;
                // TODO: this sometimes fails
                assert_ne!(mo_i, mo_j);
                if mo_i < mo_j {
                    if store_j.first_seen.is_seen_by_current(threads) {
                        // Store `j` is newer, so don't store the current one.
                        continue 'outer;
                    }
                    if store_i.first_seen.is_seen_before_yield(threads) {
                        // Saw this store before the previous yield. In order
                        // to advance the model, don't return it again.
                        continue 'outer;
                    }
                    if is_seq_cst(ordering) && store_i.seq_cst && store_j.seq_cst {
                        // There is a newer SeqCst store
                        continue 'outer;
                    }
                }
            }
            // The load may return this store
            dst[n] = i as u8;
            n += 1;
        }
        n
    }

    /// Find the stores a read-modify-write may read from.
    ///
    /// Unlike a plain load, an rmw must read a maximal store in the
    /// modification order.
    fn match_rmw_to_stores(&self, dst: &mut [u8]) -> usize {
        let mut n = 0;
        let cnt = self.cnt as usize;
        // Unlike `match_load_to_stores`, rmw operations only load "newest"
        // stores, in terms of modification order.
        'outer: for i in 0..self.stores.len() {
            let store_i = &self.stores[i];
            if i >= cnt {
                // Not a real store
                continue;
            }
            for j in 0..self.stores.len() {
                let store_j = &self.stores[j];
                if i == j || j >= cnt {
                    continue;
                }
                let mo_i = store_i.modification_order;
                let mo_j = store_j.modification_order;
                assert_ne!(mo_i, mo_j);
                if mo_i < mo_j {
                    // There is a newer store.
                    continue 'outer;
                }
            }
            // The load may return this store
            dst[n] = i as u8;
            n += 1;
        }
        n
    }

    /// Iterates the live stores in the circular buffer, oldest first.
    fn stores_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Store> {
        let (start, end) = range(self.cnt);
        // The buffer wraps: `one` holds the older run, `two` the newer.
        let (two, one) = self.stores[..end].split_at_mut(start);
        one.iter_mut().chain(two.iter_mut())
    }

    /// Returns the last dependent access
    pub(super) fn last_dependent_access(&self, action: Action) -> Option<&Access> {
        match action {
            // Loads only conflict with stores / rmws, not other loads.
            Action::Load => self.last_non_load_access.as_ref(),
            _ => self.last_access.as_ref(),
        }
    }

    /// Sets the last dependent access
    pub(super) fn set_last_access(&mut self, action: Action, path_id: usize, version: &VersionVec) {
        // Always set `last_access`
        Access::set_or_create(&mut self.last_access, path_id, version);
        match action {
            Action::Load => {}
            _ => {
                // Stores / RMWs
                Access::set_or_create(&mut self.last_non_load_access, path_id, version);
            }
        }
    }
}
// ===== impl Store =====
impl Default for Store {
    /// An empty placeholder slot in the store buffer: zero value, empty
    /// version vectors, and not yet seen by any thread.
    fn default() -> Store {
        Store {
            value: 0,
            happens_before: VersionVec::new(),
            modification_order: VersionVec::new(),
            sync: Synchronize::new(),
            first_seen: FirstSeen::new(),
            seq_cst: false,
        }
    }
}
// ===== impl FirstSeen =====
impl FirstSeen {
    /// Creates a tracker with every thread marked "not seen".
    ///
    /// `u16::MAX` is the sentinel for an unset slot; this uses the
    /// `u16::MAX` constant consistently instead of mixing it with the
    /// soft-deprecated `u16::max_value()` as before.
    fn new() -> FirstSeen {
        FirstSeen([u16::MAX; MAX_THREADS])
    }

    /// Records the first time the active thread observes this store;
    /// later observations keep the original version.
    fn touch(&mut self, threads: &thread::Set) {
        // Index the slot once instead of twice.
        let slot = &mut self.0[threads.active_id().as_usize()];
        if *slot == u16::MAX {
            *slot = threads.active_atomic_version();
        }
    }

    /// Returns `true` if any thread in the current thread's causality
    /// had already observed this store.
    fn is_seen_by_current(&self, threads: &thread::Set) -> bool {
        for (thread_id, version) in threads.active().causality.versions(threads.execution_id()) {
            match self.0[thread_id.as_usize()] {
                // Never seen by that thread.
                u16::MAX => {}
                v if v <= version => return true,
                _ => {}
            }
        }
        false
    }

    /// Returns `true` if the active thread saw this store at or before
    /// its last explicit yield point.
    fn is_seen_before_yield(&self, threads: &thread::Set) -> bool {
        let thread_id = threads.active_id();
        let last_yield = match threads.active().last_yield {
            Some(v) => v,
            None => return false,
        };
        match self.0[thread_id.as_usize()] {
            u16::MAX => false,
            v => v <= last_yield,
        }
    }
}
/// Returns `true` only for the `SeqCst` memory ordering.
fn is_seq_cst(order: Ordering) -> bool {
    matches!(order, Ordering::SeqCst)
}
/// Returns the `[start, end)` bounds of live entries in the circular
/// store buffer for a total store count of `cnt`.
fn range(cnt: u16) -> (usize, usize) {
    // Oldest retained store: anything earlier was overwritten.
    let start = index(cnt.saturating_sub(MAX_ATOMIC_HISTORY as u16));
    let mut end = index(cmp::min(cnt, MAX_ATOMIC_HISTORY as u16));
    // `index` wraps to 0 when the buffer is exactly full; treat that as
    // "one past the last slot" instead.
    if end == 0 {
        end = MAX_ATOMIC_HISTORY;
    }
    assert!(
        start <= end,
        "[loom internal bug] cnt = {}; start = {}; end = {}",
        cnt,
        start,
        end
    );
    (start, end)
}
/// Maps a monotonically increasing store count onto a slot in the
/// fixed-size circular store buffer.
fn index(cnt: u16) -> usize {
    usize::from(cnt) % MAX_ATOMIC_HISTORY as usize
}

188
third-party/vendor/loom/src/rt/cell.rs vendored Normal file
View file

@ -0,0 +1,188 @@
use crate::rt::location::{self, Location, LocationSet};
use crate::rt::{self, object, thread, VersionVec};
/// Tracks immutable and mutable access to a single memory cell.
#[derive(Debug)]
pub(crate) struct Cell {
    /// Handle to the cell's tracking state in the execution's object store.
    state: object::Ref<State>,
}
/// Tracking state for a single `UnsafeCell`-like memory location.
#[derive(Debug)]
pub(super) struct State {
    /// Where the cell was created
    created_location: Location,
    /// Number of threads currently reading the cell
    is_reading: usize,
    /// `true` if in a `with_mut` closure.
    is_writing: bool,
    /// The transitive closure of all immutable accesses of `data`.
    read_access: VersionVec,
    /// Location for the *last* time a thread read from the cell.
    read_locations: LocationSet,
    /// The last mutable access of `data`.
    write_access: VersionVec,
    /// Location for the *last* time a thread wrote to the cell
    write_locations: LocationSet,
}
/// Guard for an in-progress immutable access; its `Drop` impl ends the
/// read and re-validates causality.
#[derive(Debug)]
pub(crate) struct Reading {
    state: object::Ref<State>,
}
/// Guard for an in-progress mutable access; its `Drop` impl ends the
/// write and re-validates causality.
#[derive(Debug)]
pub(crate) struct Writing {
    state: object::Ref<State>,
}
impl Cell {
    /// Registers a new tracked cell with the current execution.
    pub(crate) fn new(location: Location) -> Cell {
        rt::execution(|execution| {
            let state = State::new(&execution.threads, location);
            Cell {
                state: execution.objects.insert(state),
            }
        })
    }

    /// Begins an immutable access, returning a guard that ends the
    /// access when dropped.
    ///
    /// Panics if a mutable access is currently in progress.
    pub(crate) fn start_read(&self, location: Location) -> Reading {
        // Enter the read closure
        rt::synchronize(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            assert!(!state.is_writing, "currently writing to cell");
            state.is_reading += 1;
            state.read_locations.track(location, &execution.threads);
            state.track_read(&execution.threads);
            Reading { state: self.state }
        })
    }

    /// Begins a mutable access, returning a guard that ends the access
    /// when dropped.
    ///
    /// Panics if any read or another write is currently in progress.
    pub(crate) fn start_write(&self, location: Location) -> Writing {
        // Enter the write closure
        rt::synchronize(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            assert!(state.is_reading == 0, "currently reading from cell");
            assert!(!state.is_writing, "currently writing to cell");
            state.is_writing = true;
            state.write_locations.track(location, &execution.threads);
            state.track_write(&execution.threads);
            Writing { state: self.state }
        })
    }
}
impl State {
    /// Creates tracking state seeded with the creating thread's
    /// causality, so every later access must happen-after creation.
    fn new(threads: &thread::Set, location: Location) -> State {
        let version = threads.active().causality;
        State {
            created_location: location,
            is_reading: 0,
            is_writing: false,
            read_access: version,
            read_locations: LocationSet::new(),
            write_access: version,
            write_locations: LocationSet::new(),
        }
    }

    /// Perform a read access
    ///
    /// Panics (reporting a causality violation) if the last mutable
    /// access does not happen-before this read.
    fn track_read(&mut self, threads: &thread::Set) {
        let current = &threads.active().causality;
        // Check that there is no concurrent mutable access, i.e., the last
        // mutable access must happen-before this immutable access.
        if let Some(writer) = current.ahead(&self.write_access) {
            location::panic("Causality violation: Concurrent read and write accesses.")
                .location("created", self.created_location)
                .thread("read", threads.active_id(), self.read_locations[threads])
                .thread("write", writer, self.write_locations[writer])
                .fire();
        }
        self.read_access.join(current);
    }

    /// Perform a write access
    ///
    /// Panics if any prior read or write does not happen-before this
    /// mutable access.
    fn track_write(&mut self, threads: &thread::Set) {
        let current = &threads.active().causality;
        // Check that there is no concurrent mutable access, i.e., the last
        // mutable access must happen-before this mutable access.
        if let Some(other) = current.ahead(&self.write_access) {
            location::panic("Causality violation: Concurrent write accesses to `UnsafeCell`.")
                .location("created", self.created_location)
                .thread("write one", other, self.write_locations[other])
                .thread(
                    "write two",
                    threads.active_id(),
                    self.write_locations[threads],
                )
                .fire();
        }
        // Check that there are no concurrent immutable accesses, i.e., every
        // immutable access must happen-before this mutable access.
        if let Some(reader) = current.ahead(&self.read_access) {
            location::panic(
                "Causality violation: Concurrent read and write accesses to `UnsafeCell`.",
            )
            .location("created", self.created_location)
            .thread("read", reader, self.read_locations[reader])
            .thread("write", threads.active_id(), self.write_locations[threads])
            .fire();
        }
        self.write_access.join(current);
    }
}
// === impl Reading ===
// === impl Reading ===
impl Drop for Reading {
    /// Ends the immutable access and re-checks causality at the exit
    /// point (skipped while unwinding to avoid a double panic).
    fn drop(&mut self) {
        rt::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            assert!(state.is_reading > 0);
            assert!(!state.is_writing);
            state.is_reading -= 1;
            if !std::thread::panicking() {
                state.track_read(&execution.threads);
            }
        })
    }
}
// === impl Writing ===
impl Drop for Writing {
    /// Ends the mutable access and re-checks causality at the exit
    /// point (skipped while unwinding to avoid a double panic).
    fn drop(&mut self) {
        rt::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            assert!(state.is_writing);
            assert!(state.is_reading == 0);
            state.is_writing = false;
            if !std::thread::panicking() {
                state.track_write(&execution.threads);
            }
        })
    }
}

View file

@ -0,0 +1,104 @@
use crate::rt::object;
use crate::rt::{self, thread, Access, Mutex, VersionVec};
use std::collections::VecDeque;
use tracing::trace;
use super::Location;
/// Runtime state handle for a loom-tracked condition variable.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Condvar {
    /// Handle into the execution's object store.
    state: object::Ref<State>,
}
#[derive(Debug)]
pub(super) struct State {
    /// Last dependent access to this condvar, for the DPOR algorithm.
    last_access: Option<Access>,
    /// Threads waiting on the condvar
    waiters: VecDeque<thread::Id>,
}
impl Condvar {
    /// Create a new condition variable object
    pub(crate) fn new() -> Condvar {
        super::execution(|execution| {
            let state = execution.objects.insert(State {
                last_access: None,
                waiters: VecDeque::new(),
            });
            trace!(?state, "Condvar::new");
            Condvar { state }
        })
    }

    /// Blocks the current thread until this condition variable receives a notification.
    ///
    /// The caller must hold `mutex`; it is released for the duration of
    /// the wait and re-acquired before returning.
    pub(crate) fn wait(&self, mutex: &Mutex, location: Location) {
        self.state.branch_opaque(location);
        rt::execution(|execution| {
            trace!(state = ?self.state, ?mutex, "Condvar::wait");
            let state = self.state.get_mut(&mut execution.objects);
            // Track the current thread as a waiter
            state.waiters.push_back(execution.threads.active_id());
        });
        // Release the lock
        mutex.release_lock();
        // Disable the current thread
        rt::park(location);
        // Acquire the lock again
        mutex.acquire_lock(location);
    }

    /// Wakes up one blocked thread on this condvar.
    pub(crate) fn notify_one(&self, location: Location) {
        self.state.branch_opaque(location);
        rt::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            // Notify the first waiter
            let thread = state.waiters.pop_front();
            trace!(state = ?self.state, ?thread, "Condvar::notify_one");
            if let Some(thread) = thread {
                execution.threads.unpark(thread);
            }
        })
    }

    /// Wakes up all blocked threads on this condvar.
    pub(crate) fn notify_all(&self, location: Location) {
        self.state.branch_opaque(location);
        rt::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            trace!(state = ?self.state, threads = ?state.waiters, "Condvar::notify_all");
            for thread in state.waiters.drain(..) {
                execution.threads.unpark(thread);
            }
        })
    }
}
impl State {
    /// Returns the last access, for the DPOR dependent-access check.
    pub(super) fn last_dependent_access(&self) -> Option<&Access> {
        self.last_access.as_ref()
    }

    /// Records the most recent access to this condvar.
    pub(crate) fn set_last_access(&mut self, path_id: usize, version: &VersionVec) {
        Access::set_or_create(&mut self.last_access, path_id, version);
    }
}

View file

@ -0,0 +1,285 @@
use crate::rt::alloc::Allocation;
use crate::rt::{lazy_static, object, thread, Path};
use std::collections::HashMap;
use std::convert::TryInto;
use std::fmt;
use tracing::info;
/// All state for a single model-checking run (one interleaving).
pub(crate) struct Execution {
    /// Uniquely identifies an execution
    pub(super) id: Id,
    /// Execution path taken
    pub(crate) path: Path,
    /// All simulated threads participating in this execution.
    pub(crate) threads: thread::Set,
    /// `lazy_static` values initialized during this execution.
    pub(crate) lazy_statics: lazy_static::Set,
    /// All loom aware objects part of this execution run.
    pub(super) objects: object::Store,
    /// Maps raw allocations to LeakTrack objects
    pub(super) raw_allocations: HashMap<usize, Allocation>,
    /// Maps pointers handed out by the mocked `Arc` to their runtime state.
    pub(crate) arc_objs: HashMap<*const (), std::sync::Arc<super::Arc>>,
    /// Maximum number of concurrent threads
    pub(super) max_threads: usize,
    // NOTE(review): initialized to a fixed 7 in `new`; presumably bounds
    // per-object history tracking — confirm against consumers.
    pub(super) max_history: usize,
    /// Capture locations for significant events
    pub(crate) location: bool,
    /// Log execution output to STDOUT
    pub(crate) log: bool,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]
pub(crate) struct Id(usize);
impl Execution {
    /// Create a new execution.
    ///
    /// This is only called at the start of a fuzz run. The same instance is
    /// reused across permutations.
    pub(crate) fn new(
        max_threads: usize,
        max_branches: usize,
        preemption_bound: Option<usize>,
    ) -> Execution {
        let id = Id::new();
        let threads = thread::Set::new(id, max_threads);
        // Convert the bound into the narrower type `Path` expects; reject
        // values that do not fit rather than silently truncating.
        let preemption_bound =
            preemption_bound.map(|bound| bound.try_into().expect("preemption_bound too big"));
        Execution {
            id,
            path: Path::new(max_branches, preemption_bound),
            threads,
            lazy_statics: lazy_static::Set::new(),
            objects: object::Store::with_capacity(max_branches),
            raw_allocations: HashMap::new(),
            arc_objs: HashMap::new(),
            max_threads,
            max_history: 7,
            location: false,
            log: false,
        }
    }

    /// Create state to track a new thread
    ///
    /// The new thread inherits the spawner's causality, and both vector
    /// clocks are bumped so the spawn itself is ordered.
    pub(crate) fn new_thread(&mut self) -> thread::Id {
        let thread_id = self.threads.new_thread();
        let active_id = self.threads.active_id();
        let (active, new) = self.threads.active2_mut(thread_id);
        new.causality.join(&active.causality);
        new.dpor_vv.join(&active.dpor_vv);
        // Bump causality in order to ensure CausalCell accurately detects
        // incorrect access when first action.
        new.causality[thread_id] += 1;
        active.causality[active_id] += 1;
        thread_id
    }

    /// Resets the execution state for the next execution run
    ///
    /// Returns `None` when the path has been fully explored and there are no
    /// more permutations to check.
    pub(crate) fn step(self) -> Option<Self> {
        let id = Id::new();
        let max_threads = self.max_threads;
        let max_history = self.max_history;
        let location = self.location;
        let log = self.log;
        let mut path = self.path;
        let mut objects = self.objects;
        let mut lazy_statics = self.lazy_statics;
        let mut raw_allocations = self.raw_allocations;
        let mut arc_objs = self.arc_objs;
        let mut threads = self.threads;
        // Advance the path to the next unexplored branch; bail out when done.
        if !path.step() {
            return None;
        }
        objects.clear();
        lazy_statics.reset();
        raw_allocations.clear();
        arc_objs.clear();
        threads.clear(id);
        Some(Execution {
            id,
            path,
            threads,
            objects,
            lazy_statics,
            raw_allocations,
            arc_objs,
            max_threads,
            max_history,
            location,
            log,
        })
    }

    /// Returns `true` if a switch is required
    ///
    /// Picks the next thread to run and records DPOR backtracking points for
    /// racing operations. Panics if no thread is runnable and not all threads
    /// have terminated (deadlock).
    pub(crate) fn schedule(&mut self) -> bool {
        use crate::rt::path::Thread;
        // Implementation of the DPOR algorithm.
        let curr_thread = self.threads.active_id();
        for (th_id, th) in self.threads.iter() {
            let operation = match th.operation {
                Some(operation) => operation,
                None => continue,
            };
            if let Some(access) = self.objects.last_dependent_access(operation) {
                if access.happens_before(&th.dpor_vv) {
                    // The previous access happened before this access, thus
                    // there is no race.
                    continue;
                }
                // Get the point to backtrack to
                let point = access.path_id();
                // Track backtracking point
                self.path.backtrack(point, th_id);
            }
        }
        // It's important to avoid pre-emption as much as possible
        let mut initial = Some(self.threads.active_id());
        // If the thread is not runnable, then we can pick any arbitrary other
        // runnable thread.
        if !self.threads.active().is_runnable() {
            initial = None;
            // Prefer the runnable thread with the fewest yields so far.
            for (i, th) in self.threads.iter() {
                if !th.is_runnable() {
                    continue;
                }
                if let Some(ref mut init) = initial {
                    if th.yield_count < self.threads[*init].yield_count {
                        *init = i;
                    }
                } else {
                    initial = Some(i)
                }
            }
        }
        let path_id = self.path.pos();
        let next = self.path.branch_thread(self.id, {
            self.threads.iter().map(|(i, th)| {
                if initial.is_none() && th.is_runnable() {
                    initial = Some(i);
                }
                if initial == Some(i) {
                    Thread::Active
                } else if th.is_yield() {
                    Thread::Yield
                } else if !th.is_runnable() {
                    Thread::Disabled
                } else {
                    Thread::Skip
                }
            })
        });
        let switched = Some(self.threads.active_id()) != next;
        self.threads.set_active(next);
        // There is no active thread. Unless all threads have terminated, the
        // test has deadlocked.
        if !self.threads.is_active() {
            let terminal = self.threads.iter().all(|(_, th)| th.is_terminated());
            assert!(
                terminal,
                "deadlock; threads = {:?}",
                self.threads
                    .iter()
                    .map(|(i, th)| { (i, th.state) })
                    .collect::<Vec<_>>()
            );
            return true;
        }
        // TODO: refactor
        if let Some(operation) = self.threads.active().operation {
            let threads = &mut self.threads;
            let th_id = threads.active_id();
            if let Some(access) = self.objects.last_dependent_access(operation) {
                threads.active_mut().dpor_vv.join(access.version());
            }
            threads.active_mut().dpor_vv[th_id] += 1;
            self.objects
                .set_last_access(operation, path_id, &threads.active().dpor_vv);
        }
        // Reactivate yielded threads, but only if the current active thread is
        // not yielded.
        for (id, th) in self.threads.iter_mut() {
            if th.is_yield() && Some(id) != next {
                th.set_runnable();
            }
        }
        if switched {
            info!("~~~~~~~~ THREAD {} ~~~~~~~~", self.threads.active_id());
        }
        curr_thread != self.threads.active_id()
    }

    /// Panics if any leaks were detected
    pub(crate) fn check_for_leaks(&self) {
        self.objects.check_for_leaks();
    }
}
impl fmt::Debug for Execution {
    // Only the fields useful when debugging an interleaving are shown; the
    // bookkeeping maps and limits are elided.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = fmt.debug_struct("Execution");
        out.field("path", &self.path);
        out.field("threads", &self.threads);
        out.finish()
    }
}
impl Id {
    /// Returns a fresh, process-unique execution id.
    pub(crate) fn new() -> Id {
        use std::sync::atomic::{AtomicUsize, Ordering};
        // The number picked here is arbitrary. It is mostly to avoid collision
        // with "zero" to aid with debugging.
        static NEXT_ID: AtomicUsize = AtomicUsize::new(46_413_762);
        Id(NEXT_ID.fetch_add(1, Ordering::Relaxed))
    }
}

View file

@ -0,0 +1,32 @@
use _futures::executor;
use rt::{self, ThreadHandle};
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::Relaxed;
/// Bridges futures-executor notifications to the model's thread handles.
pub struct Notify {
    // Handle to the thread that created this `Notify`; unparked on notify.
    thread: ThreadHandle,
    // Set when a notification arrives; cleared by `consume_notify`.
    flag: AtomicBool,
}

impl Notify {
    /// Creates a `Notify` bound to the current thread.
    pub fn new() -> Notify {
        Notify {
            thread: ThreadHandle::current(),
            flag: AtomicBool::new(false),
        }
    }

    /// Returns `true` if a notification was pending, clearing the flag.
    pub fn consume_notify(&self) -> bool {
        self.flag.swap(false, Relaxed)
    }
}

impl executor::Notify for Notify {
    fn notify(&self, _id: usize) {
        // A notification is a scheduling point for the model checker; the
        // branch happens before the flag is set and the thread unparked.
        rt::branch();
        self.flag.store(true, Relaxed);
        self.thread.unpark();
    }
}

View file

@ -0,0 +1,87 @@
use crate::rt::synchronize::Synchronize;
use std::{any::Any, collections::HashMap};
/// Tracks the `lazy_static` values initialized during one execution.
pub(crate) struct Set {
    /// Registered statics.
    ///
    /// `None` while the map has been taken via `drop` (i.e. during shutdown);
    /// `reset` asserts this before installing a fresh map.
    statics: Option<HashMap<StaticKeyId, StaticValue>>,
}

/// Identity of a `lazy_static`: the address of its `Lazy` cell.
#[derive(Eq, PartialEq, Hash, Copy, Clone)]
pub(crate) struct StaticKeyId(usize);

/// An initialized `lazy_static` value plus its synchronization state.
pub(crate) struct StaticValue {
    /// Causality tracking for accesses to this value.
    pub(crate) sync: Synchronize,
    // Type-erased value; downcast back to its concrete type by `get`.
    v: Box<dyn Any>,
}
impl Set {
    /// Create an empty statics set.
    pub(crate) fn new() -> Set {
        Set {
            statics: Some(HashMap::new()),
        }
    }

    /// Installs a fresh, empty map for the next execution.
    ///
    /// Panics unless the previous execution's map was taken via [`Set::drop`].
    pub(crate) fn reset(&mut self) {
        assert!(
            self.statics.is_none(),
            "lazy_static was not dropped during execution"
        );
        self.statics = Some(HashMap::new());
    }

    /// Takes ownership of the map so the values can be dropped by the caller.
    pub(crate) fn drop(&mut self) -> HashMap<StaticKeyId, StaticValue> {
        self.statics
            .take()
            .expect("lazy_statics were dropped twice in one execution")
    }

    /// Looks up the value registered for `key`, if it was initialized.
    pub(crate) fn get_static<T: 'static>(
        &mut self,
        key: &'static crate::lazy_static::Lazy<T>,
    ) -> Option<&mut StaticValue> {
        let map = self
            .statics
            .as_mut()
            .expect("attempted to access lazy_static during shutdown");
        map.get_mut(&StaticKeyId::new(key))
    }

    /// Registers the initialized `value` for `key`; `key` must not already be
    /// registered.
    pub(crate) fn init_static<T: 'static>(
        &mut self,
        key: &'static crate::lazy_static::Lazy<T>,
        value: StaticValue,
    ) -> &mut StaticValue {
        use std::collections::hash_map::Entry;
        let entry = self
            .statics
            .as_mut()
            .expect("attempted to access lazy_static during shutdown")
            .entry(StaticKeyId::new(key));
        match entry {
            Entry::Occupied(_) => unreachable!("told to init static, but it was already init'd"),
            Entry::Vacant(slot) => slot.insert(value),
        }
    }
}
impl StaticKeyId {
    /// Derives the key from the `Lazy` cell's address, which is stable and
    /// unique for the process lifetime because `key` is `&'static`.
    fn new<T>(key: &'static crate::lazy_static::Lazy<T>) -> Self {
        Self(key as *const _ as usize)
    }
}
impl StaticValue {
pub(crate) fn new<T: 'static>(value: T) -> Self {
Self {
sync: Synchronize::new(),
v: Box::new(value),
}
}
pub(crate) fn get<T: 'static>(&self) -> &T {
self.v
.downcast_ref::<T>()
.expect("lazy value must downcast to expected type")
}
}

View file

@ -0,0 +1,150 @@
pub(crate) use cfg::Location;
/// Captures the caller's source location for diagnostics.
///
/// Expands at the call site, so `std::panic::Location::caller()` reports the
/// macro's user (propagated further by `#[track_caller]` on the callers).
/// When the execution was not configured to capture locations, the cheap
/// `disabled` placeholder is returned instead.
macro_rules! location {
    () => {{
        let enabled = crate::rt::execution(|execution| execution.location);
        if enabled {
            let location = crate::rt::Location::from(std::panic::Location::caller());
            location
        } else {
            crate::rt::Location::disabled()
        }
    }};
}
use crate::rt::{thread, MAX_THREADS};
use std::ops;
/// Most recent significant-event location for each thread.
#[derive(Debug)]
pub(super) struct LocationSet {
    // One slot per possible thread, indexed by `thread::Id::as_usize()`.
    locations: [Location; MAX_THREADS],
}

/// Accumulates labeled locations for a formatted panic message; see `fire`.
pub(super) struct PanicBuilder {
    // Message header printed before the location rows.
    msg: String,
    // (label, optional thread index, location) rows.
    locations: Vec<(String, Option<usize>, Location)>,
}
// ===== impl LocationSet ======
impl LocationSet {
    /// Creates a set with every slot set to the default (disabled) location.
    pub(super) fn new() -> LocationSet {
        LocationSet {
            locations: Default::default(),
        }
    }

    /// Records `location` for the currently active thread.
    pub(super) fn track(&mut self, location: Location, threads: &thread::Set) {
        let active_id = threads.active_id();
        self.locations[active_id.as_usize()] = location;
    }
}
impl ops::Index<usize> for LocationSet {
    type Output = Location;

    /// Returns the location recorded for the thread at `index`.
    fn index(&self, index: usize) -> &Location {
        self.locations.index(index)
    }
}

impl ops::Index<&thread::Set> for LocationSet {
    type Output = Location;

    /// Returns the location recorded for the set's active thread.
    fn index(&self, threads: &thread::Set) -> &Location {
        let active_id = threads.active_id();
        self.locations.index(active_id.as_usize())
    }
}
// ===== impl PanicBuilder =====
/// Starts building a panic message that can carry captured locations.
pub(super) fn panic(msg: impl ToString) -> PanicBuilder {
    let msg = msg.to_string();
    PanicBuilder {
        msg,
        locations: Vec::new(),
    }
}
impl PanicBuilder {
    /// Adds a labeled location row to the message.
    pub(super) fn location(&mut self, key: &str, location: Location) -> &mut Self {
        self.locations.push((key.to_string(), None, location));
        self
    }

    /// Adds a labeled location row attributed to a specific thread.
    pub(super) fn thread(
        &mut self,
        key: &str,
        thread: impl Into<usize>,
        location: Location,
    ) -> &mut Self {
        self.locations
            .push((key.to_string(), Some(thread.into()), location));
        self
    }

    /// Panics with the accumulated message.
    ///
    /// Rows whose location was never captured are omitted, and the labels of
    /// the remaining rows are right-aligned to the widest captured label.
    pub(super) fn fire(&self) {
        let mut msg = self.msg.clone();
        // Width of the longest label among captured rows; `None` when there
        // are no captured locations to print.
        let width = self
            .locations
            .iter()
            .filter(|(_, _, location)| location.is_captured())
            .map(|(key, ..)| key.len())
            .max();
        if let Some(width) = width {
            msg = format!("\n{}", msg);
            for (key, thread, location) in &self.locations {
                if !location.is_captured() {
                    continue;
                }
                // `key.len() <= width` by construction of `width` above.
                let spaces = " ".repeat(width - key.len());
                let th = thread
                    .map(|th| format!("thread #{} @ ", th))
                    .unwrap_or_default();
                msg.push_str(&format!("\n {}{}: {}{}", spaces, key, th, location));
            }
        }
        panic!("{}\n", msg);
    }
}
// ===== impl Location cfg =====
mod cfg {
    use std::fmt;

    /// A possibly-captured source location.
    ///
    /// Holds `None` when location tracking is disabled, so the type is free
    /// to construct and copy in that mode.
    #[derive(Debug, Default, Clone, Copy)]
    pub(crate) struct Location(Option<&'static std::panic::Location<'static>>);

    impl Location {
        /// Wraps a captured `std::panic::Location`.
        pub(crate) fn from(location: &'static std::panic::Location<'static>) -> Location {
            Location(Some(location))
        }

        /// Returns the placeholder used when capture is disabled.
        pub(crate) fn disabled() -> Location {
            Location(None)
        }

        /// Whether a real location was captured.
        pub(crate) fn is_captured(&self) -> bool {
            self.0.is_some()
        }
    }

    impl fmt::Display for Location {
        // Captured locations delegate to `std::panic::Location`'s display
        // ("file:line:col"); disabled ones render as the empty string.
        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self.0 {
                Some(location) => location.fmt(fmt),
                None => write!(fmt, ""),
            }
        }
    }
}

197
third-party/vendor/loom/src/rt/mod.rs vendored Normal file
View file

@ -0,0 +1,197 @@
#[macro_use]
mod location;
pub(crate) use self::location::Location;
mod access;
use self::access::Access;
mod alloc;
pub(crate) use self::alloc::{alloc, dealloc, Allocation};
mod arc;
pub(crate) use self::arc::Arc;
mod atomic;
pub(crate) use self::atomic::{fence, Atomic};
pub(crate) mod cell;
pub(crate) use self::cell::Cell;
mod condvar;
pub(crate) use self::condvar::Condvar;
mod execution;
pub(crate) use self::execution::Execution;
mod notify;
pub(crate) use self::notify::Notify;
mod num;
pub(crate) use self::num::Numeric;
#[macro_use]
pub(crate) mod object;
mod mpsc;
pub(crate) use self::mpsc::Channel;
mod mutex;
pub(crate) use self::mutex::Mutex;
mod path;
pub(crate) use self::path::Path;
mod rwlock;
pub(crate) use self::rwlock::RwLock;
mod scheduler;
pub(crate) use self::scheduler::Scheduler;
mod synchronize;
pub(crate) use self::synchronize::Synchronize;
pub(crate) mod lazy_static;
pub(crate) mod thread;
mod vv;
pub(crate) use self::vv::VersionVec;
use tracing::trace;
/// Maximum number of threads that can be included in a model.
///
/// Kept small to bound the number of interleavings explored per test.
pub const MAX_THREADS: usize = 4;

/// Maximum number of atomic store history to track per-cell.
pub(crate) const MAX_ATOMIC_HISTORY: usize = 7;
/// Spawns a new model-checked thread running `f`, returning its id.
///
/// The closure is wrapped so that `thread_done` runs after `f` returns,
/// marking the thread terminated and dropping its locals.
pub(crate) fn spawn<F>(f: F) -> crate::rt::thread::Id
where
    F: FnOnce() + 'static,
{
    let id = execution(|execution| execution.new_thread());
    trace!(thread = ?id, "spawn");
    Scheduler::spawn(Box::new(move || {
        f();
        thread_done();
    }));
    id
}
/// Marks the current thread as blocked
pub(crate) fn park(location: Location) {
let switch = execution(|execution| {
use thread::State;
let thread = execution.threads.active_id();
let active = execution.threads.active_mut();
trace!(?thread, ?active.state, "park");
match active.state {
// The thread was previously unparked while it was active. Instead
// of parking, consume the unpark.
State::Runnable { unparked: true } => {
active.set_runnable();
return false;
}
// The thread doesn't have a saved unpark; set its state to blocked.
_ => active.set_blocked(location),
};
execution.threads.active_mut().set_blocked(location);
execution.threads.active_mut().operation = None;
execution.schedule()
});
if switch {
Scheduler::switch();
}
}
/// Add an execution branch point.
///
/// Runs `f` against the execution state, then consults the scheduler; the
/// context switch (if any) happens only after `f`'s result is computed.
fn branch<F, R>(f: F) -> R
where
    F: FnOnce(&mut Execution) -> R,
{
    let (ret, switch) = execution(|execution| {
        let ret = f(execution);
        let switch = execution.schedule();
        trace!(?switch, "branch");
        (ret, switch)
    });
    if switch {
        Scheduler::switch();
    }
    ret
}
/// Runs `f` against the execution after bumping the active thread's causality,
/// marking a synchronization point.
fn synchronize<F, R>(f: F) -> R
where
    F: FnOnce(&mut Execution) -> R,
{
    execution(|execution| {
        execution.threads.active_causality_inc();
        trace!("synchronize");
        f(execution)
    })
}
/// Yield the thread.
///
/// This enables concurrent algorithms that require other threads to make
/// progress.
pub fn yield_now() {
    let switch = execution(|execution| {
        let thread = execution.threads.active_id();
        // Mark the thread yielded and clear its pending operation before
        // consulting the scheduler.
        execution.threads.active_mut().set_yield();
        execution.threads.active_mut().operation = None;
        let switch = execution.schedule();
        trace!(?thread, ?switch, "yield_now");
        switch
    });
    if switch {
        Scheduler::switch();
    }
}
/// Runs `f` with mutable access to the current scheduler's execution state.
pub(crate) fn execution<F, R>(f: F) -> R
where
    F: FnOnce(&mut Execution) -> R,
{
    Scheduler::with_execution(f)
}
/// Tears down the current thread at the end of its body: drops its locals,
/// clears its operation, and marks it terminated.
pub fn thread_done() {
    let locals = execution(|execution| {
        let thread = execution.threads.active_id();
        trace!(?thread, "thread_done: drop locals");
        execution.threads.active_mut().drop_locals()
    });
    // Drop outside of the execution context
    drop(locals);
    execution(|execution| {
        let thread = execution.threads.active_id();
        execution.threads.active_mut().operation = None;
        execution.threads.active_mut().set_terminated();
        // NOTE(review): `switch` is returned only so it appears in the trace;
        // no `Scheduler::switch()` follows, apparently because the terminated
        // thread's function returns to the scheduler anyway — confirm.
        let switch = execution.schedule();
        trace!(?thread, ?switch, "thread_done: terminate");
        switch
    });
}

178
third-party/vendor/loom/src/rt/mpsc.rs vendored Normal file
View file

@ -0,0 +1,178 @@
use crate::rt::{object, Access, Location, Synchronize, VersionVec};
use std::collections::VecDeque;
use std::sync::atomic::Ordering::{Acquire, Release};
/// Model of an `mpsc` channel, backed by state in the object store.
#[derive(Debug)]
pub(crate) struct Channel {
    state: object::Ref<State>,
}

#[derive(Debug)]
pub(super) struct State {
    /// Count of messages in the channel.
    msg_cnt: usize,
    /// Last access that was a send operation.
    last_send_access: Option<Access>,
    /// Last access that was a receive operation.
    last_recv_access: Option<Access>,
    /// A synchronization point for synchronizing the sending threads and the
    /// channel.
    ///
    /// The `mpsc` channels have a guarantee that the messages will be received
    /// in the same order in which they were sent. Therefore, if thread `t1`
    /// managed to send `m1` before `t2` sent `m2`, the thread that received
    /// `m2` can be sure that `m1` was already sent and received. In other
    /// words, it is sound for the receiver of `m2` to know that `m1` happened
    /// before `m2`. That is why we have a single `sender_synchronize` for
    /// senders which we use to "timestamp" each message put in the channel.
    /// However, in our example, the receiver of `m1` does not know whether `m2`
    /// was already sent or not and, therefore, by reading from the channel it
    /// should not learn any facts about `happens_before(send(m2), recv(m1))`.
    /// That is why we cannot use single `Synchronize` for the entire channel
    /// and on the receiver side we need to use `Synchronize` per message.
    sender_synchronize: Synchronize,
    /// A synchronization point per message synchronizing the receiving thread
    /// with the channel state at the point when the received message was sent.
    receiver_synchronize: VecDeque<Synchronize>,
    /// Where the channel was created; reported when messages leak.
    created: Location,
}

/// Actions performed on the Channel.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(super) enum Action {
    /// Send a message
    MsgSend,
    /// Receive a message
    MsgRecv,
}
impl Channel {
    /// Creates the model state for a new mpsc channel.
    pub(crate) fn new(location: Location) -> Self {
        super::execution(|execution| {
            let state = execution.objects.insert(State {
                msg_cnt: 0,
                last_send_access: None,
                last_recv_access: None,
                sender_synchronize: Synchronize::new(),
                receiver_synchronize: VecDeque::new(),
                created: location,
            });
            tracing::trace!(?state, %location, "mpsc::channel");
            Self { state }
        })
    }

    /// Models sending one message into the channel.
    pub(crate) fn send(&self, location: Location) {
        self.state.branch_action(Action::MsgSend, location);
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            state.msg_cnt = state.msg_cnt.checked_add(1).expect("overflow");
            // Timestamp the message with the senders' synchronization state so
            // the eventual receiver acquires everything released by this send.
            state
                .sender_synchronize
                .sync_store(&mut execution.threads, Release);
            state
                .receiver_synchronize
                .push_back(state.sender_synchronize);
            if state.msg_cnt == 1 {
                // Unblock all threads that are blocked waiting on this channel
                let thread_id = execution.threads.active_id();
                for (id, thread) in execution.threads.iter_mut() {
                    if id == thread_id {
                        continue;
                    }
                    let obj = thread
                        .operation
                        .as_ref()
                        .map(|operation| operation.object());
                    if obj == Some(self.state.erase()) {
                        thread.set_runnable();
                    }
                }
            }
        })
    }

    /// Models receiving one message; the branch blocks while the channel is
    /// empty.
    pub(crate) fn recv(&self, location: Location) {
        self.state
            .branch_disable(Action::MsgRecv, self.is_empty(), location);
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            let thread_id = execution.threads.active_id();
            state.msg_cnt = state
                .msg_cnt
                .checked_sub(1)
                .expect("expected to be able to read the message");
            // Acquire the causality recorded when this message was sent.
            // (A stray `dbg!` around this call was removed: it wrote to
            // stderr on every receive and its value was discarded.)
            let mut synchronize = state.receiver_synchronize.pop_front().unwrap();
            synchronize.sync_load(&mut execution.threads, Acquire);
            if state.msg_cnt == 0 {
                // Block all **other** threads attempting to read from the channel
                for (id, thread) in execution.threads.iter_mut() {
                    if id == thread_id {
                        continue;
                    }
                    if let Some(operation) = thread.operation.as_ref() {
                        if operation.object() == self.state.erase()
                            && operation.action() == object::Action::Channel(Action::MsgRecv)
                        {
                            let location = operation.location();
                            thread.set_blocked(location);
                        }
                    }
                }
            }
        })
    }

    /// Returns `true` if the channel is currently empty
    pub(crate) fn is_empty(&self) -> bool {
        super::execution(|execution| self.get_state(&mut execution.objects).msg_cnt == 0)
    }

    /// Borrows this channel's state from the object store.
    fn get_state<'a>(&self, objects: &'a mut object::Store) -> &'a mut State {
        self.state.get_mut(objects)
    }
}
impl State {
    /// Panics if messages were still queued when the execution ended,
    /// including the creation site when it was captured.
    pub(super) fn check_for_leaks(&self, index: usize) {
        if self.msg_cnt != 0 {
            if self.created.is_captured() {
                panic!(
                    "Messages leaked.\n \
                    Channel created: {}\n \
                    Index: {}\n \
                    Messages: {}",
                    self.created, index, self.msg_cnt
                );
            } else {
                panic!(
                    "Messages leaked.\n Index: {}\n Messages: {}",
                    index, self.msg_cnt
                );
            }
        }
    }

    /// Returns the last access the given action depends on: sends depend on
    /// the previous send, receives on the previous receive.
    pub(super) fn last_dependent_access(&self, action: Action) -> Option<&Access> {
        match action {
            Action::MsgSend => self.last_send_access.as_ref(),
            Action::MsgRecv => self.last_recv_access.as_ref(),
        }
    }

    /// Records `path_id`/`version` as the last access for `action`.
    pub(super) fn set_last_access(&mut self, action: Action, path_id: usize, version: &VersionVec) {
        match action {
            Action::MsgSend => Access::set_or_create(&mut self.last_send_access, path_id, version),
            Action::MsgRecv => Access::set_or_create(&mut self.last_recv_access, path_id, version),
        }
    }
}

155
third-party/vendor/loom/src/rt/mutex.rs vendored Normal file
View file

@ -0,0 +1,155 @@
use crate::rt::object;
use crate::rt::{thread, Access, Location, Synchronize, VersionVec};
use std::sync::atomic::Ordering::{Acquire, Release};
use tracing::trace;
/// Model of a mutex, backed by state in the object store.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Mutex {
    state: object::Ref<State>,
}

#[derive(Debug)]
pub(super) struct State {
    /// If the mutex should establish sequential consistency.
    seq_cst: bool,
    /// `Some` when the mutex is in the locked state. The `thread::Id`
    /// references the thread that currently holds the mutex.
    lock: Option<thread::Id>,
    /// Tracks access to the mutex
    last_access: Option<Access>,
    /// Causality transfers between threads
    synchronize: Synchronize,
}
impl Mutex {
    /// Creates the model state for a new mutex.
    ///
    /// When `seq_cst` is set, lock operations additionally establish
    /// sequential consistency across threads.
    pub(crate) fn new(seq_cst: bool) -> Mutex {
        super::execution(|execution| {
            let state = execution.objects.insert(State {
                seq_cst,
                lock: None,
                last_access: None,
                synchronize: Synchronize::new(),
            });
            trace!(?state, ?seq_cst, "Mutex::new");
            Mutex { state }
        })
    }

    /// Blocks until the current thread holds the lock.
    pub(crate) fn acquire_lock(&self, location: Location) {
        self.state.branch_acquire(self.is_locked(), location);
        assert!(self.post_acquire(), "expected to be able to acquire lock");
    }

    /// Attempts to take the lock without blocking; returns whether it
    /// succeeded.
    pub(crate) fn try_acquire_lock(&self, location: Location) -> bool {
        self.state.branch_opaque(location);
        self.post_acquire()
    }

    /// Releases the lock and wakes other threads parked on this mutex.
    pub(crate) fn release_lock(&self) {
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            // Release the lock flag
            state.lock = None;
            // Execution has deadlocked, cleanup does not matter.
            if !execution.threads.is_active() {
                return;
            }
            state
                .synchronize
                .sync_store(&mut execution.threads, Release);
            if state.seq_cst {
                // Establish sequential consistency between the lock's operations.
                execution.threads.seq_cst();
            }
            let thread_id = execution.threads.active_id();
            for (id, thread) in execution.threads.iter_mut() {
                if id == thread_id {
                    continue;
                }
                let obj = thread
                    .operation
                    .as_ref()
                    .map(|operation| operation.object());
                if obj == Some(self.state.erase()) {
                    trace!(state = ?self.state, thread = ?id,
                        "Mutex::release_lock");
                    thread.set_runnable();
                }
            }
        });
    }

    /// Completes an acquire attempt; returns `false` if the lock is held by
    /// another thread.
    fn post_acquire(&self) -> bool {
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            let thread_id = execution.threads.active_id();
            if state.lock.is_some() {
                return false;
            }
            // Set the lock to the current thread
            state.lock = Some(thread_id);
            // Acquire the causality released by the previous holder.
            // (A stray `dbg!` around this call was removed: it wrote to
            // stderr on every lock acquisition and its value was discarded.)
            state.synchronize.sync_load(&mut execution.threads, Acquire);
            if state.seq_cst {
                // Establish sequential consistency between locks
                execution.threads.seq_cst();
            }
            // Block all **other** threads attempting to acquire the mutex
            for (id, thread) in execution.threads.iter_mut() {
                if id == thread_id {
                    continue;
                }
                if let Some(operation) = thread.operation.as_ref() {
                    if operation.object() == self.state.erase() {
                        let location = operation.location();
                        trace!(state = ?self.state, thread = ?id,
                            "Mutex::post_acquire");
                        thread.set_blocked(location);
                    }
                }
            }
            true
        })
    }

    /// Returns `true` if the mutex is currently locked
    fn is_locked(&self) -> bool {
        super::execution(|execution| {
            let is_locked = self.state.get(&execution.objects).lock.is_some();
            trace!(state = ?self.state, ?is_locked, "Mutex::is_locked");
            is_locked
        })
    }
}
impl State {
    /// Returns the last access that operations on this mutex depend on, if
    /// any, for DPOR backtracking.
    pub(crate) fn last_dependent_access(&self) -> Option<&Access> {
        self.last_access.as_ref()
    }

    /// Records `path_id`/`version` as the most recent dependent access.
    pub(crate) fn set_last_access(&mut self, path_id: usize, version: &VersionVec) {
        Access::set_or_create(&mut self.last_access, path_id, version);
    }
}

148
third-party/vendor/loom/src/rt/notify.rs vendored Normal file
View file

@ -0,0 +1,148 @@
use crate::rt::object;
use crate::rt::{self, Access, Synchronize, VersionVec};
use std::sync::atomic::Ordering::{Acquire, Release};
use tracing::trace;
use super::Location;
/// Model of a notification primitive used to wake waiting threads.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Notify {
    state: object::Ref<State>,
}

#[derive(Debug)]
pub(super) struct State {
    /// If true, spurious notifications are possible
    spurious: bool,
    /// True if the notify woke up spuriously last time
    did_spur: bool,
    /// When true, notification is sequential consistent.
    seq_cst: bool,
    /// `true` if there is a pending notification to consume.
    notified: bool,
    /// Tracks access to the notify object
    last_access: Option<Access>,
    /// Causality transfers between threads
    synchronize: Synchronize,
}
impl Notify {
pub(crate) fn new(seq_cst: bool, spurious: bool) -> Notify {
super::execution(|execution| {
let state = execution.objects.insert(State {
spurious,
did_spur: false,
seq_cst,
notified: false,
last_access: None,
synchronize: Synchronize::new(),
});
trace!(?state, ?seq_cst, ?spurious, "Notify::new");
Notify { state }
})
}
pub(crate) fn notify(self, location: Location) {
self.state.branch_opaque(location);
rt::execution(|execution| {
let state = self.state.get_mut(&mut execution.objects);
state
.synchronize
.sync_store(&mut execution.threads, Release);
if state.seq_cst {
execution.threads.seq_cst();
}
state.notified = true;
let (active, inactive) = execution.threads.split_active();
for thread in inactive {
let obj = thread
.operation
.as_ref()
.map(|operation| operation.object());
if obj == Some(self.state.erase()) {
trace!(state = ?self.state, thread = ?thread.id, "Notify::notify");
thread.unpark(active);
}
}
});
}
pub(crate) fn wait(self, location: Location) {
let (notified, spurious) = rt::execution(|execution| {
let spurious = if self.state.get(&execution.objects).might_spur() {
execution.path.branch_spurious()
} else {
false
};
let state = self.state.get_mut(&mut execution.objects);
if spurious {
state.did_spur = true;
}
trace!(state = ?self.state, notified = ?state.notified, ?spurious, "Notify::wait 1");
dbg!((state.notified, spurious))
});
if spurious {
rt::yield_now();
return;
}
if notified {
self.state.branch_opaque(location);
} else {
// This should become branch_disable
self.state.branch_acquire(true, location)
}
// Thread was notified
super::execution(|execution| {
trace!(state = ?self.state, "Notify::wait 2");
let state = self.state.get_mut(&mut execution.objects);
assert!(state.notified);
state.synchronize.sync_load(&mut execution.threads, Acquire);
if state.seq_cst {
// Establish sequential consistency between locks
execution.threads.seq_cst();
}
state.notified = false;
});
}
}
impl State {
    /// Returns `true` if a spurious wakeup may still be injected for this
    /// object (at most one: `did_spur` latches after the first).
    pub(crate) fn might_spur(&self) -> bool {
        self.spurious && !self.did_spur
    }

    /// Returns the last access that operations on this object depend on.
    pub(crate) fn last_dependent_access(&self) -> Option<&Access> {
        self.last_access.as_ref()
    }

    /// Records `path_id`/`version` as the most recent dependent access.
    pub(crate) fn set_last_access(&mut self, path_id: usize, version: &VersionVec) {
        Access::set_or_create(&mut self.last_access, path_id, version);
    }
}

52
third-party/vendor/loom/src/rt/num.rs vendored Normal file
View file

@ -0,0 +1,52 @@
/// Numeric-like type that can round-trip through a `u64`.
///
/// Used by `Atomic` to store values in a uniform representation.
pub(crate) trait Numeric: Sized + Copy + PartialEq {
    /// Converts the value into its `u64` representation.
    fn into_u64(self) -> u64;

    /// Reconstructs a value from its `u64` representation.
    fn from_u64(src: u64) -> Self;
}

// Blanket-style impl for all primitive integers via plain `as` casts (signed
// values sign-extend into the `u64` and truncate back symmetrically).
macro_rules! impl_num {
    ( $($t:ty),* ) => {
        $(
            impl Numeric for $t {
                fn into_u64(self) -> u64 {
                    self as u64
                }

                fn from_u64(src: u64) -> $t {
                    src as $t
                }
            }
        )*
    };
}

impl_num!(u8, u16, u32, u64, usize, i8, i16, i32, i64, isize);

// Raw pointers round-trip through their address.
impl<T> Numeric for *mut T {
    fn into_u64(self) -> u64 {
        self as u64
    }

    fn from_u64(src: u64) -> *mut T {
        src as *mut T
    }
}

// Booleans are stored as 0/1; any non-zero value reads back as `true`.
impl Numeric for bool {
    fn into_u64(self) -> u64 {
        self as u64
    }

    fn from_u64(src: u64) -> bool {
        src != 0
    }
}

465
third-party/vendor/loom/src/rt/object.rs vendored Normal file
View file

@ -0,0 +1,465 @@
use crate::rt;
use crate::rt::{Access, Execution, Location, VersionVec};
use std::fmt;
use std::marker::PhantomData;
use tracing::trace;
#[cfg(feature = "checkpoint")]
use serde::{Deserialize, Serialize};
/// Stores objects
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(super) struct Store<T = Entry> {
    /// Stored state for all objects.
    entries: Vec<T>,
}

/// Converts between a concrete object type and the store's entry enum.
pub(super) trait Object: Sized {
    /// The entry enum this object is stored as (see `objects!`).
    type Entry;
    /// Convert an object into an entry
    fn into_entry(self) -> Self::Entry;
    /// Convert an entry ref into an object ref
    fn get_ref(entry: &Self::Entry) -> Option<&Self>;
    /// Convert a mutable entry ref into a mutable object ref
    fn get_mut(entry: &mut Self::Entry) -> Option<&mut Self>;
}

/// References an object in the store.
///
/// The reference tracks the type it references. Using `()` indicates the type
/// is unknown.
#[derive(Eq, PartialEq)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(super) struct Ref<T = ()> {
    /// Index in the store
    index: usize,
    // Zero-sized marker tying the reference to its object type.
    _p: PhantomData<T>,
}

// TODO: mov to separate file
/// A pending operation by a thread against a specific stored object.
#[derive(Debug, Copy, Clone)]
pub(super) struct Operation {
    // Type-erased reference to the target object.
    obj: Ref,
    // What kind of action is performed on the object.
    action: Action,
    // Caller location, for diagnostics.
    location: Location,
}

// TODO: move to separate file
/// Kind of action performed on an object; selects dependent-access tracking.
#[derive(Debug, Copy, Clone, PartialEq)]
pub(super) enum Action {
    /// Action on an Arc object
    Arc(rt::arc::Action),
    /// Action on an atomic object
    Atomic(rt::atomic::Action),
    /// Action on a channel
    Channel(rt::mpsc::Action),
    /// Action on a RwLock
    RwLock(rt::rwlock::Action),
    /// Generic action with no specialized dependencies on access.
    Opaque,
}
/// Generates the store entry enum plus an `Object` impl mapping each listed
/// type to its variant, so concrete states can be stored uniformly.
macro_rules! objects {
    ( $(#[$attrs:meta])* $e:ident, $( $name:ident($ty:path), )* ) => {
        $(#[$attrs])*
        pub(super) enum $e {
            $(
                $name($ty),
            )*
        }
        $(
            impl crate::rt::object::Object for $ty {
                type Entry = $e;
                fn into_entry(self) -> Entry {
                    $e::$name(self)
                }
                fn get_ref(entry: &Entry) -> Option<&$ty> {
                    match entry {
                        $e::$name(obj) => Some(obj),
                        _ => None,
                    }
                }
                fn get_mut(entry: &mut Entry) -> Option<&mut $ty> {
                    match entry {
                        $e::$name(obj) => Some(obj),
                        _ => None,
                    }
                }
            }
        )*
    };
}
// NOTE: only `//` comments are valid inside the invocation below — doc
// comments would become `#[doc]` attribute tokens and break macro matching.
objects! {
    #[derive(Debug)]
    // Many of the common variants of this enum are quite large --- only `Entry`
    // and `Alloc` are significantly smaller than most other variants.
    #[allow(clippy::large_enum_variant)]
    Entry,
    // State tracking allocations. Used for leak detection.
    Alloc(rt::alloc::State),
    // State associated with a modeled `Arc`.
    Arc(rt::arc::State),
    // State associated with an atomic cell
    Atomic(rt::atomic::State),
    // State associated with a mutex.
    Mutex(rt::mutex::State),
    // State associated with a modeled condvar.
    Condvar(rt::condvar::State),
    // State associated with a modeled thread notifier.
    Notify(rt::notify::State),
    // State associated with an RwLock
    RwLock(rt::rwlock::State),
    // State associated with a modeled channel.
    Channel(rt::mpsc::State),
    // Tracks access to a memory cell
    Cell(rt::cell::State),
}
impl<T> Store<T> {
    /// Create a new, empty, object store
    pub(super) fn with_capacity(capacity: usize) -> Store<T> {
        Store {
            entries: Vec::with_capacity(capacity),
        }
    }

    /// Number of objects currently stored.
    pub(super) fn len(&self) -> usize {
        self.entries.len()
    }

    /// Currently allocated capacity.
    pub(super) fn capacity(&self) -> usize {
        self.entries.capacity()
    }

    /// Reserves space for exactly `additional` more objects.
    pub(super) fn reserve_exact(&mut self, additional: usize) {
        self.entries.reserve_exact(additional);
    }

    /// Inserts an object into the store, returning a typed reference to it.
    pub(super) fn insert<O>(&mut self, item: O) -> Ref<O>
    where
        O: Object<Entry = T>,
    {
        let index = self.entries.len();
        self.entries.push(item.into_entry());
        Ref {
            index,
            _p: PhantomData,
        }
    }

    /// Drops every object inserted after `obj`.
    pub(crate) fn truncate<O>(&mut self, obj: Ref<O>) {
        self.entries.truncate(obj.index + 1);
    }

    /// Removes all objects.
    pub(crate) fn clear(&mut self) {
        self.entries.clear();
    }

    /// Iterates over references to every stored object of type `O`.
    pub(super) fn iter_ref<O>(&self) -> impl DoubleEndedIterator<Item = Ref<O>> + '_
    where
        O: Object<Entry = T>,
    {
        self.entries.iter().enumerate().filter_map(|(index, entry)| {
            O::get_ref(entry).map(|_| Ref {
                index,
                _p: PhantomData,
            })
        })
    }

    /// Iterates mutably over every stored object of type `O`.
    pub(super) fn iter_mut<'a, O>(&'a mut self) -> impl DoubleEndedIterator<Item = &mut O>
    where
        O: Object<Entry = T> + 'a,
    {
        self.entries.iter_mut().filter_map(O::get_mut)
    }
}
impl Store {
    /// Returns the last access to `operation`'s object that DPOR must treat
    /// as a dependency (used when computing backtrack points).
    ///
    /// # Panics
    ///
    /// Panics if the referenced entry is not one of the branchable object
    /// kinds matched below (e.g. an `Alloc` or `Cell` entry).
    pub(super) fn last_dependent_access(&self, operation: Operation) -> Option<&Access> {
        match &self.entries[operation.obj.index] {
            // Arc / Atomic / Channel dependencies vary with the specific
            // action taken, so the action is forwarded to the object state.
            Entry::Arc(entry) => entry.last_dependent_access(operation.action.into()),
            Entry::Atomic(entry) => entry.last_dependent_access(operation.action.into()),
            Entry::Mutex(entry) => entry.last_dependent_access(),
            Entry::Condvar(entry) => entry.last_dependent_access(),
            Entry::Notify(entry) => entry.last_dependent_access(),
            Entry::RwLock(entry) => entry.last_dependent_access(),
            Entry::Channel(entry) => entry.last_dependent_access(operation.action.into()),
            obj => panic!(
                "object is not branchable {:?}; ref = {:?}",
                obj, operation.obj
            ),
        }
    }
    /// Records the path position and DPOR version vector of the access that
    /// `operation` just performed on its object.
    pub(super) fn set_last_access(
        &mut self,
        operation: Operation,
        path_id: usize,
        dpor_vv: &VersionVec,
    ) {
        match &mut self.entries[operation.obj.index] {
            Entry::Arc(entry) => entry.set_last_access(operation.action.into(), path_id, dpor_vv),
            Entry::Atomic(entry) => {
                entry.set_last_access(operation.action.into(), path_id, dpor_vv)
            }
            Entry::Mutex(entry) => entry.set_last_access(path_id, dpor_vv),
            Entry::Condvar(entry) => entry.set_last_access(path_id, dpor_vv),
            Entry::Notify(entry) => entry.set_last_access(path_id, dpor_vv),
            Entry::RwLock(entry) => entry.set_last_access(path_id, dpor_vv),
            Entry::Channel(entry) => {
                entry.set_last_access(operation.action.into(), path_id, dpor_vv)
            }
            _ => panic!("object is not branchable"),
        }
    }
    /// Panics if any leaks were detected
    pub(crate) fn check_for_leaks(&self) {
        // Only allocations, `Arc`s, and channels track leakable resources.
        for (index, entry) in self.entries.iter().enumerate() {
            match entry {
                Entry::Alloc(entry) => entry.check_for_leaks(index),
                Entry::Arc(entry) => entry.check_for_leaks(index),
                Entry::Channel(entry) => entry.check_for_leaks(index),
                _ => {}
            }
        }
    }
}
impl<T> Ref<T> {
    /// Erase the type marker
    pub(super) fn erase(self) -> Ref<()> {
        Ref {
            index: self.index,
            _p: PhantomData,
        }
    }
    /// Returns `true` when both refs point at the same store slot.
    pub(super) fn ref_eq(self, other: Ref<T>) -> bool {
        self.index == other.index
    }
}
impl<T: Object> Ref<T> {
    /// Get a reference to the object associated with this reference from the store
    ///
    /// # Panics
    ///
    /// Panics if the entry at this index does not hold a `T`.
    pub(super) fn get(self, store: &Store<T::Entry>) -> &T {
        T::get_ref(&store.entries[self.index])
            .expect("[loom internal bug] unexpected object stored at reference")
    }
    /// Get a mutable reference to the object associated with this reference
    /// from the store
    ///
    /// # Panics
    ///
    /// Panics if the entry at this index does not hold a `T`.
    pub(super) fn get_mut(self, store: &mut Store<T::Entry>) -> &mut T {
        T::get_mut(&mut store.entries[self.index])
            .expect("[loom internal bug] unexpected object stored at reference")
    }
}
impl Ref {
    /// Convert a store index `usize` into a ref
    pub(super) fn from_usize(index: usize) -> Ref {
        Ref {
            index,
            _p: PhantomData,
        }
    }
    /// Attempt to view this untyped ref as a `Ref<T>`, checking that the
    /// entry at this index actually holds a `T`.
    pub(super) fn downcast<T>(self, store: &Store<T::Entry>) -> Option<Ref<T>>
    where
        T: Object,
    {
        // Bail out with `None` unless the stored entry is of the requested type.
        T::get_ref(&store.entries[self.index])?;
        Some(Ref {
            index: self.index,
            _p: PhantomData,
        })
    }
}
impl<T> Clone for Ref<T> {
    fn clone(&self) -> Ref<T> {
        // `Ref` is only an index plus a zero-sized marker; a bitwise copy
        // (see the `Copy` impl below) is a valid clone.
        *self
    }
}
impl<T> Copy for Ref<T> {}
impl<T> fmt::Debug for Ref<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Rendered as `Ref<TypeName>(index)` for diagnostics.
        let ty = std::any::type_name::<T>();
        write!(fmt, "Ref<{}>({})", ty, self.index)
    }
}
// TODO: These fns shouldn't be on Ref
impl<T: Object<Entry = Entry>> Ref<T> {
    // TODO: rename `branch_disable`
    /// Creates a scheduling branch for a lock-style acquisition; when
    /// `is_locked`, the active thread is blocked at `location` instead of
    /// proceeding.
    pub(super) fn branch_acquire(self, is_locked: bool, location: Location) {
        super::branch(|execution| {
            trace!(obj = ?self, ?is_locked, "Object::branch_acquire");
            self.set_action(execution, Action::Opaque, location);
            if is_locked {
                // The mutex is currently blocked, cannot make progress
                execution.threads.active_mut().set_blocked(location);
            }
        })
    }
    /// Creates a scheduling branch recording `action` against this object.
    pub(super) fn branch_action(
        self,
        action: impl Into<Action> + std::fmt::Debug,
        location: Location,
    ) {
        super::branch(|execution| {
            trace!(obj = ?self, ?action, "Object::branch_action");
            self.set_action(execution, action.into(), location);
        })
    }
    /// Like `branch_action`, but additionally blocks the active thread when
    /// `disable` is set (the operation cannot currently proceed).
    pub(super) fn branch_disable(
        self,
        action: impl Into<Action> + std::fmt::Debug,
        disable: bool,
        location: Location,
    ) {
        super::branch(|execution| {
            trace!(obj = ?self, ?action, ?disable, "Object::branch_disable");
            self.set_action(execution, action.into(), location);
            if disable {
                // Cannot make progress.
                execution.threads.active_mut().set_blocked(location);
            }
        })
    }
    /// Creates a scheduling branch with no object-specific action.
    pub(super) fn branch_opaque(self, location: Location) {
        self.branch_action(Action::Opaque, location)
    }
    /// Records the pending operation on the active thread, first verifying
    /// that this ref actually points at a `T`.
    fn set_action(self, execution: &mut Execution, action: Action, location: Location) {
        assert!(
            T::get_ref(&execution.objects.entries[self.index]).is_some(),
            "failed to get object for ref {:?}",
            self
        );
        execution.threads.active_mut().operation = Some(Operation {
            obj: self.erase(),
            action,
            location,
        });
    }
}
impl Operation {
    /// The object targeted by this operation.
    pub(super) fn object(&self) -> Ref {
        self.obj
    }
    /// The action performed on the object.
    pub(super) fn action(&self) -> Action {
        self.action
    }
    /// The source location that initiated the operation.
    pub(super) fn location(&self) -> Location {
        self.location
    }
}
// Conversions between the store-wide `Action` enum and the per-object action
// types. The `From<Action>` directions panic (`unreachable!`) on a variant
// mismatch; they are only invoked after dispatching on the object kind.
impl From<Action> for rt::arc::Action {
    fn from(action: Action) -> Self {
        match action {
            Action::Arc(action) => action,
            _ => unreachable!(),
        }
    }
}
impl From<Action> for rt::atomic::Action {
    fn from(action: Action) -> Self {
        match action {
            Action::Atomic(action) => action,
            _ => unreachable!(),
        }
    }
}
impl From<Action> for rt::mpsc::Action {
    fn from(action: Action) -> Self {
        match action {
            Action::Channel(action) => action,
            _ => unreachable!(),
        }
    }
}
impl From<rt::arc::Action> for Action {
    fn from(action: rt::arc::Action) -> Self {
        Action::Arc(action)
    }
}
impl From<rt::atomic::Action> for Action {
    fn from(action: rt::atomic::Action) -> Self {
        Action::Atomic(action)
    }
}
impl From<rt::mpsc::Action> for Action {
    fn from(action: rt::mpsc::Action) -> Self {
        Action::Channel(action)
    }
}
impl From<rt::rwlock::Action> for Action {
    fn from(action: rt::rwlock::Action) -> Self {
        Action::RwLock(action)
    }
}
// Allows comparing a store-wide `Action` directly against an rwlock action.
impl PartialEq<rt::rwlock::Action> for Action {
    fn eq(&self, other: &rt::rwlock::Action) -> bool {
        let other: Action = (*other).into();
        *self == other
    }
}

472
third-party/vendor/loom/src/rt/path.rs vendored Normal file
View file

@ -0,0 +1,472 @@
use crate::rt::{execution, object, thread, MAX_ATOMIC_HISTORY, MAX_THREADS};
#[cfg(feature = "checkpoint")]
use serde::{Deserialize, Serialize};
/// An execution path
///
/// Records every branch decision (thread schedule, atomic load, spurious
/// wakeup) taken during one execution so it can be replayed and permuted.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Path {
    /// Maximum number of thread preemptions to explore, when bounded.
    preemption_bound: Option<u8>,
    /// Current execution's position in the branches vec.
    ///
    /// When the execution starts, this is zero, but `branches` might not be
    /// empty.
    ///
    /// In order to perform an exhaustive search, the execution is seeded with a
    /// set of branches.
    pos: usize,
    /// List of all branches in the execution.
    ///
    /// A branch is of type `Schedule`, `Load`, or `Spurious`
    branches: object::Store<Entry>,
}
/// A thread-scheduling branch point.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Schedule {
    /// Number of times the thread leading to this branch point has been
    /// pre-empted.
    preemptions: u8,
    /// The thread that was active first
    initial_active: Option<u8>,
    /// State of each thread
    threads: [Thread; MAX_THREADS],
    /// The previous schedule branch
    prev: Option<object::Ref<Schedule>>,
}
/// An atomic-load branch point: which prior store the load observes.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Load {
    /// All possible values
    values: [u8; MAX_ATOMIC_HISTORY],
    /// Current value
    pos: u8,
    /// Number of values in list
    len: u8,
}
/// A spurious-wakeup branch point; `true` once the spurious arm is explored.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Spurious(bool);
// Generates the `Entry` storage enum used by the branch object store.
objects! {
    #[derive(Debug)]
    #[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
    Entry,
    Schedule(Schedule),
    Load(Load),
    Spurious(Spurious),
}
/// Exploration state of a single thread at a `Schedule` branch point.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) enum Thread {
    /// The thread is currently disabled
    Disabled,
    /// The thread should not be explored
    Skip,
    /// The thread is in a yield state.
    Yield,
    /// The thread is waiting to be explored
    Pending,
    /// The thread is currently being explored
    Active,
    /// The thread has been explored
    Visited,
}
// Asserts the branch store still has capacity; exceeding the configured
// maximum number of branches usually indicates a spin loop in the model.
macro_rules! assert_path_len {
    ($branches:expr) => {{
        assert!(
            // if we are panicking, we may be performing a branch due to a
            // `Drop` impl (e.g., for `Arc`, or for a user type that does an
            // atomic operation in its `Drop` impl). if that's the case,
            // asserting this again will double panic. therefore, short-circuit
            // the assertion if the thread is panicking.
            $branches.len() < $branches.capacity() || std::thread::panicking(),
            "Model exceeded maximum number of branches. This is often caused \
            by an algorithm requiring the processor to make progress, e.g. \
            spin locks.",
        );
    }};
}
impl Path {
    /// Create a new, blank, configured to branch at most `max_branches` times
    /// and at most `preemption_bound` thread preemptions.
    pub(crate) fn new(max_branches: usize, preemption_bound: Option<u8>) -> Path {
        Path {
            preemption_bound,
            pos: 0,
            branches: object::Store::with_capacity(max_branches),
        }
    }
    /// Grows the branch store so it can hold `max_branches` entries in total.
    pub(crate) fn set_max_branches(&mut self, max_branches: usize) {
        self.branches
            .reserve_exact(max_branches - self.branches.len());
    }
    /// Returns `true` if the execution has reached a point where the known path
    /// has been traversed and has reached a new branching point.
    pub(super) fn is_traversed(&self) -> bool {
        self.pos == self.branches.len()
    }
    /// Current position within the recorded branches.
    pub(super) fn pos(&self) -> usize {
        self.pos
    }
    /// Push a new atomic-load branch
    ///
    /// `seed` lists indices of the stores the load may observe; the first
    /// value is the one explored on this iteration (see `branch_load`).
    pub(super) fn push_load(&mut self, seed: &[u8]) {
        assert_path_len!(self.branches);
        let load_ref = self.branches.insert(Load {
            values: [0; MAX_ATOMIC_HISTORY],
            pos: 0,
            len: 0,
        });
        let load = load_ref.get_mut(&mut self.branches);
        for (i, &store) in seed.iter().enumerate() {
            assert!(
                store < MAX_ATOMIC_HISTORY as u8,
                "[loom internal bug] store = {}; max = {}",
                store,
                MAX_ATOMIC_HISTORY
            );
            assert!(
                i < MAX_ATOMIC_HISTORY,
                "[loom internal bug] i = {}; max = {}",
                i,
                MAX_ATOMIC_HISTORY
            );
            load.values[i] = store as u8;
            load.len += 1;
        }
    }
    /// Returns the atomic write to read
    pub(super) fn branch_load(&mut self) -> usize {
        assert!(!self.is_traversed(), "[loom internal bug]");
        let load = object::Ref::from_usize(self.pos)
            .downcast::<Load>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);
        self.pos += 1;
        load.values[load.pos as usize] as usize
    }
    /// Branch on spurious notifications
    ///
    /// Returns whether to simulate a spurious wakeup at this point. A freshly
    /// created branch starts at `false`; `step` later flips it to `true`.
    pub(super) fn branch_spurious(&mut self) -> bool {
        if self.is_traversed() {
            assert_path_len!(self.branches);
            self.branches.insert(Spurious(false));
        }
        let spurious = object::Ref::from_usize(self.pos)
            .downcast::<Spurious>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches)
            .0;
        self.pos += 1;
        spurious
    }
    /// Returns the thread identifier to schedule
    ///
    /// When the path is fully traversed, a new `Schedule` branch is seeded
    /// from `seed`; otherwise the previously recorded decision is replayed.
    /// Returns `None` when no thread is active at this branch.
    pub(super) fn branch_thread(
        &mut self,
        execution_id: execution::Id,
        seed: impl ExactSizeIterator<Item = Thread>,
    ) -> Option<thread::Id> {
        if self.is_traversed() {
            assert_path_len!(self.branches);
            // Find the last thread scheduling branch in the path
            let prev = self.last_schedule();
            // Entering a new exploration space.
            //
            // Initialize a new branch. The initial field values don't matter
            // as they will be updated below.
            let schedule_ref = self.branches.insert(Schedule {
                preemptions: 0,
                initial_active: None,
                threads: [Thread::Disabled; MAX_THREADS],
                prev,
            });
            // Get a reference to the branch in the object store.
            let schedule = schedule_ref.get_mut(&mut self.branches);
            assert!(seed.len() <= MAX_THREADS, "[loom internal bug]");
            // Currently active thread
            let mut active = None;
            for (i, v) in seed.enumerate() {
                // Initialize thread states
                schedule.threads[i] = v;
                if v.is_active() {
                    assert!(
                        active.is_none(),
                        "[loom internal bug] only one thread should start as active"
                    );
                    active = Some(i as u8);
                }
            }
            // Ensure at least one thread is active, otherwise toggle a yielded
            // thread.
            if active.is_none() {
                for (i, th) in schedule.threads.iter_mut().enumerate() {
                    if *th == Thread::Yield {
                        *th = Thread::Active;
                        active = Some(i as u8);
                        break;
                    }
                }
            }
            // `initial_active` is only retained when the active thread is the
            // same as at the previous schedule point (i.e. no switch happened).
            let mut initial_active = active;
            if let Some(prev) = prev {
                if initial_active != prev.get(&self.branches).active_thread_index() {
                    initial_active = None;
                }
            }
            let preemptions = prev
                .map(|prev| prev.get(&self.branches).preemptions())
                .unwrap_or(0);
            debug_assert!(
                self.preemption_bound.is_none() || Some(preemptions) <= self.preemption_bound,
                "[loom internal bug] max = {:?}; curr = {}",
                self.preemption_bound,
                preemptions,
            );
            let schedule = schedule_ref.get_mut(&mut self.branches);
            schedule.initial_active = initial_active;
            schedule.preemptions = preemptions;
        }
        let schedule = object::Ref::from_usize(self.pos)
            .downcast::<Schedule>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);
        self.pos += 1;
        schedule
            .threads
            .iter()
            .enumerate()
            .find(|&(_, th)| th.is_active())
            .map(|(i, _)| thread::Id::new(execution_id, i))
    }
    /// Adds a DPOR backtrack point at branch `point`, requesting that
    /// `thread_id` be explored there on a later iteration.
    pub(super) fn backtrack(&mut self, point: usize, thread_id: thread::Id) {
        let schedule = object::Ref::from_usize(point)
            .downcast::<Schedule>(&self.branches)
            .unwrap()
            .get_mut(&mut self.branches);
        // Exhaustive DPOR only requires adding this backtrack point
        schedule.backtrack(thread_id, self.preemption_bound);
        let mut curr = if let Some(curr) = schedule.prev {
            curr
        } else {
            return;
        };
        if self.preemption_bound.is_some() {
            loop {
                // Preemption bounded DPOR requires conservatively adding
                // another backtrack point to cover cases missed by the bounds.
                if let Some(prev) = curr.get(&self.branches).prev {
                    let active_a = curr.get(&self.branches).active_thread_index();
                    let active_b = prev.get(&self.branches).active_thread_index();
                    if active_a != active_b {
                        curr.get_mut(&mut self.branches)
                            .backtrack(thread_id, self.preemption_bound);
                        return;
                    }
                    curr = prev;
                } else {
                    // This is the very first schedule
                    curr.get_mut(&mut self.branches)
                        .backtrack(thread_id, self.preemption_bound);
                    return;
                }
            }
        }
    }
    /// Reset the path to prepare for the next exploration of the model.
    ///
    /// This function will also trim the object store, dropping any objects that
    /// are created in pruned sections of the path.
    ///
    /// Returns `false` once the entire exploration space is exhausted.
    pub(super) fn step(&mut self) -> bool {
        // Reset the position to zero, the path will start traversing from the
        // beginning
        self.pos = 0;
        // Set the final branch to try the next option. If all options have been
        // traversed, pop the final branch and try again w/ the one under it.
        //
        // This is depth-first tree traversal.
        //
        for last in (0..self.branches.len()).rev() {
            let last = object::Ref::from_usize(last);
            // Remove all objects that were created **after** this branch
            self.branches.truncate(last);
            if let Some(schedule_ref) = last.downcast::<Schedule>(&self.branches) {
                let schedule = schedule_ref.get_mut(&mut self.branches);
                // Transition the active thread to visited.
                if let Some(thread) = schedule.threads.iter_mut().find(|th| th.is_active()) {
                    *thread = Thread::Visited;
                }
                // Find a pending thread and transition it to active
                let rem = schedule
                    .threads
                    .iter_mut()
                    .find(|th| th.is_pending())
                    .map(|th| {
                        *th = Thread::Active;
                    })
                    .is_some();
                if rem {
                    return true;
                }
            } else if let Some(load_ref) = last.downcast::<Load>(&self.branches) {
                let load = load_ref.get_mut(&mut self.branches);
                // Advance to the next observable store for this load.
                load.pos += 1;
                if load.pos < load.len {
                    return true;
                }
            } else if let Some(spurious_ref) = last.downcast::<Spurious>(&self.branches) {
                let spurious = spurious_ref.get_mut(&mut self.branches);
                // Explore the spurious-wakeup arm exactly once.
                if !spurious.0 {
                    spurious.0 = true;
                    return true;
                }
            } else {
                unreachable!();
            }
        }
        false
    }
    /// The most recent `Schedule` branch in the path, if any.
    fn last_schedule(&self) -> Option<object::Ref<Schedule>> {
        self.branches.iter_ref::<Schedule>().rev().next()
    }
}
impl Schedule {
    /// Returns the index of the currently active thread
    fn active_thread_index(&self) -> Option<u8> {
        self.threads
            .iter()
            .enumerate()
            .find(|(_, th)| th.is_active())
            .map(|(index, _)| index as u8)
    }
    /// Compute the number of preemptions for the current state of the branch
    fn preemptions(&self) -> u8 {
        // Switching away from a still-tracked initial thread counts as one
        // additional preemption.
        if self.initial_active.is_some() && self.initial_active != self.active_thread_index() {
            return self.preemptions + 1;
        }
        self.preemptions
    }
    /// Marks `thread_id` (or, if it is not enabled here, every thread) at
    /// this branch as worth exploring — unless the preemption bound has
    /// already been reached.
    fn backtrack(&mut self, thread_id: thread::Id, preemption_bound: Option<u8>) {
        if let Some(bound) = preemption_bound {
            assert!(
                self.preemptions <= bound,
                "[loom internal bug] actual = {}, bound = {}",
                self.preemptions,
                bound
            );
            if self.preemptions == bound {
                return;
            }
        }
        let thread_id = thread_id.as_usize();
        if thread_id >= self.threads.len() {
            return;
        }
        if self.threads[thread_id].is_enabled() {
            self.threads[thread_id].explore();
        } else {
            // The requested thread cannot run here; conservatively mark all
            // threads for exploration.
            for th in &mut self.threads {
                th.explore();
            }
        }
    }
}
impl Thread {
    /// Promote a skipped thread to pending so the scheduler will explore it.
    fn explore(&mut self) {
        if matches!(self, Thread::Skip) {
            *self = Thread::Pending;
        }
    }
    /// Waiting to be explored.
    fn is_pending(&self) -> bool {
        matches!(self, Thread::Pending)
    }
    /// Currently being explored.
    fn is_active(&self) -> bool {
        matches!(self, Thread::Active)
    }
    /// Any state other than `Disabled`.
    fn is_enabled(&self) -> bool {
        !matches!(self, Thread::Disabled)
    }
    /// Currently unable to run.
    fn is_disabled(&self) -> bool {
        matches!(self, Thread::Disabled)
    }
}

270
third-party/vendor/loom/src/rt/rwlock.rs vendored Normal file
View file

@ -0,0 +1,270 @@
use crate::rt::object;
use crate::rt::{thread, Access, Execution, Location, Synchronize, VersionVec};
use std::collections::HashSet;
use std::sync::atomic::Ordering::{Acquire, Release};
/// Model of a reader-writer lock, identified by its slot in the object store.
#[derive(Debug, Copy, Clone)]
pub(crate) struct RwLock {
    state: object::Ref<State>,
}
/// Who currently holds the lock.
#[derive(Debug, PartialEq)]
enum Locked {
    /// Held by a set of concurrent readers.
    Read(HashSet<thread::Id>),
    /// Held exclusively by a single writer.
    Write(thread::Id),
}
/// The kind of lock operation a thread is attempting.
#[derive(Debug, Copy, Clone, PartialEq)]
pub(super) enum Action {
    /// Read lock
    Read,
    /// Write lock
    Write,
}
/// Shared state of a modeled rwlock, stored in the execution's object store.
#[derive(Debug)]
pub(super) struct State {
    /// A single `thread::Id` when Write locked.
    /// A set of `thread::Id` when Read locked.
    lock: Option<Locked>,
    /// Tracks write access to the rwlock.
    last_access: Option<Access>,
    /// Causality transfers between threads
    synchronize: Synchronize,
}
impl RwLock {
    /// Creates a new rwlock object in the current execution's object store.
    pub(crate) fn new() -> RwLock {
        super::execution(|execution| {
            let state = execution.objects.insert(State {
                lock: None,
                last_access: None,
                synchronize: Synchronize::new(),
            });
            RwLock { state }
        })
    }
    /// Acquire the read lock.
    /// Fail to acquire read lock if already *write* locked.
    pub(crate) fn acquire_read_lock(&self, location: Location) {
        // Branch the exploration; the thread blocks here when write-locked.
        self.state
            .branch_disable(Action::Read, self.is_write_locked(), location);
        assert!(
            self.post_acquire_read_lock(),
            "expected to be able to acquire read lock"
        );
    }
    /// Acquire write lock.
    /// Fail to acquire write lock if either read or write locked.
    pub(crate) fn acquire_write_lock(&self, location: Location) {
        self.state.branch_disable(
            Action::Write,
            self.is_write_locked() || self.is_read_locked(),
            location,
        );
        assert!(
            self.post_acquire_write_lock(),
            "expected to be able to acquire write lock"
        );
    }
    /// Attempts to acquire the read lock; returns `false` if write-locked.
    pub(crate) fn try_acquire_read_lock(&self, location: Location) -> bool {
        self.state.branch_action(Action::Read, location);
        self.post_acquire_read_lock()
    }
    /// Attempts to acquire the write lock; returns `false` if held at all.
    pub(crate) fn try_acquire_write_lock(&self, location: Location) -> bool {
        self.state.branch_action(Action::Write, location);
        self.post_acquire_write_lock()
    }
    /// Releases a read lock held by the active thread.
    pub(crate) fn release_read_lock(&self) {
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            let thread_id = execution.threads.active_id();
            state
                .synchronize
                .sync_store(&mut execution.threads, Release);
            // Establish sequential consistency between the lock's operations.
            execution.threads.seq_cst();
            let readers = match &mut state.lock {
                Some(Locked::Read(readers)) => readers,
                _ => panic!("invalid internal loom state"),
            };
            readers.remove(&thread_id);
            // Only wake waiting threads once the last reader is gone.
            if readers.is_empty() {
                state.lock = None;
                self.unlock_threads(execution, thread_id);
            }
        });
    }
    /// Releases the write lock held by the active thread.
    pub(crate) fn release_write_lock(&self) {
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            state.lock = None;
            state
                .synchronize
                .sync_store(&mut execution.threads, Release);
            // Establish sequential consistency between the lock's operations.
            execution.threads.seq_cst();
            let thread_id = execution.threads.active_id();
            self.unlock_threads(execution, thread_id);
        });
    }
    /// Makes every other thread whose pending operation targets this rwlock
    /// runnable again.
    fn unlock_threads(&self, execution: &mut Execution, thread_id: thread::Id) {
        // TODO: This and the above function look very similar.
        // Refactor the two to DRY the code.
        for (id, thread) in execution.threads.iter_mut() {
            if id == thread_id {
                continue;
            }
            let obj = thread
                .operation
                .as_ref()
                .map(|operation| operation.object());
            if obj == Some(self.state.erase()) {
                thread.set_runnable();
            }
        }
    }
    /// Returns `true` if RwLock is read locked
    fn is_read_locked(&self) -> bool {
        super::execution(|execution| {
            let lock = &self.state.get(&execution.objects).lock;
            matches!(lock, Some(Locked::Read(_)))
        })
    }
    /// Returns `true` if RwLock is write locked.
    fn is_write_locked(&self) -> bool {
        super::execution(|execution| {
            let lock = &self.state.get(&execution.objects).lock;
            matches!(lock, Some(Locked::Write(_)))
        })
    }
    /// Records the read-lock acquisition; returns `false` when the lock is
    /// write-locked and the read lock cannot be taken.
    fn post_acquire_read_lock(&self) -> bool {
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            let thread_id = execution.threads.active_id();
            // Set the lock to the current thread
            let mut already_locked = false;
            state.lock = match state.lock.take() {
                None => {
                    let mut threads: HashSet<thread::Id> = HashSet::new();
                    threads.insert(thread_id);
                    Some(Locked::Read(threads))
                }
                Some(Locked::Read(mut threads)) => {
                    threads.insert(thread_id);
                    Some(Locked::Read(threads))
                }
                Some(Locked::Write(writer)) => {
                    already_locked = true;
                    Some(Locked::Write(writer))
                }
            };
            // The RwLock is already Write locked, so we cannot acquire a read lock on it.
            if already_locked {
                return false;
            }
            // Fix: this call was previously wrapped in a stray `dbg!(..)`,
            // printing to stderr on every read-lock acquisition.
            state.synchronize.sync_load(&mut execution.threads, Acquire);
            execution.threads.seq_cst();
            // Block all writer threads from attempting to acquire the RwLock
            for (id, th) in execution.threads.iter_mut() {
                if id == thread_id {
                    continue;
                }
                let op = match th.operation.as_ref() {
                    Some(op) if op.object() == self.state.erase() => op,
                    _ => continue,
                };
                if op.action() == Action::Write {
                    let location = op.location();
                    th.set_blocked(location);
                }
            }
            true
        })
    }
    /// Records the write-lock acquisition; returns `false` when readers hold
    /// the lock.
    fn post_acquire_write_lock(&self) -> bool {
        super::execution(|execution| {
            let state = self.state.get_mut(&mut execution.objects);
            let thread_id = execution.threads.active_id();
            // Set the lock to the current thread
            state.lock = match state.lock {
                Some(Locked::Read(_)) => return false,
                _ => Some(Locked::Write(thread_id)),
            };
            state.synchronize.sync_load(&mut execution.threads, Acquire);
            // Establish sequential consistency between locks
            execution.threads.seq_cst();
            // Block every other thread attempting to acquire the RwLock,
            // readers and writers alike (a writer excludes both).
            for (id, th) in execution.threads.iter_mut() {
                if id == thread_id {
                    continue;
                }
                match th.operation.as_ref() {
                    Some(op) if op.object() == self.state.erase() => {
                        let location = op.location();
                        th.set_blocked(location);
                    }
                    _ => continue,
                };
            }
            true
        })
    }
}
impl State {
    /// The last recorded access, used by DPOR dependency tracking.
    pub(crate) fn last_dependent_access(&self) -> Option<&Access> {
        self.last_access.as_ref()
    }
    /// Records the current path position and version as the latest access.
    pub(crate) fn set_last_access(&mut self, path_id: usize, version: &VersionVec) {
        Access::set_or_create(&mut self.last_access, path_id, version)
    }
}

View file

@ -0,0 +1,162 @@
#![allow(deprecated)]
use crate::rt::{thread, Execution};
use generator::{self, Generator, Gn};
use scoped_tls::scoped_thread_local;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::fmt;
/// Drives model threads, each backed by a generator (stackful coroutine).
pub(crate) struct Scheduler {
    /// Threads
    threads: Vec<Thread>,
    /// Index of the next generator slot to hand to a spawned thread.
    next_thread: usize,
    /// Closures queued by `spawn` while a thread was running; started in `run`.
    queued_spawn: VecDeque<Box<dyn FnOnce()>>,
}
/// A model thread: a generator that is resumed with the closure to execute.
type Thread = Generator<'static, Option<Box<dyn FnOnce()>>, ()>;
scoped_thread_local! {
    // Scheduler state exposed to model code while a thread runs (see `tick`).
    static STATE: RefCell<State<'_>>
}
struct State<'a> {
    // The execution currently being driven.
    execution: &'a mut Execution,
    // Queue that `Scheduler::spawn` pushes onto; drained in `run`.
    queued_spawn: &'a mut VecDeque<Box<dyn FnOnce()>>,
}
impl Scheduler {
    /// Create an execution
    pub(crate) fn new(capacity: usize) -> Scheduler {
        let threads = spawn_threads(capacity);
        Scheduler {
            threads,
            next_thread: 0,
            queued_spawn: VecDeque::new(),
        }
    }
    /// Access the execution
    pub(crate) fn with_execution<F, R>(f: F) -> R
    where
        F: FnOnce(&mut Execution) -> R,
    {
        Self::with_state(|state| f(state.execution))
    }
    /// Perform a context switch
    ///
    /// Yields control from the current model thread back to the scheduler.
    pub(crate) fn switch() {
        use std::future::Future;
        use std::pin::Pin;
        use std::ptr;
        use std::task::{Context, RawWaker, RawWakerVTable, Waker};
        unsafe fn noop_clone(_: *const ()) -> RawWaker {
            unreachable!()
        }
        unsafe fn noop(_: *const ()) {}
        // Wrapping with an async block deals with the thread-local context
        // `std` uses to manage async blocks
        let mut switch = async { generator::yield_with(()) };
        let switch = unsafe { Pin::new_unchecked(&mut switch) };
        // The waker is never invoked: the future is ready on its first poll,
        // so no-op vtable entries are sufficient.
        let raw_waker = RawWaker::new(
            ptr::null(),
            &RawWakerVTable::new(noop_clone, noop, noop, noop),
        );
        let waker = unsafe { Waker::from_raw(raw_waker) };
        let mut cx = Context::from_waker(&waker);
        assert!(switch.poll(&mut cx).is_ready());
    }
    /// Queues `f` to run on a fresh model thread; started from `run`.
    pub(crate) fn spawn(f: Box<dyn FnOnce()>) {
        Self::with_state(|state| state.queued_spawn.push_back(f));
    }
    /// Drives one full execution: runs `f` as the root thread, then keeps
    /// resuming the active thread until no thread is runnable.
    pub(crate) fn run<F>(&mut self, execution: &mut Execution, f: F)
    where
        F: FnOnce() + Send + 'static,
    {
        self.next_thread = 1;
        self.threads[0].set_para(Some(Box::new(f)));
        self.threads[0].resume();
        loop {
            if !execution.threads.is_active() {
                return;
            }
            let active = execution.threads.active_id();
            self.tick(active, execution);
            // Start any threads spawned while the active thread was running.
            while let Some(th) = self.queued_spawn.pop_front() {
                let thread_id = self.next_thread;
                self.next_thread += 1;
                self.threads[thread_id].set_para(Some(th));
                self.threads[thread_id].resume();
            }
        }
    }
    /// Resumes `thread` with the scheduler state installed in `STATE`.
    fn tick(&mut self, thread: thread::Id, execution: &mut Execution) {
        let state = RefCell::new(State {
            execution,
            queued_spawn: &mut self.queued_spawn,
        });
        let threads = &mut self.threads;
        // NOTE(review): `transmute_lt` erases the state's borrow lifetime;
        // this appears sound because the reference is only reachable through
        // `STATE` for the duration of this `set` call — confirm.
        STATE.set(unsafe { transmute_lt(&state) }, || {
            threads[thread.as_usize()].resume();
        });
    }
    /// Runs `f` with the current scheduler state.
    ///
    /// # Panics
    ///
    /// Panics when called outside of a running loom model.
    fn with_state<F, R>(f: F) -> R
    where
        F: FnOnce(&mut State<'_>) -> R,
    {
        if !STATE.is_set() {
            panic!("cannot access Loom execution state from outside a Loom model. \
            are you accessing a Loom synchronization primitive from outside a Loom test (a call to `model` or `check`)?")
        }
        STATE.with(|state| f(&mut state.borrow_mut()))
    }
}
impl fmt::Debug for Scheduler {
    /// Debug-renders the scheduler's generator threads.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fix: the struct name shown was "Schedule"; the type is `Scheduler`.
        fmt.debug_struct("Scheduler")
            .field("threads", &self.threads)
            .finish()
    }
}
/// Creates `n` generator-backed threads, each parked waiting for a closure.
fn spawn_threads(n: usize) -> Vec<Thread> {
    (0..n)
        .map(|_| {
            let mut g = Gn::new(move || {
                loop {
                    // Receive the closure delivered via `set_para` + `resume`.
                    let f: Option<Box<dyn FnOnce()>> = generator::yield_(()).unwrap();
                    generator::yield_with(());
                    f.unwrap()();
                }
                // done!();
            });
            // Advance the generator to its first `yield_` so it is ready to
            // accept a closure.
            g.resume();
            g
        })
        .collect()
}
/// Erases the inner lifetime of `State` so it can be stored in the
/// `scoped_thread_local!` slot.
///
/// # Safety
///
/// The returned reference must not be used beyond `'b`; in this file it is
/// only reachable through `STATE` for the duration of the `STATE.set` call
/// in `Scheduler::tick`.
unsafe fn transmute_lt<'a, 'b>(state: &'a RefCell<State<'b>>) -> &'a RefCell<State<'static>> {
    ::std::mem::transmute(state)
}

View file

@ -0,0 +1,63 @@
use crate::rt::{thread, VersionVec};
use std::sync::atomic::Ordering::{self, *};
/// A synchronization point between two threads.
///
/// Threads synchronize with this point using any of the available orderings. On
/// loads, the thread's causality is updated using the synchronization point's
/// stored causality. On stores, the synchronization point's causality is
/// updated with the threads.
#[derive(Debug, Clone, Copy)]
pub(crate) struct Synchronize {
    /// Version vector capturing what happened before the last store here.
    happens_before: VersionVec,
}
impl Synchronize {
    /// Creates an empty synchronization point (no prior causality).
    pub fn new() -> Self {
        Synchronize {
            happens_before: VersionVec::new(),
        }
    }
    /// Applies the load side of a synchronization with the given ordering.
    pub fn sync_load(&mut self, threads: &mut thread::Set, order: Ordering) {
        match order {
            Relaxed | Release => {
                // Nothing happens!
            }
            Acquire | AcqRel => {
                self.sync_acq(threads);
            }
            SeqCst => {
                self.sync_acq(threads);
                threads.seq_cst();
            }
            order => unimplemented!("unimplemented ordering {:?}", order),
        }
    }
    /// Applies the store side of a synchronization with the given ordering.
    ///
    /// Regardless of ordering, the active thread's `released` view is folded
    /// in, so a later acquire observes fence-established causality.
    pub fn sync_store(&mut self, threads: &mut thread::Set, order: Ordering) {
        self.happens_before.join(&threads.active().released);
        match order {
            Relaxed | Acquire => {
                // Nothing happens!
            }
            Release | AcqRel => {
                self.sync_rel(threads);
            }
            SeqCst => {
                self.sync_rel(threads);
                threads.seq_cst();
            }
            order => unimplemented!("unimplemented ordering {:?}", order),
        }
    }
    // Acquire: the active thread's causality absorbs this point's history.
    fn sync_acq(&mut self, threads: &mut thread::Set) {
        threads.active_mut().causality.join(&self.happens_before);
    }
    // Release: this point's history absorbs the active thread's causality.
    fn sync_rel(&mut self, threads: &thread::Set) {
        self.happens_before.join(&threads.active().causality);
    }
}

475
third-party/vendor/loom/src/rt/thread.rs vendored Normal file
View file

@ -0,0 +1,475 @@
use crate::rt::execution;
use crate::rt::object::Operation;
use crate::rt::vv::VersionVec;
use std::{any::Any, collections::HashMap, fmt, ops};
use super::Location;
/// A modeled thread within an execution.
pub(crate) struct Thread {
    pub id: Id,
    /// If the thread is runnable, blocked, or terminated.
    pub state: State,
    /// True if the thread is in a critical section
    pub critical: bool,
    /// The operation the thread is about to take
    pub(super) operation: Option<Operation>,
    /// Tracks observed causality
    pub causality: VersionVec,
    /// Tracks the view of the latest release fence
    pub released: VersionVec,
    /// Tracks DPOR relations
    pub dpor_vv: VersionVec,
    /// Version at which the thread last yielded
    pub last_yield: Option<u16>,
    /// Number of times the thread yielded
    pub yield_count: usize,
    // Mock `thread_local!` values created by this thread.
    locals: LocalMap,
    /// `tracing` span used to associate diagnostics with the current thread.
    span: tracing::Span,
}
/// The set of all threads participating in an execution.
#[derive(Debug)]
pub(crate) struct Set {
    /// Unique execution identifier
    execution_id: execution::Id,
    /// Set of threads
    threads: Vec<Thread>,
    /// Currently scheduled thread.
    ///
    /// `None` signifies that no thread is runnable.
    active: Option<usize>,
    /// Sequential consistency causality. All sequentially consistent operations
    /// synchronize with this causality.
    pub seq_cst_causality: VersionVec,
    /// `tracing` span used as the parent for new thread spans.
    iteration_span: tracing::Span,
}
/// Identifies a thread within a specific execution.
#[derive(Eq, PartialEq, Hash, Copy, Clone)]
pub(crate) struct Id {
    // The execution this id belongs to.
    execution_id: execution::Id,
    // Index of the thread within the execution's thread set.
    id: usize,
}
impl Id {
    /// Returns an integer ID unique to this current execution (for use in
    /// [`thread::ThreadId`]'s `Debug` impl)
    pub(crate) fn public_id(&self) -> usize {
        self.id
    }
}
/// Scheduling state of a modeled thread.
#[derive(Debug, Clone, Copy)]
pub(crate) enum State {
    /// May be scheduled; `unparked` records an unpark that arrived while the
    /// thread was already runnable (see `Thread::set_unparked`).
    Runnable { unparked: bool },
    /// Blocked at the given source location.
    Blocked(Location),
    /// Voluntarily yielded; made runnable again when unparked.
    Yield,
    /// Finished executing.
    Terminated,
}
// Storage for a thread's mock thread-local values.
type LocalMap = HashMap<LocalKeyId, LocalValue>;
// Opaque identity of a mock `thread_local!` key.
#[derive(Eq, PartialEq, Hash, Copy, Clone)]
struct LocalKeyId(usize);
// A thread-local value; `None` once taken by `drop_locals`.
struct LocalValue(Option<Box<dyn Any>>);
impl Thread {
    /// Creates a new runnable thread with a fresh tracing span parented to
    /// the current iteration's span.
    fn new(id: Id, parent_span: &tracing::Span) -> Thread {
        Thread {
            id,
            span: tracing::info_span!(parent: parent_span.id(), "thread", id = id.id),
            state: State::Runnable { unparked: false },
            critical: false,
            operation: None,
            causality: VersionVec::new(),
            released: VersionVec::new(),
            dpor_vv: VersionVec::new(),
            last_yield: None,
            yield_count: 0,
            locals: HashMap::new(),
        }
    }
    /// `true` when the thread may be scheduled.
    pub(crate) fn is_runnable(&self) -> bool {
        matches!(self.state, State::Runnable { .. })
    }
    /// Makes the thread schedulable, clearing any stored unpark.
    pub(crate) fn set_runnable(&mut self) {
        self.state = State::Runnable { unparked: false };
    }
    /// Blocks the thread, remembering where it blocked for diagnostics.
    pub(crate) fn set_blocked(&mut self, location: Location) {
        self.state = State::Blocked(location);
    }
    pub(crate) fn is_blocked(&self) -> bool {
        matches!(self.state, State::Blocked(..))
    }
    pub(crate) fn is_yield(&self) -> bool {
        matches!(self.state, State::Yield)
    }
    /// Puts the thread into the yield state, recording its current version
    /// and bumping the yield counter.
    pub(crate) fn set_yield(&mut self) {
        self.state = State::Yield;
        self.last_yield = Some(self.causality[self.id]);
        self.yield_count += 1;
    }
    pub(crate) fn is_terminated(&self) -> bool {
        matches!(self.state, State::Terminated)
    }
    pub(crate) fn set_terminated(&mut self) {
        self.state = State::Terminated;
    }
    /// Takes every mock thread-local value out of this thread; their `Drop`
    /// impls run when the caller drops the returned box.
    pub(crate) fn drop_locals(&mut self) -> Box<dyn std::any::Any> {
        let mut locals = Vec::with_capacity(self.locals.len());
        // run the Drop impls of any mock thread-locals created by this thread.
        for local in self.locals.values_mut() {
            locals.push(local.0.take());
        }
        Box::new(locals)
    }
    /// Wakes this thread, absorbing the unparking thread's causality.
    pub(crate) fn unpark(&mut self, unparker: &Thread) {
        self.causality.join(&unparker.causality);
        self.set_unparked();
    }
    /// Unpark a thread's state. If it is already runnable, store the unpark for
    /// a future call to `park`.
    fn set_unparked(&mut self) {
        if self.is_blocked() || self.is_yield() {
            self.set_runnable();
        } else if self.is_runnable() {
            self.state = State::Runnable { unparked: true }
        }
    }
}
impl fmt::Debug for Thread {
    // Manual debug impl is necessary because thread locals are represented as
    // `dyn Any`, which does not implement `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Thread")
            .field("id", &self.id)
            .field("state", &self.state)
            .field("critical", &self.critical)
            .field("operation", &self.operation)
            .field("causality", &self.causality)
            .field("released", &self.released)
            .field("dpor_vv", &self.dpor_vv)
            .field("last_yield", &self.last_yield)
            .field("yield_count", &self.yield_count)
            // Placeholder for the undebuggable `locals` map.
            .field("locals", &format_args!("[..locals..]"))
            .finish()
    }
}
impl Set {
    /// Create an empty thread set.
    ///
    /// The set may contain up to `max_threads` threads.
    pub(crate) fn new(execution_id: execution::Id, max_threads: usize) -> Set {
        let mut threads = Vec::with_capacity(max_threads);
        // Capture the current iteration's span to be used as each thread
        // span's parent.
        let iteration_span = tracing::Span::current();
        // Push initial thread
        threads.push(Thread::new(Id::new(execution_id, 0), &iteration_span));
        Set {
            execution_id,
            threads,
            active: Some(0),
            seq_cst_causality: VersionVec::new(),
            iteration_span,
        }
    }
    /// Identifier of the execution this set belongs to.
    pub(crate) fn execution_id(&self) -> execution::Id {
        self.execution_id
    }
    /// Create a new thread
    pub(crate) fn new_thread(&mut self) -> Id {
        assert!(self.threads.len() < self.max());
        // Get the identifier for the thread about to be created
        let id = self.threads.len();
        // Push the thread onto the stack
        self.threads.push(Thread::new(
            Id::new(self.execution_id, id),
            &self.iteration_span,
        ));
        Id::new(self.execution_id, id)
    }
    /// Maximum number of threads this set accepts.
    ///
    /// NOTE(review): this relies on `Vec::with_capacity`, which is allowed to
    /// allocate *more* than requested, so the bound may exceed the
    /// `max_threads` passed to `new` — confirm whether an exact limit matters.
    pub(crate) fn max(&self) -> usize {
        self.threads.capacity()
    }
    /// Returns `true` if some thread is currently active.
    pub(crate) fn is_active(&self) -> bool {
        self.active.is_some()
    }
    /// Identifier of the active thread; panics if none is active.
    pub(crate) fn active_id(&self) -> Id {
        Id::new(self.execution_id, self.active.unwrap())
    }
    /// Shared reference to the active thread; panics if none is active.
    pub(crate) fn active(&self) -> &Thread {
        &self.threads[self.active.unwrap()]
    }
    /// Switch the active thread, exiting the previous thread's tracing span
    /// and entering the new one's.
    pub(crate) fn set_active(&mut self, id: Option<Id>) {
        tracing::dispatcher::get_default(|subscriber| {
            if let Some(span_id) = self.active().span.id() {
                subscriber.exit(&span_id)
            }
            if let Some(span_id) = id.and_then(|id| self.threads.get(id.id)?.span.id()) {
                subscriber.enter(&span_id);
            }
        });
        self.active = id.map(Id::as_usize);
    }
    /// Mutable reference to the active thread; panics if none is active.
    pub(crate) fn active_mut(&mut self) -> &mut Thread {
        &mut self.threads[self.active.unwrap()]
    }
    /// Get the active thread and second thread
    ///
    /// NOTE(review): panics (index out of bounds) when `other` *is* the
    /// active thread; callers such as `unpark` guard against that case.
    pub(crate) fn active2_mut(&mut self, other: Id) -> (&mut Thread, &mut Thread) {
        let active = self.active.unwrap();
        let other = other.id;
        if other >= active {
            let (l, r) = self.threads.split_at_mut(other);
            (&mut l[active], &mut r[0])
        } else {
            let (l, r) = self.threads.split_at_mut(active);
            (&mut r[0], &mut l[other])
        }
    }
    /// Advance the active thread's own entry in its causality vector.
    pub(crate) fn active_causality_inc(&mut self) {
        let id = self.active_id();
        self.active_mut().causality.inc(id);
    }
    /// The active thread's current version in its own causality vector.
    pub(crate) fn active_atomic_version(&self) -> u16 {
        let id = self.active_id();
        self.active().causality[id]
    }
    /// Unpark thread `id` on behalf of the active thread.
    pub(crate) fn unpark(&mut self, id: Id) {
        if id == self.active_id() {
            // The thread is unparking itself. We don't have to join its
            // causality with the unparker's causality in this case, since the
            // thread *is* the unparker. Just unpark its state.
            self.active_mut().set_unparked();
            return;
        }
        // Synchronize memory
        let (active, th) = self.active2_mut(id);
        th.unpark(active);
    }
    /// Insert a point of sequential consistency
    /// TODO
    /// - Deprecate SeqCst accesses and allow SeqCst fences only. The semantics of SeqCst accesses
    ///   is complex and difficult to implement correctly. On the other hand, SeqCst fence has
    ///   well-understood and clear semantics in the absence of SeqCst accesses, and can be used
    ///   for enforcing the read-after-write (RAW) ordering which is probably what the user want to
    ///   achieve with SeqCst.
    /// - Revisit the other uses of this function. They probably don't require sequential
    ///   consistency. E.g. see https://en.cppreference.com/w/cpp/named_req/Mutex
    ///
    /// References
    /// - The "scfix" paper, which proposes a memory model called RC11 that fixes SeqCst
    ///   semantics. of C11. https://plv.mpi-sws.org/scfix/
    /// - Some fixes from the "scfix" paper has been incorporated into C/C++20:
    ///   http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0668r5.html
    /// - The "promising semantics" paper, which propose an intuitive semantics of SeqCst fence in
    ///   the absence of SC accesses. https://sf.snu.ac.kr/promise-concurrency/
    pub(crate) fn seq_cst(&mut self) {
        // The previous implementation of sequential consistency was incorrect (though it's correct
        // for `fence(SeqCst)`-only scenario; use `seq_cst_fence` for `fence(SeqCst)`).
        // As a quick fix, just disable it. This may fail to model correct code,
        // but will not silently allow bugs.
    }
    /// Model `fence(SeqCst)`: join the active thread's causality with the
    /// global SeqCst causality in both directions.
    pub(crate) fn seq_cst_fence(&mut self) {
        self.threads[self.active.unwrap()]
            .causality
            .join(&self.seq_cst_causality);
        self.seq_cst_causality
            .join(&self.threads[self.active.unwrap()].causality);
    }
    /// Reset the set for a new execution, keeping only a fresh root thread.
    pub(crate) fn clear(&mut self, execution_id: execution::Id) {
        self.iteration_span = tracing::Span::current();
        self.threads.clear();
        self.threads
            .push(Thread::new(Id::new(execution_id, 0), &self.iteration_span));
        self.execution_id = execution_id;
        self.active = Some(0);
        self.seq_cst_causality = VersionVec::new();
    }
    /// Iterate over all threads as `(id, thread)` pairs.
    pub(crate) fn iter(&self) -> impl ExactSizeIterator<Item = (Id, &Thread)> + '_ {
        let execution_id = self.execution_id;
        self.threads
            .iter()
            .enumerate()
            .map(move |(id, thread)| (Id::new(execution_id, id), thread))
    }
    /// Iterate mutably over all threads as `(id, thread)` pairs.
    pub(crate) fn iter_mut(&mut self) -> impl ExactSizeIterator<Item = (Id, &mut Thread)> + '_ {
        let execution_id = self.execution_id;
        self.threads
            .iter_mut()
            .enumerate()
            .map(move |(id, thread)| (Id::new(execution_id, id), thread))
    }
    /// Split the set of threads into the active thread and an iterator of all
    /// other threads.
    pub(crate) fn split_active(&mut self) -> (&mut Thread, impl Iterator<Item = &mut Thread>) {
        let active = self.active.unwrap();
        let (one, two) = self.threads.split_at_mut(active);
        let (active, two) = two.split_at_mut(1);
        let iter = one.iter_mut().chain(two.iter_mut());
        (&mut active[0], iter)
    }
    /// Look up the active thread's value for `key`, if it was initialized.
    pub(crate) fn local<T: 'static>(
        &mut self,
        key: &'static crate::thread::LocalKey<T>,
    ) -> Option<Result<&T, AccessError>> {
        self.active_mut()
            .locals
            .get(&LocalKeyId::new(key))
            .map(|local_value| local_value.get())
    }
    /// Initialize the active thread's value for `key`.
    /// Panics if the key was already initialized on this thread.
    pub(crate) fn local_init<T: 'static>(
        &mut self,
        key: &'static crate::thread::LocalKey<T>,
        value: T,
    ) {
        assert!(self
            .active_mut()
            .locals
            .insert(LocalKeyId::new(key), LocalValue::new(value))
            .is_none())
    }
}
impl ops::Index<Id> for Set {
    type Output = Thread;
    /// Look up a thread in the set by its identifier.
    fn index(&self, index: Id) -> &Thread {
        let slot = index.id;
        &self.threads[slot]
    }
}
impl ops::IndexMut<Id> for Set {
    /// Mutably look up a thread in the set by its identifier.
    fn index_mut(&mut self, index: Id) -> &mut Thread {
        let slot = index.id;
        &mut self.threads[slot]
    }
}
impl Id {
    /// Pair an execution identifier with a per-execution thread index.
    pub(crate) fn new(execution_id: execution::Id, id: usize) -> Id {
        Id { id, execution_id }
    }
    /// The thread's index within its execution.
    pub(crate) fn as_usize(self) -> usize {
        let Id { id, .. } = self;
        id
    }
}
impl From<Id> for usize {
    /// Convert to the thread's per-execution index.
    fn from(src: Id) -> usize {
        src.as_usize()
    }
}
impl fmt::Display for Id {
    /// Display the thread index only, matching the inner `usize`'s formatting.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.id, fmt)
    }
}
impl fmt::Debug for Id {
    /// Render as `Id(<index>)`; the execution id is omitted.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.write_fmt(format_args!("Id({})", self.id))
    }
}
impl LocalKeyId {
    // Keys are identified by the address of the `'static` `LocalKey`, which
    // is unique per thread-local declaration.
    fn new<T>(key: &'static crate::thread::LocalKey<T>) -> Self {
        Self(key as *const _ as usize)
    }
}
impl LocalValue {
fn new<T: 'static>(value: T) -> Self {
Self(Some(Box::new(value)))
}
fn get<T: 'static>(&self) -> Result<&T, AccessError> {
self.0
.as_ref()
.ok_or(AccessError { _private: () })
.map(|val| {
val.downcast_ref::<T>()
.expect("local value must downcast to expected type")
})
}
}
/// An error returned by [`LocalKey::try_with`](struct.LocalKey.html#method.try_with).
pub struct AccessError {
    // Prevents construction outside this module.
    _private: (),
}
impl fmt::Debug for AccessError {
    /// Render as `AccessError`; the type carries no public state.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut out = f.debug_struct("AccessError");
        out.finish()
    }
}
impl fmt::Display for AccessError {
    /// Human-readable message for a destroyed thread-local.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `pad` honors any width/alignment flags, exactly as `str`'s
        // `Display` implementation does.
        f.pad("already destroyed")
    }
}

86
third-party/vendor/loom/src/rt/vv.rs vendored Normal file
View file

@ -0,0 +1,86 @@
use crate::rt::{execution, thread, MAX_THREADS};
#[cfg(feature = "checkpoint")]
use serde::{Deserialize, Serialize};
use std::cmp;
use std::ops;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct VersionVec {
    // One logical-clock entry per possible thread, indexed by thread id.
    versions: [u16; MAX_THREADS],
}
impl VersionVec {
    /// A version vector with every component set to zero.
    pub(crate) fn new() -> VersionVec {
        VersionVec {
            versions: [0; MAX_THREADS],
        }
    }
    /// Iterate over `(thread id, version)` pairs for the given execution.
    pub(crate) fn versions(
        &self,
        execution_id: execution::Id,
    ) -> impl Iterator<Item = (thread::Id, u16)> + '_ {
        self.versions
            .iter()
            .enumerate()
            .map(move |(thread_id, &version)| (thread::Id::new(execution_id, thread_id), version))
    }
    /// Increment the component belonging to thread `id`.
    pub(crate) fn inc(&mut self, id: thread::Id) {
        self.versions[id.as_usize()] += 1;
    }
    /// Component-wise maximum: after the call, `self` dominates both inputs.
    pub(crate) fn join(&mut self, other: &VersionVec) {
        for (i, &version) in other.versions.iter().enumerate() {
            self.versions[i] = cmp::max(self.versions[i], version);
        }
    }
    /// Returns the index of the first component where `other` is ahead of
    /// `self` (i.e. `other` has observed an event that `self` has not), if any.
    pub(crate) fn ahead(&self, other: &VersionVec) -> Option<usize> {
        for (i, &version) in other.versions.iter().enumerate() {
            if self.versions[i] < version {
                return Some(i);
            }
        }
        None
    }
}
impl cmp::PartialOrd for VersionVec {
fn partial_cmp(&self, other: &VersionVec) -> Option<cmp::Ordering> {
use cmp::Ordering::*;
let mut ret = Equal;
for i in 0..MAX_THREADS {
let a = self.versions[i];
let b = other.versions[i];
match a.cmp(&b) {
Equal => {}
Less if ret == Greater => return None,
Greater if ret == Less => return None,
ordering => ret = ordering,
}
}
Some(ret)
}
}
impl ops::Index<thread::Id> for VersionVec {
    type Output = u16;
    /// Read the clock component belonging to `index`.
    fn index(&self, index: thread::Id) -> &u16 {
        &self.versions[index.as_usize()]
    }
}
impl ops::IndexMut<thread::Id> for VersionVec {
    /// Mutably access the clock component belonging to `index`.
    fn index_mut(&mut self, index: thread::Id) -> &mut u16 {
        &mut self.versions[index.as_usize()]
    }
}

269
third-party/vendor/loom/src/sync/arc.rs vendored Normal file
View file

@ -0,0 +1,269 @@
use crate::rt;
use std::pin::Pin;
use std::{mem, ops, ptr};
/// Mock implementation of `std::sync::Arc`.
#[derive(Debug)]
pub struct Arc<T: ?Sized> {
    // Model-side object tracking strong-count operations for this allocation.
    obj: std::sync::Arc<rt::Arc>,
    // The real reference-counted value.
    value: std::sync::Arc<T>,
}
impl<T> Arc<T> {
    /// Constructs a new `Arc<T>`.
    #[track_caller]
    pub fn new(value: T) -> Arc<T> {
        let std = std::sync::Arc::new(value);
        Arc::from_std(std)
    }
    /// Constructs a new `Pin<Arc<T>>`.
    pub fn pin(data: T) -> Pin<Arc<T>> {
        // SAFETY: the value is heap-allocated behind the Arc and is not moved
        // out except via `try_unwrap`; this mirrors `std`'s `Arc::pin`.
        unsafe { Pin::new_unchecked(Arc::new(data)) }
    }
    /// Returns the inner value, if the `Arc` has exactly one strong reference.
    #[track_caller]
    pub fn try_unwrap(this: Arc<T>) -> Result<T, Arc<T>> {
        // `get_mut` reports whether the model considers this handle the only
        // strong reference; if not, hand the Arc back to the caller.
        if !this.obj.get_mut(location!()) {
            return Err(this);
        }
        assert_eq!(1, std::sync::Arc::strong_count(&this.value));
        // work around our inability to destruct the object normally,
        // because of the `Drop` presense.
        this.obj.ref_dec(location!());
        this.unregister();
        // Use the same pattern of unwrapping as `std` does.
        // We can't normally move the field out of the object
        // because it implements `drop`.
        let arc_value = unsafe {
            // Reading both fields and forgetting `this` skips our `Drop`
            // impl; `_arc_obj` is dropped here, releasing the model handle.
            let _arc_obj = ptr::read(&this.obj);
            let arc_value = ptr::read(&this.value);
            mem::forget(this);
            arc_value
        };
        match std::sync::Arc::try_unwrap(arc_value) {
            Ok(value) => Ok(value),
            // Uniqueness was asserted above, so unwrapping cannot fail.
            Err(_) => unreachable!(),
        }
    }
}
impl<T: ?Sized> Arc<T> {
    /// Converts `std::sync::Arc` to `loom::sync::Arc`.
    ///
    /// This is needed to create a `loom::sync::Arc<T>` where `T: !Sized`.
    ///
    /// ## Panics
    ///
    /// If the provided `Arc` has copies (i.e., if it is not unique).
    ///
    /// ## Examples
    ///
    /// While `std::sync::Arc` with `T: !Sized` can be created by coercing an
    /// `std::sync::Arc` with a sized value:
    ///
    /// ```rust
    /// let sized: std::sync::Arc<[u8; 3]> = std::sync::Arc::new([1, 2, 3]);
    /// let _unsized: std::sync::Arc<[u8]> = sized; // coercion
    /// ```
    ///
    /// `loom::sync::Arc` can't be created in the same way:
    ///
    /// ```compile_fail,E0308
    /// use loom::sync::Arc;
    ///
    /// let sized: Arc<[u8; 3]> = Arc::new([1, 2, 3]);
    /// let _unsized: Arc<[u8]> = sized; // error: mismatched types
    /// ```
    ///
    /// This is because `std::sync::Arc` uses an unstable trait called `CoerceUnsized`
    /// that loom can't use. To create `loom::sync::Arc` with an unsized inner value
    /// first create a `std::sync::Arc` of an appropriate type and then use this method:
    ///
    /// ```rust
    /// use loom::sync::Arc;
    ///
    /// # loom::model::model(|| {
    /// let std: std::sync::Arc<[u8]> = std::sync::Arc::new([1, 2, 3]);
    /// let loom: Arc<[u8]> = Arc::from_std(std);
    ///
    /// let std: std::sync::Arc<dyn Send + Sync> = std::sync::Arc::new([1, 2, 3]);
    /// let loom: Arc<dyn Send + Sync> = Arc::from_std(std);
    /// # });
    /// ```
    #[track_caller]
    pub fn from_std(mut std: std::sync::Arc<T>) -> Self {
        assert!(
            std::sync::Arc::get_mut(&mut std).is_some(),
            "Arc provided to `from_std` is not unique"
        );
        let obj = std::sync::Arc::new(rt::Arc::new(location!()));
        let objc = std::sync::Arc::clone(&obj);
        // Register the model object under the value's address so `from_raw`
        // can later recover it from just a pointer.
        rt::execution(|e| {
            e.arc_objs
                .insert(std::sync::Arc::as_ptr(&std) as *const (), objc);
        });
        Arc { obj, value: std }
    }
    /// Gets the number of strong (`Arc`) pointers to this value.
    #[track_caller]
    pub fn strong_count(this: &Self) -> usize {
        this.obj.strong_count()
    }
    /// Increments the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw`, and the
    /// associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method.
    #[track_caller]
    pub unsafe fn increment_strong_count(ptr: *const T) {
        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
        let arc = mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr));
        // Now increase refcount, but don't drop new refcount either
        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
    }
    /// Decrements the strong reference count on the `Arc<T>` associated with the
    /// provided pointer by one.
    ///
    /// # Safety
    ///
    /// The pointer must have been obtained through `Arc::into_raw`, and the
    /// associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) when invoking this method. This method can be used to release the final
    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
    /// released.
    #[track_caller]
    pub unsafe fn decrement_strong_count(ptr: *const T) {
        // Reconstituting and dropping the Arc performs the decrement.
        mem::drop(Arc::from_raw(ptr));
    }
    /// Returns a mutable reference to the inner value, if there are
    /// no other `Arc` pointers to the same value.
    #[track_caller]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.obj.get_mut(location!()) {
            // The model says we are unique; the std count must agree.
            assert_eq!(1, std::sync::Arc::strong_count(&this.value));
            Some(std::sync::Arc::get_mut(&mut this.value).unwrap())
        } else {
            None
        }
    }
    /// Returns `true` if the two `Arc`s point to the same value (not
    /// just values that compare as equal).
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        std::sync::Arc::ptr_eq(&this.value, &other.value)
    }
    /// Consumes the `Arc`, returning the wrapped pointer.
    pub fn into_raw(this: Self) -> *const T {
        let ptr = Self::as_ptr(&this);
        // Forget `this` so neither refcount is decremented.
        mem::forget(this);
        ptr
    }
    /// Provides a raw pointer to the data.
    pub fn as_ptr(this: &Self) -> *const T {
        std::sync::Arc::as_ptr(&this.value)
    }
    /// Constructs an `Arc` from a raw pointer.
    ///
    /// # Safety
    ///
    /// The raw pointer must have been previously returned by a call to
    /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
    /// alignment as `T`. This is trivially true if `U` is `T`.
    /// Note that if `U` is not `T` but has the same size and alignment, this is
    /// basically like transmuting references of different types. See
    /// [`mem::transmute`][transmute] for more information on what
    /// restrictions apply in this case.
    ///
    /// The user of `from_raw` has to make sure a specific value of `T` is only
    /// dropped once.
    ///
    /// This function is unsafe because improper use may lead to memory unsafety,
    /// even if the returned `Arc<T>` is never accessed.
    ///
    /// [into_raw]: Arc::into_raw
    /// [transmute]: core::mem::transmute
    #[track_caller]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        let inner = std::sync::Arc::from_raw(ptr);
        // Recover the model object registered by `from_std` for this address.
        let obj = rt::execution(|e| std::sync::Arc::clone(&e.arc_objs[&ptr.cast()]));
        Arc { value: inner, obj }
    }
    /// Unregister this object before it's gone.
    fn unregister(&self) {
        rt::execution(|e| {
            e.arc_objs
                .remove(&std::sync::Arc::as_ptr(&self.value).cast())
                .expect("Arc object was removed before dropping last Arc");
        });
    }
}
impl<T: ?Sized> ops::Deref for Arc<T> {
    type Target = T;
    /// Borrow the wrapped value via the inner `std` Arc.
    fn deref(&self) -> &T {
        &*self.value
    }
}
impl<T: ?Sized> Clone for Arc<T> {
    /// Clone the handle, recording the new strong reference with the model
    /// before cloning the underlying `std` Arcs.
    #[track_caller]
    fn clone(&self) -> Arc<T> {
        self.obj.ref_inc(location!());
        Arc {
            value: self.value.clone(),
            obj: self.obj.clone(),
        }
    }
}
impl<T: ?Sized> Drop for Arc<T> {
    #[track_caller]
    fn drop(&mut self) {
        // `ref_dec` returns true when this was the last strong reference, in
        // which case the allocation is removed from the execution's registry.
        if self.obj.ref_dec(location!()) {
            assert_eq!(
                1,
                std::sync::Arc::strong_count(&self.value),
                "something odd is going on"
            );
            self.unregister();
        }
    }
}
impl<T: Default> Default for Arc<T> {
    /// Equivalent to `Arc::new(T::default())`.
    #[track_caller]
    fn default() -> Arc<T> {
        Arc::new(T::default())
    }
}
impl<T> From<T> for Arc<T> {
#[track_caller]
fn from(t: T) -> Self {
Arc::new(t)
}
}

View file

@ -0,0 +1,117 @@
use crate::rt;
use std::sync::atomic::Ordering;
// Shared adapter over the runtime's atomic cell; the public AtomicBool /
// AtomicPtr / atomic-integer types all delegate to this.
#[derive(Debug)]
pub(crate) struct Atomic<T> {
    /// Atomic object
    state: rt::Atomic<T>,
}
impl<T> Atomic<T>
where
    T: rt::Numeric,
{
    /// Create a new mock atomic holding `value`; the caller's source location
    /// is forwarded to the runtime.
    pub(crate) fn new(value: T, location: rt::Location) -> Atomic<T> {
        let state = rt::Atomic::new(value, location);
        Atomic { state }
    }
    /// Load the value without synchronization.
    ///
    /// # Safety
    ///
    /// The value must not be concurrently accessed by other threads; see the
    /// public `unsync_load` wrappers for details.
    #[track_caller]
    pub(crate) unsafe fn unsync_load(&self) -> T {
        self.state.unsync_load(location!())
    }
    /// Atomic load with the given ordering.
    #[track_caller]
    pub(crate) fn load(&self, order: Ordering) -> T {
        self.state.load(location!(), order)
    }
    /// Atomic store with the given ordering.
    #[track_caller]
    pub(crate) fn store(&self, value: T, order: Ordering) {
        self.state.store(location!(), value, order)
    }
    /// Run `f` with mutable access to the inner value (requires `&mut self`,
    /// so no synchronization is involved).
    #[track_caller]
    pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut T) -> R) -> R {
        self.state.with_mut(location!(), f)
    }
    /// Read-modify-write
    ///
    /// Always reads the most recent write
    #[track_caller]
    pub(crate) fn rmw<F>(&self, f: F, order: Ordering) -> T
    where
        F: FnOnce(T) -> T,
    {
        // An infallible rmw is a fallible one whose closure never fails.
        self.try_rmw::<_, ()>(order, order, |v| Ok(f(v))).unwrap()
    }
    /// Fallible read-modify-write; forwards success/failure orderings to the
    /// runtime.
    #[track_caller]
    fn try_rmw<F, E>(&self, success: Ordering, failure: Ordering, f: F) -> Result<T, E>
    where
        F: FnOnce(T) -> Result<T, E>,
    {
        self.state.rmw(location!(), success, failure, f)
    }
    /// Replace the value, returning the previous one.
    #[track_caller]
    pub(crate) fn swap(&self, val: T, order: Ordering) -> T {
        self.rmw(|_| val, order)
    }
    /// Compare-and-swap returning the previous value (success and failure are
    /// indistinguishable in the return, as in `std`'s deprecated API).
    #[track_caller]
    pub(crate) fn compare_and_swap(&self, current: T, new: T, order: Ordering) -> T {
        use self::Ordering::*;
        // Derive the failure ordering from the success ordering the same way
        // `std`'s `compare_and_swap` documentation specifies.
        let failure = match order {
            Relaxed | Release => Relaxed,
            Acquire | AcqRel => Acquire,
            _ => SeqCst,
        };
        match self.compare_exchange(current, new, order, failure) {
            Ok(v) => v,
            Err(v) => v,
        }
    }
    /// Store `new` if the current value equals `current`; `Ok(previous)` on
    /// success, `Err(actual)` on failure.
    #[track_caller]
    pub(crate) fn compare_exchange(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        self.try_rmw(success, failure, |actual| {
            if actual == current {
                Ok(new)
            } else {
                Err(actual)
            }
        })
    }
    /// Repeatedly apply `f` until the compare-exchange succeeds or `f`
    /// returns `None`; mirrors `std`'s `fetch_update`.
    #[track_caller]
    pub(crate) fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange(prev, next, set_order, fetch_order) {
                Ok(x) => return Ok(x),
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}

View file

@ -0,0 +1,131 @@
use super::Atomic;
use std::sync::atomic::Ordering;
/// Mock implementation of `std::sync::atomic::AtomicBool`.
///
/// NOTE: Unlike `std::sync::atomic::AtomicBool`, this type has a different
/// in-memory representation than `bool`.
// Newtype over the shared mock-atomic cell.
#[derive(Debug)]
pub struct AtomicBool(Atomic<bool>);
impl AtomicBool {
    /// Creates a new instance of `AtomicBool`.
    #[track_caller]
    pub fn new(v: bool) -> AtomicBool {
        AtomicBool(Atomic::new(v, location!()))
    }
    /// Load the value without any synchronization.
    ///
    /// # Safety
    ///
    /// An unsynchronized atomic load technically always has undefined behavior.
    /// However, if the atomic value is not currently visible by other threads,
    /// this *should* always be equivalent to a non-atomic load of an un-shared
    /// `bool` value.
    #[track_caller]
    pub unsafe fn unsync_load(&self) -> bool {
        self.0.unsync_load()
    }
    /// Loads a value from the atomic bool.
    #[track_caller]
    pub fn load(&self, order: Ordering) -> bool {
        self.0.load(order)
    }
    /// Stores a value into the atomic bool.
    #[track_caller]
    pub fn store(&self, val: bool, order: Ordering) {
        self.0.store(val, order)
    }
    /// Stores a value into the atomic bool, returning the previous value.
    #[track_caller]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        self.0.swap(val, order)
    }
    /// Stores a value into the atomic bool if the current value is the same as the `current` value.
    #[track_caller]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        self.0.compare_and_swap(current, new, order)
    }
    /// Stores a value into the atomic if the current value is the same as the `current` value.
    #[track_caller]
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        self.0.compare_exchange(current, new, success, failure)
    }
    /// Stores a value into the atomic if the current value is the same as the current value.
    #[track_caller]
    pub fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        // Spurious failures are not modeled: identical to `compare_exchange`.
        self.compare_exchange(current, new, success, failure)
    }
    /// Logical "and" with the current value.
    #[track_caller]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        self.0.rmw(|v| v & val, order)
    }
    /// Logical "nand" with the current value.
    #[track_caller]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        self.0.rmw(|v| !(v & val), order)
    }
    /// Logical "or" with the current value.
    #[track_caller]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        self.0.rmw(|v| v | val, order)
    }
    /// Logical "xor" with the current value.
    #[track_caller]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        self.0.rmw(|v| v ^ val, order)
    }
    /// Fetches the value, and applies a function to it that returns an optional new value. Returns
    /// a [`Result`] of [`Ok`]`(previous_value)` if the function returned [`Some`]`(_)`, else
    /// [`Err`]`(previous_value)`.
    #[track_caller]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        f: F,
    ) -> Result<bool, bool>
    where
        F: FnMut(bool) -> Option<bool>,
    {
        self.0.fetch_update(set_order, fetch_order, f)
    }
}
impl Default for AtomicBool {
    /// Equivalent to `AtomicBool::new(false)`.
    fn default() -> AtomicBool {
        AtomicBool::new(bool::default())
    }
}
impl From<bool> for AtomicBool {
fn from(b: bool) -> Self {
Self::new(b)
}
}

View file

@ -0,0 +1,201 @@
use super::Atomic;
use std::sync::atomic::Ordering;
// TODO: use `#[doc = concat!()]` directly once `extended_key_value_attributes` stable.
// Attaches `$doc` as a `#[doc]` attribute to the item that follows it.
macro_rules! doc_comment {
    ($doc:expr, $($tt:tt)*) => {
        #[doc = $doc]
        $($tt)*
    };
}
// Generates one mock atomic integer type (`$name`, backed by
// `Atomic<$atomic_type>`) together with its full `std`-like method surface.
#[rustfmt::skip] // rustfmt cannot properly format multi-line concat!.
macro_rules! atomic_int {
    ($name: ident, $atomic_type: ty) => {
        doc_comment! {
            concat!(
                " Mock implementation of `std::sync::atomic::", stringify!($name), "`.\n\n\
                 NOTE: Unlike `std::sync::atomic::", stringify!($name), "`, \
                 this type has a different in-memory representation than `",
                 stringify!($atomic_type), "`.",
            ),
            #[derive(Debug)]
            pub struct $name(Atomic<$atomic_type>);
        }
        impl $name {
            doc_comment! {
                concat!(" Creates a new instance of `", stringify!($name), "`."),
                #[track_caller]
                pub fn new(v: $atomic_type) -> Self {
                    Self(Atomic::new(v, location!()))
                }
            }
            /// Get access to a mutable reference to the inner value.
            #[track_caller]
            pub fn with_mut<R>(&mut self, f: impl FnOnce(&mut $atomic_type) -> R) -> R {
                self.0.with_mut(f)
            }
            /// Load the value without any synchronization.
            ///
            /// # Safety
            ///
            /// An unsynchronized atomic load technically always has undefined behavior.
            /// However, if the atomic value is not currently visible by other threads,
            /// this *should* always be equivalent to a non-atomic load of an un-shared
            /// integer value.
            #[track_caller]
            pub unsafe fn unsync_load(&self) -> $atomic_type {
                self.0.unsync_load()
            }
            /// Loads a value from the atomic integer.
            #[track_caller]
            pub fn load(&self, order: Ordering) -> $atomic_type {
                self.0.load(order)
            }
            /// Stores a value into the atomic integer.
            #[track_caller]
            pub fn store(&self, val: $atomic_type, order: Ordering) {
                self.0.store(val, order)
            }
            /// Stores a value into the atomic integer, returning the previous value.
            #[track_caller]
            pub fn swap(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.swap(val, order)
            }
            /// Stores a value into the atomic integer if the current value is the same as the `current` value.
            #[track_caller]
            pub fn compare_and_swap(
                &self,
                current: $atomic_type,
                new: $atomic_type,
                order: Ordering,
            ) -> $atomic_type {
                self.0.compare_and_swap(current, new, order)
            }
            /// Stores a value into the atomic if the current value is the same as the `current` value.
            #[track_caller]
            pub fn compare_exchange(
                &self,
                current: $atomic_type,
                new: $atomic_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$atomic_type, $atomic_type> {
                self.0.compare_exchange(current, new, success, failure)
            }
            /// Stores a value into the atomic if the current value is the same as the current value.
            #[track_caller]
            pub fn compare_exchange_weak(
                &self,
                current: $atomic_type,
                new: $atomic_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$atomic_type, $atomic_type> {
                // Spurious failures are not modeled; same as `compare_exchange`.
                self.compare_exchange(current, new, success, failure)
            }
            /// Adds to the current value, returning the previous value.
            #[track_caller]
            pub fn fetch_add(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| v.wrapping_add(val), order)
            }
            /// Subtracts from the current value, returning the previous value.
            #[track_caller]
            pub fn fetch_sub(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| v.wrapping_sub(val), order)
            }
            /// Bitwise "and" with the current value.
            #[track_caller]
            pub fn fetch_and(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| v & val, order)
            }
            /// Bitwise "nand" with the current value.
            #[track_caller]
            pub fn fetch_nand(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| !(v & val), order)
            }
            /// Bitwise "or" with the current value.
            #[track_caller]
            pub fn fetch_or(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| v | val, order)
            }
            /// Bitwise "xor" with the current value.
            #[track_caller]
            pub fn fetch_xor(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| v ^ val, order)
            }
            /// Stores the maximum of the current and provided value, returning the previous value
            #[track_caller]
            pub fn fetch_max(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| v.max(val), order)
            }
            /// Stores the minimum of the current and provided value, returning the previous value
            #[track_caller]
            pub fn fetch_min(&self, val: $atomic_type, order: Ordering) -> $atomic_type {
                self.0.rmw(|v| v.min(val), order)
            }
            /// Fetches the value, and applies a function to it that returns an optional new value.
            /// Returns a [`Result`] of [`Ok`]`(previous_value)` if the function returned
            /// [`Some`]`(_)`, else [`Err`]`(previous_value)`.
            #[track_caller]
            pub fn fetch_update<F>(
                &self,
                set_order: Ordering,
                fetch_order: Ordering,
                f: F,
            ) -> Result<$atomic_type, $atomic_type>
            where
                F: FnMut($atomic_type) -> Option<$atomic_type>,
            {
                self.0.fetch_update(set_order, fetch_order, f)
            }
        }
        impl Default for $name {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }
        impl From<$atomic_type> for $name {
            fn from(v: $atomic_type) -> Self {
                Self::new(v)
            }
        }
    };
}
// Instantiate the mock atomic integer types for every supported width.
atomic_int!(AtomicU8, u8);
atomic_int!(AtomicU16, u16);
atomic_int!(AtomicU32, u32);
atomic_int!(AtomicUsize, usize);
atomic_int!(AtomicI8, i8);
atomic_int!(AtomicI16, i16);
atomic_int!(AtomicI32, i32);
atomic_int!(AtomicIsize, isize);
// 64-bit atomics are only provided on 64-bit targets.
#[cfg(target_pointer_width = "64")]
atomic_int!(AtomicU64, u64);
#[cfg(target_pointer_width = "64")]
atomic_int!(AtomicI64, i64);

View file

@ -0,0 +1,27 @@
//! Mock implementation of `std::sync::atomic`.
#[allow(clippy::module_inception)]
mod atomic;
use self::atomic::Atomic;
mod bool;
pub use self::bool::AtomicBool;
mod int;
pub use self::int::{AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize};
pub use self::int::{AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize};
mod ptr;
pub use self::ptr::AtomicPtr;
pub use std::sync::atomic::Ordering;
/// Signals the processor that it is entering a busy-wait spin-loop.
pub fn spin_loop_hint() {
    // Under loom a spin is modeled as a yield so the scheduler can explore
    // interleavings where other threads make progress.
    crate::thread::yield_now();
}
/// An atomic fence.
pub fn fence(order: Ordering) {
    // Delegates to the runtime, which models the fence's ordering effects.
    crate::rt::fence(order);
}

View file

@ -0,0 +1,118 @@
use super::Atomic;
use std::sync::atomic::Ordering;
/// Mock implementation of `std::sync::atomic::AtomicPtr`.
///
/// NOTE: Unlike `std::sync::atomic::AtomicPtr`, this type has a different
/// in-memory representation than `*mut T`.
// `Debug` is implemented manually below so that no `T: Debug` bound is added.
pub struct AtomicPtr<T>(Atomic<*mut T>);
impl<T> std::fmt::Debug for AtomicPtr<T> {
    /// Delegate to the inner atomic cell's `Debug` output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Debug::fmt(&self.0, f)
    }
}
impl<T> AtomicPtr<T> {
    /// Creates a new instance of `AtomicPtr`.
    #[track_caller]
    pub fn new(v: *mut T) -> AtomicPtr<T> {
        AtomicPtr(Atomic::new(v, location!()))
    }
    /// Load the value without any synchronization.
    ///
    /// # Safety
    ///
    /// An unsynchronized atomic load technically always has undefined behavior.
    /// However, if the atomic value is not currently visible by other threads,
    /// this *should* always be equivalent to a non-atomic load of an un-shared
    /// `*mut T` value.
    // Fix: `#[track_caller]` was missing here, unlike the AtomicBool and
    // atomic-integer counterparts, so the location recorded by the inner
    // `unsync_load` pointed into loom instead of at the user's call site.
    #[track_caller]
    pub unsafe fn unsync_load(&self) -> *mut T {
        self.0.unsync_load()
    }
    /// Get access to a mutable reference to the inner value.
    #[track_caller]
    pub fn with_mut<R>(&mut self, f: impl FnOnce(&mut *mut T) -> R) -> R {
        self.0.with_mut(f)
    }
    /// Loads a value from the pointer.
    #[track_caller]
    pub fn load(&self, order: Ordering) -> *mut T {
        self.0.load(order)
    }
    /// Stores a value into the pointer.
    #[track_caller]
    pub fn store(&self, val: *mut T, order: Ordering) {
        self.0.store(val, order)
    }
    /// Stores a value into the pointer, returning the previous value.
    #[track_caller]
    pub fn swap(&self, val: *mut T, order: Ordering) -> *mut T {
        self.0.swap(val, order)
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    #[track_caller]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        self.0.compare_and_swap(current, new, order)
    }
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    #[track_caller]
    pub fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        self.0.compare_exchange(current, new, success, failure)
    }
    /// Stores a value into the atomic if the current value is the same as the current value.
    #[track_caller]
    pub fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        // Spurious failures are not modeled; same as `compare_exchange`.
        self.compare_exchange(current, new, success, failure)
    }
    /// Fetches the value, and applies a function to it that returns an optional new value. Returns
    /// a [`Result`] of [`Ok`]`(previous_value)` if the function returned [`Some`]`(_)`, else
    /// [`Err`]`(previous_value)`.
    #[track_caller]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        f: F,
    ) -> Result<*mut T, *mut T>
    where
        F: FnMut(*mut T) -> Option<*mut T>,
    {
        self.0.fetch_update(set_order, fetch_order, f)
    }
}
impl<T> Default for AtomicPtr<T> {
    /// Equivalent to `AtomicPtr::new(ptr::null_mut())`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(std::ptr::null_mut())
    }
}
impl<T> From<*mut T> for AtomicPtr<T> {
fn from(p: *mut T) -> Self {
Self::new(p)
}
}

View file

@ -0,0 +1,19 @@
//! A stub for `std::sync::Barrier`.
#[derive(Debug)]
/// `std::sync::Barrier` is not supported yet in Loom. This stub is provided just
/// to make the code to compile.
// Zero-sized placeholder; every method panics via `unimplemented!`.
pub struct Barrier {}
impl Barrier {
    /// `std::sync::Barrier` is not supported yet in Loom. This stub is provided just
    /// to make the code to compile.
    ///
    /// # Panics
    ///
    /// Always panics.
    pub fn new(_n: usize) -> Self {
        unimplemented!("std::sync::Barrier is not supported yet in Loom.")
    }
    /// `std::sync::Barrier` is not supported yet in Loom. This stub is provided just
    /// to make the code to compile.
    ///
    /// # Panics
    ///
    /// Always panics.
    pub fn wait(&self) -> std::sync::BarrierWaitResult {
        unimplemented!("std::sync::Barrier is not supported yet in Loom.")
    }
}

View file

@ -0,0 +1,79 @@
use super::{LockResult, MutexGuard};
use crate::rt;
use std::sync::PoisonError;
use std::time::Duration;
/// Mock implementation of `std::sync::Condvar`.
#[derive(Debug)]
pub struct Condvar {
    /// Loom runtime object that models the condvar's wait / notify behavior.
    object: rt::Condvar,
}

/// A type indicating whether a timed wait on a condition variable returned due
/// to a time out or not.
///
/// NOTE(review): timeouts are not modeled yet (see `wait_timeout`), so the
/// inner flag is currently always `false` in values produced by this module.
#[derive(Debug)]
pub struct WaitTimeoutResult(bool);
impl Condvar {
    /// Creates a new condition variable which is ready to be waited on and notified.
    pub fn new() -> Condvar {
        Condvar {
            object: rt::Condvar::new(),
        }
    }

    /// Blocks the current thread until this condition variable receives a notification.
    #[track_caller]
    pub fn wait<'a, T>(&self, mut guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
        // Release the RefCell borrow guard allowing another thread to lock the
        // data
        guard.unborrow();

        // Wait until notified
        self.object.wait(guard.rt(), location!());

        // Borrow the mutex guarded data again
        guard.reborrow();

        Ok(guard)
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// NOTE: the timeout is currently ignored; this waits like [`wait`](Self::wait)
    /// and always reports that no timeout occurred.
    // Fix: `#[track_caller]` added for consistency with `wait` / `notify_*`, so
    // the location recorded via `self.wait` points at the user's call site.
    #[track_caller]
    pub fn wait_timeout<'a, T>(
        &self,
        guard: MutexGuard<'a, T>,
        _dur: Duration,
    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
        // TODO: implement timing out
        self.wait(guard)
            .map(|guard| (guard, WaitTimeoutResult(false)))
            .map_err(|err| PoisonError::new((err.into_inner(), WaitTimeoutResult(false))))
    }

    /// Wakes up one blocked thread on this condvar.
    #[track_caller]
    pub fn notify_one(&self) {
        self.object.notify_one(location!());
    }

    /// Wakes up all blocked threads on this condvar.
    #[track_caller]
    pub fn notify_all(&self) {
        self.object.notify_all(location!());
    }
}
impl WaitTimeoutResult {
/// Returns `true` if the wait was known to have timed out.
pub fn timed_out(&self) -> bool {
self.0
}
}
impl Default for Condvar {
fn default() -> Self {
Self::new()
}
}

19
third-party/vendor/loom/src/sync/mod.rs vendored Normal file
View file

@ -0,0 +1,19 @@
//! Mock implementation of `std::sync`.

// Submodules; the private ones are surfaced via the re-exports below.
mod arc;
pub mod atomic;
mod barrier;
mod condvar;
pub mod mpsc;
mod mutex;
mod notify;
mod rwlock;

pub use self::arc::Arc;
pub use self::barrier::Barrier;
pub use self::condvar::{Condvar, WaitTimeoutResult};
pub use self::mutex::{Mutex, MutexGuard};
pub use self::notify::Notify;
pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};

// These result aliases need no mocking; reuse the `std` definitions directly.
pub use std::sync::{LockResult, TryLockResult};

View file

@ -0,0 +1,89 @@
//! A stub for `std::sync::mpsc`.
use crate::rt;
/// Mock implementation of `std::sync::mpsc::channel`.
#[track_caller]
pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
    let location = location!();

    // A real `std` channel transports the values; the shared `rt::Channel`
    // lets loom track the sends and receives.
    let (tx, rx) = std::sync::mpsc::channel();
    let object = std::sync::Arc::new(rt::Channel::new(location));

    let receiver = Receiver {
        object: std::sync::Arc::clone(&object),
        receiver: rx,
    };
    let sender = Sender {
        object,
        sender: tx,
    };

    (sender, receiver)
}
#[derive(Debug)]
/// Mock implementation of `std::sync::mpsc::Sender`.
pub struct Sender<T> {
    /// Loom runtime object tracking the channel; shared with the `Receiver`.
    object: std::sync::Arc<rt::Channel>,
    /// The real `std` sender that actually transports the values.
    sender: std::sync::mpsc::Sender<T>,
}

impl<T> Sender<T> {
    /// Attempts to send a value on this channel, returning it back if it could
    /// not be sent.
    #[track_caller]
    pub fn send(&self, msg: T) -> Result<(), std::sync::mpsc::SendError<T>> {
        // Record the send with the loom runtime first, then forward the value
        // through the real channel.
        self.object.send(location!());
        self.sender.send(msg)
    }
}
impl<T> Clone for Sender<T> {
    /// Clones the sender, sharing the same loom channel object.
    fn clone(&self) -> Sender<T> {
        let object = std::sync::Arc::clone(&self.object);
        let sender = self.sender.clone();
        Sender { object, sender }
    }
}
#[derive(Debug)]
/// Mock implementation of `std::sync::mpsc::Receiver`.
pub struct Receiver<T> {
    /// Loom runtime object tracking the channel; shared with the `Sender`.
    object: std::sync::Arc<rt::Channel>,
    /// The real `std` receiver that actually transports the values.
    receiver: std::sync::mpsc::Receiver<T>,
}
impl<T> Receiver<T> {
    /// Attempts to wait for a value on this receiver, returning an error if the
    /// corresponding channel has hung up.
    #[track_caller]
    pub fn recv(&self) -> Result<T, std::sync::mpsc::RecvError> {
        // Record the receive with the loom runtime, then take the value from
        // the real channel.
        self.object.recv(location!());
        self.receiver.recv()
    }

    /// Attempts to wait for a value on this receiver, returning an error if the
    /// corresponding channel has hung up, or if it waits more than `timeout`.
    pub fn recv_timeout(
        &self,
        _timeout: std::time::Duration,
    ) -> Result<T, std::sync::mpsc::RecvTimeoutError> {
        unimplemented!("std::sync::mpsc::Receiver::recv_timeout is not supported yet in Loom.")
    }

    /// Attempts to return a pending value on this receiver without blocking.
    // Fix: `#[track_caller]` added so the location recorded by the inner
    // `recv` points at the user's call site, consistent with `recv` itself;
    // also removed the redundant `return`/`else` (clippy: redundant_else).
    #[track_caller]
    pub fn try_recv(&self) -> Result<T, std::sync::mpsc::TryRecvError> {
        // Consult the loom channel's bookkeeping so this never blocks.
        if self.object.is_empty() {
            Err(std::sync::mpsc::TryRecvError::Empty)
        } else {
            self.recv().map_err(|e| e.into())
        }
    }
}
impl<T> Drop for Receiver<T> {
    fn drop(&mut self) {
        // Drain the channel. Each drained message goes through `recv`, which
        // also updates the loom runtime's bookkeeping — presumably this keeps
        // the mock channel state consistent when the receiver dies early
        // (TODO confirm against `rt::Channel`). The `unwrap` should not fail:
        // `is_empty` just reported a pending message, so `recv` returns `Ok`.
        while !self.object.is_empty() {
            self.recv().unwrap();
        }
    }
}

View file

@ -0,0 +1,115 @@
use crate::rt;
use std::ops;
use std::sync::{LockResult, TryLockError, TryLockResult};
/// Mock implementation of `std::sync::Mutex`.
#[derive(Debug)]
pub struct Mutex<T> {
    /// Loom runtime object modeling the lock's acquire / release behavior.
    object: rt::Mutex,
    /// The real `std` mutex guarding the data.
    data: std::sync::Mutex<T>,
}

/// Mock implementation of `std::sync::MutexGuard`.
#[derive(Debug)]
pub struct MutexGuard<'a, T> {
    lock: &'a Mutex<T>,
    /// `data` is an `Option` so the `std` guard can be dropped (and re-taken)
    /// independently of this mock guard — `Condvar::wait` releases it via
    /// `unborrow`/`reborrow`, and `Drop` releases it before the mock lock.
    data: Option<std::sync::MutexGuard<'a, T>>,
}
// Fix: the original had two adjacent `impl<T> Mutex<T>` blocks with identical
// generics; merged into one for clarity. No behavior change.
impl<T> Mutex<T> {
    /// Creates a new mutex in an unlocked state ready for use.
    pub fn new(data: T) -> Mutex<T> {
        Mutex {
            data: std::sync::Mutex::new(data),
            object: rt::Mutex::new(true),
        }
    }

    /// Acquires a mutex, blocking the current thread until it is able to do so.
    #[track_caller]
    pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
        // Block (from loom's point of view) until the mock lock is granted;
        // the inner `std` lock is then uncontended.
        self.object.acquire_lock(location!());

        Ok(MutexGuard {
            lock: self,
            data: Some(self.data.lock().unwrap()),
        })
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
    /// guard is dropped.
    ///
    /// This function does not block.
    #[track_caller]
    pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
        if self.object.try_acquire_lock(location!()) {
            Ok(MutexGuard {
                lock: self,
                data: Some(self.data.lock().unwrap()),
            })
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Consumes this mutex, returning the underlying data.
    pub fn into_inner(self) -> LockResult<T> {
        Ok(self.data.into_inner().unwrap())
    }
}
impl<T: ?Sized + Default> Default for Mutex<T> {
/// Creates a `Mutex<T>`, with the `Default` value for T.
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T> From<T> for Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
/// This is equivalent to [`Mutex::new`].
fn from(t: T) -> Self {
Self::new(t)
}
}
impl<'a, T: 'a> MutexGuard<'a, T> {
    /// Drops the inner `std` guard, ending the data borrow while this mock
    /// guard stays alive (used by `Condvar::wait` before blocking).
    pub(super) fn unborrow(&mut self) {
        self.data = None;
    }

    /// Re-acquires the inner `std` guard after `unborrow`.
    pub(super) fn reborrow(&mut self) {
        self.data = Some(self.lock.data.lock().unwrap());
    }

    /// Returns the loom runtime object backing the parent mutex.
    pub(super) fn rt(&self) -> &rt::Mutex {
        &self.lock.object
    }
}
impl<'a, T> ops::Deref for MutexGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // The `std` guard is always present while the guard is usable.
        self.data.as_deref().unwrap()
    }
}

impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        self.data.as_deref_mut().unwrap()
    }
}

impl<'a, T: 'a> Drop for MutexGuard<'a, T> {
    fn drop(&mut self) {
        // Drop the `std` guard first, then release the mock lock; the loom
        // runtime may immediately schedule another thread once it is free.
        self.data = None;
        self.lock.object.release_lock();
    }
}

View file

@ -0,0 +1,51 @@
use crate::rt;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::SeqCst;
/// Implements the park / unpark pattern directly using Loom's internal
/// primitives.
///
/// Notification establishes an acquire / release synchronization point.
///
/// Using this type is useful to mock out constructs when using loom tests.
#[derive(Debug)]
pub struct Notify {
    object: rt::Notify,

    /// Enforces the single waiter invariant
    waiting: AtomicBool,
}

impl Notify {
    /// Create a new `Notify`.
    pub fn new() -> Notify {
        Notify {
            object: rt::Notify::new(false, true),
            waiting: AtomicBool::new(false),
        }
    }

    /// Notify the waiter
    #[track_caller]
    pub fn notify(&self) {
        self.object.notify(location!());
    }

    /// Wait for a notification
    ///
    /// # Panics
    ///
    /// Panics if a second thread waits while another wait is in progress;
    /// only a single concurrent waiter is supported.
    #[track_caller]
    pub fn wait(&self) {
        // Claim the single waiter slot; a concurrent waiter trips the expect.
        self.waiting
            .compare_exchange(false, true, SeqCst, SeqCst)
            .expect("only a single thread may wait on `Notify`");

        self.object.wait(location!());

        // Free the slot so a later (non-overlapping) wait is allowed.
        self.waiting.store(false, SeqCst);
    }
}
impl Default for Notify {
fn default() -> Self {
Self::new()
}
}

View file

@ -0,0 +1,164 @@
use crate::rt;
use std::ops;
use std::sync::{LockResult, TryLockError, TryLockResult};
/// Mock implementation of `std::sync::RwLock`
#[derive(Debug)]
pub struct RwLock<T> {
    /// Loom runtime object modeling the reader / writer lock state.
    object: rt::RwLock,
    /// The real `std` lock guarding the data.
    data: std::sync::RwLock<T>,
}

/// Mock implementation of `std::sync::RwLockReadGuard`
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T> {
    lock: &'a RwLock<T>,
    /// `data` is an Option so that the Drop impl can drop the std guard and
    /// release the std lock before releasing the loom mock lock (mirrors
    /// `RwLockWriteGuard` below).
    data: Option<std::sync::RwLockReadGuard<'a, T>>,
}

/// Mock implementation of `std::sync::RwLockWriteGuard`
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T> {
    lock: &'a RwLock<T>,
    /// `data` is an Option so that the Drop impl can drop the std guard and release the std lock
    /// before releasing the loom mock lock, as that might cause another thread to acquire the lock
    data: Option<std::sync::RwLockWriteGuard<'a, T>>,
}
impl<T> RwLock<T> {
    /// Creates a new rwlock in an unlocked state ready for use.
    pub fn new(data: T) -> RwLock<T> {
        RwLock {
            data: std::sync::RwLock::new(data),
            object: rt::RwLock::new(),
        }
    }

    /// Locks this rwlock with shared read access, blocking the current
    /// thread until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers
    /// which hold the lock. There may be other readers currently inside the
    /// lock when this method returns. This method does not provide any
    /// guarantees with respect to the ordering of whether contentious readers
    /// or writers will acquire the lock first.
    #[track_caller]
    pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
        self.object.acquire_read_lock(location!());

        // The mock lock has granted read access, so the inner `std` lock must
        // be immediately available; anything else indicates corrupted state.
        Ok(RwLockReadGuard {
            lock: self,
            data: Some(self.data.try_read().expect("loom::RwLock state corrupt")),
        })
    }

    /// Attempts to acquire this rwlock with shared read access.
    ///
    /// If the access could not be granted at this time, then Err is returned.
    /// Otherwise, an RAII guard is returned which will release the shared
    /// access when it is dropped.
    ///
    /// This function does not block.
    #[track_caller]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
        if self.object.try_acquire_read_lock(location!()) {
            Ok(RwLockReadGuard {
                lock: self,
                data: Some(self.data.try_read().expect("loom::RwLock state corrupt")),
            })
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Locks this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    #[track_caller]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
        self.object.acquire_write_lock(location!());

        // As in `read`: the mock lock guarantees exclusive access here.
        Ok(RwLockWriteGuard {
            lock: self,
            data: Some(self.data.try_write().expect("loom::RwLock state corrupt")),
        })
    }

    /// Attempts to lock this rwlock with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then Err is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    #[track_caller]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
        if self.object.try_acquire_write_lock(location!()) {
            Ok(RwLockWriteGuard {
                lock: self,
                data: Some(self.data.try_write().expect("loom::RwLock state corrupt")),
            })
        } else {
            Err(TryLockError::WouldBlock)
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    pub fn into_inner(self) -> LockResult<T> {
        Ok(self.data.into_inner().expect("loom::RwLock state corrupt"))
    }
}
impl<T: Default> Default for RwLock<T> {
/// Creates a `RwLock<T>`, with the `Default` value for T.
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T> From<T> for RwLock<T> {
/// Creates a new rwlock in an unlocked state ready for use.
/// This is equivalent to [`RwLock::new`].
fn from(t: T) -> Self {
Self::new(t)
}
}
impl<'a, T> ops::Deref for RwLockReadGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // The `std` guard is always present while the guard is usable.
        self.data.as_deref().unwrap()
    }
}

impl<'a, T: 'a> Drop for RwLockReadGuard<'a, T> {
    fn drop(&mut self) {
        // Drop the `std` guard before telling the loom runtime the read lock
        // is free; another thread may be scheduled immediately afterwards.
        self.data = None;
        self.lock.object.release_read_lock()
    }
}

impl<'a, T> ops::Deref for RwLockWriteGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.data.as_deref().unwrap()
    }
}

impl<'a, T> ops::DerefMut for RwLockWriteGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        self.data.as_deref_mut().unwrap()
    }
}

impl<'a, T: 'a> Drop for RwLockWriteGuard<'a, T> {
    fn drop(&mut self) {
        // Same ordering as the read guard: std guard first, mock lock second.
        self.data = None;
        self.lock.object.release_write_lock()
    }
}

302
third-party/vendor/loom/src/thread.rs vendored Normal file
View file

@ -0,0 +1,302 @@
//! Mock implementation of `std::thread`.
pub use crate::rt::thread::AccessError;
pub use crate::rt::yield_now;
use crate::rt::{self, Execution, Location};
pub use std::thread::panicking;
use std::marker::PhantomData;
use std::sync::{Arc, Mutex};
use std::{fmt, io};
use tracing::trace;
/// Mock implementation of `std::thread::JoinHandle`.
pub struct JoinHandle<T> {
    /// Slot the spawned closure's result is written into; read by `join`.
    result: Arc<Mutex<Option<std::thread::Result<T>>>>,
    /// Signaled when the spawned closure finishes; `join` waits on it.
    notify: rt::Notify,
    /// Handle describing the spawned thread.
    thread: Thread,
}

/// Mock implementation of `std::thread::Thread`.
#[derive(Clone, Debug)]
pub struct Thread {
    id: ThreadId,
    name: Option<String>,
}
impl Thread {
    /// Returns a unique identifier for this thread
    pub fn id(&self) -> ThreadId {
        self.id
    }

    /// Returns the (optional) name of this thread
    pub fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }

    /// Mock implementation of [`std::thread::Thread::unpark`].
    ///
    /// Atomically makes the handle's token available if it is not already.
    ///
    /// Every thread is equipped with some basic low-level blocking support, via
    /// the [`park`][park] function and the `unpark()` method. These can be
    /// used as a more CPU-efficient implementation of a spinlock.
    ///
    /// See the [park documentation][park] for more details.
    pub fn unpark(&self) {
        // Forwards to the loom execution's thread set, keyed by our rt id.
        rt::execution(|execution| execution.threads.unpark(self.id.id));
    }
}
/// Mock implementation of `std::thread::ThreadId`.
#[derive(Clone, Copy, Eq, Hash, PartialEq)]
pub struct ThreadId {
    id: crate::rt::thread::Id,
}

impl std::fmt::Debug for ThreadId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Mirrors std's `ThreadId(N)` Debug rendering, using the runtime's
        // public id rather than the internal one.
        write!(f, "ThreadId({})", self.id.public_id())
    }
}
/// Mock implementation of `std::thread::LocalKey`.
pub struct LocalKey<T> {
    // Sadly, these fields have to be public, since function pointers in const
    // fns are unstable. When fn pointer arguments to const fns stabilize, these
    // should be made private and replaced with a `const fn new`.
    //
    // User code should not rely on the existence of these fields.
    #[doc(hidden)]
    pub init: fn() -> T,
    #[doc(hidden)]
    pub _p: PhantomData<fn(T)>,
}

/// Thread factory, which can be used in order to configure the properties of
/// a new thread.
#[derive(Debug)]
pub struct Builder {
    /// Optional thread name, set via [`Builder::name`].
    name: Option<String>,
}

// Per-execution storage for the current `Thread` handle. The initializer is
// unreachable because the slot is seeded via `init_current` before it is read
// (see `current`), so it should never be invoked.
static CURRENT_THREAD_KEY: LocalKey<Thread> = LocalKey {
    init: || unreachable!(),
    _p: PhantomData,
};
/// Builds, registers, and returns the `Thread` handle for the currently
/// active execution thread.
fn init_current(execution: &mut Execution, name: Option<String>) -> Thread {
    let thread = Thread {
        id: ThreadId {
            id: execution.threads.active_id(),
        },
        name,
    };

    execution
        .threads
        .local_init(&CURRENT_THREAD_KEY, thread.clone());

    thread
}
/// Returns a handle to the current thread.
pub fn current() -> Thread {
    rt::execution(|execution| {
        let thread = execution.threads.local(&CURRENT_THREAD_KEY);

        if let Some(thread) = thread {
            // NOTE(review): the inner `unwrap` assumes the slot is never in
            // an error state while the thread is running — confirm against
            // `rt::thread`'s local-storage semantics.
            thread.unwrap().clone()
        } else {
            // Lazily initialize the current() Thread. This is done to help
            // handle the initial (unnamed) bootstrap thread.
            init_current(execution, None)
        }
    })
}
/// Mock implementation of `std::thread::spawn`.
///
/// Note that you may only have [`MAX_THREADS`](crate::MAX_THREADS) threads in a given loom tests
/// _including_ the main thread.
///
/// NOTE(review): unlike `std::thread::spawn`, no `Send` bound is required on
/// `F`/`T` — presumably because loom simulates threads within one OS thread;
/// confirm this is intentional before tightening the bounds.
#[track_caller]
pub fn spawn<F, T>(f: F) -> JoinHandle<T>
where
    F: FnOnce() -> T,
    F: 'static,
    T: 'static,
{
    spawn_internal(f, None, location!())
}
/// Mock implementation of `std::thread::park`.
///
/// Blocks unless or until the current thread's token is made available.
///
/// A call to `park` does not guarantee that the thread will remain parked
/// forever, and callers should be prepared for this possibility.
#[track_caller]
pub fn park() {
    // Delegates to the loom runtime, recording the caller's location for
    // diagnostics.
    rt::park(location!());
}
/// Shared implementation behind [`spawn`] and [`Builder::spawn`].
fn spawn_internal<F, T>(f: F, name: Option<String>, location: Location) -> JoinHandle<T>
where
    F: FnOnce() -> T,
    F: 'static,
    T: 'static,
{
    // Shared slot the spawned closure writes its result into, plus a notify
    // the `JoinHandle` waits on.
    let result = Arc::new(Mutex::new(None));
    let notify = rt::Notify::new(true, false);

    let id = {
        let name = name.clone();
        let result = result.clone();
        rt::spawn(move || {
            // Register the `Thread` handle for the new runtime thread before
            // running user code, so `current()` works inside `f`.
            rt::execution(|execution| {
                init_current(execution, name);
            });

            // NOTE(review): the result is always stored as `Ok(..)`; a panic
            // in `f` unwinds out of the mock thread instead of being captured
            // as `Err` like `std` would — confirm this is intended.
            *result.lock().unwrap() = Some(Ok(f()));
            notify.notify(location);
        })
    };

    JoinHandle {
        result,
        notify,
        thread: Thread {
            id: ThreadId { id },
            name,
        },
    }
}
impl Builder {
    /// Generates the base configuration for spawning a thread, from which
    /// configuration methods can be chained.
    // `std::thread::Builder` does not implement `Default`, so this type does
    // not either, as it's a mock version of the `std` type.
    #[allow(clippy::new_without_default)]
    pub fn new() -> Builder {
        Builder { name: None }
    }

    /// Names the thread-to-be. Currently the name is used for identification
    /// only in panic messages.
    pub fn name(mut self, name: String) -> Builder {
        self.name = Some(name);
        self
    }

    /// Sets the size of the stack (in bytes) for the new thread.
    ///
    /// The size is ignored by this mock implementation.
    pub fn stack_size(self, _size: usize) -> Builder {
        self
    }

    /// Spawns a new thread by taking ownership of the `Builder`, and returns an
    /// `io::Result` to its `JoinHandle`.
    ///
    /// This mock never returns `Err`; the `io::Result` exists only to mirror
    /// the `std` signature.
    #[track_caller]
    pub fn spawn<F, T>(self, f: F) -> io::Result<JoinHandle<T>>
    where
        F: FnOnce() -> T,
        F: Send + 'static,
        T: Send + 'static,
    {
        Ok(spawn_internal(f, self.name, location!()))
    }
}
impl<T> JoinHandle<T> {
    /// Waits for the associated thread to finish.
    #[track_caller]
    pub fn join(self) -> std::thread::Result<T> {
        // Block until the spawned closure signals completion, then take the
        // result it stashed in the shared slot.
        self.notify.wait(location!());
        self.result.lock().unwrap().take().unwrap()
    }

    /// Gets a handle to the underlying [`Thread`]
    pub fn thread(&self) -> &Thread {
        &self.thread
    }
}

impl<T: fmt::Debug> fmt::Debug for JoinHandle<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Opaque rendering, like `std`: no fields are exposed.
        fmt.debug_struct("JoinHandle").finish()
    }
}
/// Compile-time check that `JoinHandle` is `Send + Sync`.
fn _assert_traits() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<JoinHandle<()>>();
}
impl<T: 'static> LocalKey<T> {
    /// Mock implementation of `std::thread::LocalKey::with`.
    pub fn with<F, R>(&'static self, f: F) -> R
    where
        F: FnOnce(&T) -> R,
    {
        self.try_with(f)
            .expect("cannot access a (mock) TLS value during or after it is destroyed")
    }

    /// Mock implementation of `std::thread::LocalKey::try_with`.
    pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
    where
        F: FnOnce(&T) -> R,
    {
        let value = match unsafe { self.get() } {
            Some(v) => v?,
            None => {
                // Init the value out of the `rt::execution`
                let value = (self.init)();

                rt::execution(|execution| {
                    trace!("LocalKey::try_with");

                    execution.threads.local_init(self, value);
                });

                unsafe { self.get() }.expect("bug")?
            }
        };
        Ok(f(value))
    }

    unsafe fn get(&'static self) -> Option<Result<&T, AccessError>> {
        // SAFETY: extends the lifetime of a borrow; sound only under the
        // invariant argued in the comment below (the value outlives the
        // returned reference's actual use).
        unsafe fn transmute_lt<'a, 'b, T>(t: &'a T) -> &'b T {
            std::mem::transmute::<&'a T, &'b T>(t)
        }

        rt::execution(|execution| {
            trace!("LocalKey::get");

            let res = execution.threads.local(self)?;

            let local = match res {
                Ok(l) => l,
                Err(e) => return Some(Err(e)),
            };

            // This is, sadly, necessary to allow nested `with` blocks to access
            // different thread locals. The borrow on the thread-local needs to
            // escape the lifetime of the borrow on `execution`, since
            // `rt::execution` mutably borrows a RefCell, and borrowing it twice will
            // cause a panic. This should be safe, as we know the function being
            // passed the thread local will not outlive the thread on which
            // it's executing, by construction --- it's just kind of unfortunate.
            Some(Ok(transmute_lt(local)))
        })
    }
}
impl<T: 'static> fmt::Debug for LocalKey<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `pad` (rather than `write!`) honors width/alignment flags, matching
        // std's opaque `LocalKey { .. }` rendering.
        f.pad("LocalKey { .. }")
    }
}