Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

130
third-party/vendor/loom/tests/arc.rs vendored Normal file
View file

@@ -0,0 +1,130 @@
#![deny(warnings, rust_2018_idioms)]
use loom::cell::UnsafeCell;
use loom::sync::atomic::AtomicBool;
use loom::sync::atomic::Ordering::{Acquire, Release};
use loom::sync::Arc;
use loom::sync::Notify;
use loom::thread;
// Tests for loom's `Arc`: publication via Release/Acquire, synchronization
// on drop of the last reference, leak detection, and `try_unwrap`.
struct State {
    // Only safe to read after `guard` has been observed `true`.
    data: UnsafeCell<usize>,
    guard: AtomicBool,
}

impl Drop for State {
    // Whichever thread drops the last `Arc<State>` must observe the write
    // of 1 to `data`; loom fails the model otherwise.
    fn drop(&mut self) {
        self.data.with(|ptr| unsafe {
            assert_eq!(1, *ptr);
        });
    }
}

// Writer publishes `data` with a Release store of `guard`; the reader spins
// on Acquire loads and only touches `data` after seeing the flag.
#[test]
fn basic_usage() {
    loom::model(|| {
        let num = Arc::new(State {
            data: UnsafeCell::new(0),
            guard: AtomicBool::new(false),
        });
        let num2 = num.clone();
        thread::spawn(move || {
            num2.data.with_mut(|ptr| unsafe { *ptr = 1 });
            num2.guard.store(true, Release);
        });
        loop {
            if num.guard.load(Acquire) {
                num.data.with(|ptr| unsafe {
                    assert_eq!(1, *ptr);
                });
                break;
            }
            thread::yield_now();
        }
    });
}

// Dropping an `Arc` must synchronize with the other handle's prior writes:
// `State::drop` (run by whichever thread drops last) asserts it sees 1.
#[test]
fn sync_in_drop() {
    loom::model(|| {
        let num = Arc::new(State {
            data: UnsafeCell::new(0),
            guard: AtomicBool::new(false),
        });
        let num2 = num.clone();
        thread::spawn(move || {
            num2.data.with_mut(|ptr| unsafe { *ptr = 1 });
            num2.guard.store(true, Release);
            drop(num2);
        });
        drop(num);
    });
}

// Leaking an `Arc` (its destructor never runs) must be reported by loom.
#[test]
#[should_panic]
fn detect_mem_leak() {
    loom::model(|| {
        let num = Arc::new(State {
            data: UnsafeCell::new(0),
            guard: AtomicBool::new(false),
        });
        std::mem::forget(num);
    });
}

// `try_unwrap` succeeds once the only other clone has been dropped.
#[test]
fn try_unwrap_succeeds() {
    loom::model(|| {
        let num = Arc::new(0usize);
        let num2 = Arc::clone(&num);
        drop(num2);
        let _ = Arc::try_unwrap(num).unwrap();
    });
}

// `try_unwrap` fails (returning the Arc) while a second clone is alive,
// then succeeds after that clone is dropped.
#[test]
fn try_unwrap_fails() {
    loom::model(|| {
        let num = Arc::new(0usize);
        let num2 = Arc::clone(&num);
        let num = Arc::try_unwrap(num).unwrap_err();
        drop(num2);
        let _ = Arc::try_unwrap(num).unwrap();
    });
}

// Unwrap fails while another thread holds a clone, then succeeds after that
// thread — gated by `Notify` — drops it and is joined.
#[test]
fn try_unwrap_multithreaded() {
    loom::model(|| {
        let num = Arc::new(0usize);
        let num2 = Arc::clone(&num);
        let can_drop = Arc::new(Notify::new());
        let thread = {
            let can_drop = can_drop.clone();
            thread::spawn(move || {
                can_drop.wait();
                drop(num2);
            })
        };
        // The other thread is holding the other arc clone, so we can't unwrap the arc.
        let num = Arc::try_unwrap(num).unwrap_err();
        // Allow the thread to proceed.
        can_drop.notify();
        // After the thread drops the other clone, the arc should be
        // unwrappable.
        thread.join().unwrap();
        let _ = Arc::try_unwrap(num).unwrap();
    });
}

125
third-party/vendor/loom/tests/atomic.rs vendored Normal file
View file

@@ -0,0 +1,125 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
use std::sync::Arc;
// Statics under loom: `lazy_static!` values are (re)constructed per model
// execution and checked for leaks; `thread_local!` is modeled per loom thread.
loom::lazy_static! {
    static ref A: AtomicUsize = AtomicUsize::new(0);
    static ref NO_LEAK: loom::sync::Arc<usize> = Default::default();
    // Constructor yields mid-way so initialization can race between threads.
    static ref ARC_WITH_SLOW_CONSTRUCTOR: loom::sync::Arc<usize> = { thread::yield_now(); Default::default() };
}

loom::thread_local! {
    static B: usize = A.load(Relaxed);
}

// Accessing a lazy static from an unjoined thread can race with model
// shutdown, which loom must flag.
#[test]
#[should_panic]
fn lazy_static_arc_shutdown() {
    loom::model(|| {
        // note that we are not waiting for this thread,
        // so it may access the static during shutdown,
        // which is not okay.
        thread::spawn(|| {
            assert_eq!(**NO_LEAK, 0);
        });
    });
}

// Two threads racing to initialize the slow-constructing lazy static must
// both observe the same value.
#[test]
fn lazy_static_arc_race() {
    loom::model(|| {
        let jh = thread::spawn(|| {
            assert_eq!(**ARC_WITH_SLOW_CONSTRUCTOR, 0);
        });
        assert_eq!(**ARC_WITH_SLOW_CONSTRUCTOR, 0);
        jh.join().unwrap();
    });
}

// The Arc held inside a lazy static must be released at the end of the
// execution rather than reported as a leak.
#[test]
fn lazy_static_arc_doesnt_leak() {
    loom::model(|| {
        assert_eq!(**NO_LEAK, 0);
    });
}

// `try_with` on the thread-local falls back to a direct load of the lazy
// static when the local is unavailable; both paths must be legal.
#[test]
fn legal_load_after_lazy_static() {
    loom::model(|| {
        let t1 = thread::spawn(|| {
            B.try_with(|h| *h).unwrap_or_else(|_| A.load(Relaxed));
        });
        let t2 = thread::spawn(|| {
            B.try_with(|h| *h).unwrap_or_else(|_| A.load(Relaxed));
        });
        t1.join().unwrap();
        t2.join().unwrap();
    });
}

// An `unsync_load` racing with a store is a data race; loom must detect it.
#[test]
#[should_panic]
fn invalid_unsync_load_relaxed() {
    loom::model(|| {
        let a = Arc::new(AtomicUsize::new(0));
        let b = a.clone();
        let thread = thread::spawn(move || {
            unsafe { a.unsync_load() };
        });
        b.store(1, Relaxed);
        thread.join().unwrap();
    });
}

// Expects the execution where both CASes read the pre-store value (so both
// succeed and a == b == 2) to be reachable, triggering the inner panic.
// Ignored: loom does not currently explore this outcome.
#[test]
#[ignore]
#[should_panic]
fn compare_and_swap_reads_old_values() {
    loom::model(|| {
        let a = Arc::new(AtomicUsize::new(0));
        let b = Arc::new(AtomicUsize::new(0));
        let a2 = a.clone();
        let b2 = b.clone();
        let th = thread::spawn(move || {
            a2.store(1, Release);
            b2.compare_and_swap(0, 2, AcqRel);
        });
        b.store(1, Release);
        a.compare_and_swap(0, 2, AcqRel);
        th.join().unwrap();
        let a_val = a.load(Acquire);
        let b_val = b.load(Acquire);
        if a_val == 2 && b_val == 2 {
            panic!();
        }
    });
}

// Two concurrent `fetch_add`s must each observe a distinct previous value
// even with Relaxed ordering (read-modify-write atomicity).
#[test]
fn fetch_add_atomic() {
    loom::model(|| {
        let a1 = Arc::new(AtomicUsize::new(0));
        let a2 = a1.clone();
        let th = thread::spawn(move || a2.fetch_add(1, Relaxed));
        let v1 = a1.fetch_add(1, Relaxed);
        let v2 = th.join().unwrap();
        assert_ne!(v1, v2);
    });
}

View file

@@ -0,0 +1,113 @@
#![deny(warnings, rust_2018_idioms)]
// Generates, for each (module name, integer type, atomic type) triple, a
// module of single-threaded model tests exercising the fetch/CAS API:
// checks both the returned previous value and the stored result.
macro_rules! test_int {
    ($name:ident, $int:ty, $atomic:ty) => {
        mod $name {
            use loom::sync::atomic::*;
            use std::sync::atomic::Ordering::SeqCst;

            // Arbitrary 64-bit patterns, truncated by `as` to each width.
            const NUM_A: u64 = 11641914933775430211;
            const NUM_B: u64 = 13209405719799650717;

            #[test]
            fn xor() {
                loom::model(|| {
                    let a: $int = NUM_A as $int;
                    let b: $int = NUM_B as $int;
                    let atomic = <$atomic>::new(a);
                    let prev = atomic.fetch_xor(b, SeqCst);
                    assert_eq!(a, prev, "prev did not match");
                    assert_eq!(a ^ b, atomic.load(SeqCst), "load failed");
                });
            }

            #[test]
            fn max() {
                loom::model(|| {
                    let a: $int = NUM_A as $int;
                    let b: $int = NUM_B as $int;
                    let atomic = <$atomic>::new(a);
                    let prev = atomic.fetch_max(b, SeqCst);
                    assert_eq!(a, prev, "prev did not match");
                    assert_eq!(a.max(b), atomic.load(SeqCst), "load failed");
                });
            }

            #[test]
            fn min() {
                loom::model(|| {
                    let a: $int = NUM_A as $int;
                    let b: $int = NUM_B as $int;
                    let atomic = <$atomic>::new(a);
                    let prev = atomic.fetch_min(b, SeqCst);
                    assert_eq!(a, prev, "prev did not match");
                    assert_eq!(a.min(b), atomic.load(SeqCst), "load failed");
                });
            }

            // First exchange must fail (current != b), second must succeed.
            #[test]
            fn compare_exchange() {
                loom::model(|| {
                    let a: $int = NUM_A as $int;
                    let b: $int = NUM_B as $int;
                    let atomic = <$atomic>::new(a);
                    assert_eq!(Err(a), atomic.compare_exchange(b, a, SeqCst, SeqCst));
                    assert_eq!(Ok(a), atomic.compare_exchange(a, b, SeqCst, SeqCst));
                    assert_eq!(b, atomic.load(SeqCst));
                });
            }

            // Ignored: the weak form is allowed to fail spuriously, so these
            // exact-result assertions are not guaranteed to hold.
            #[test]
            #[ignore]
            fn compare_exchange_weak() {
                loom::model(|| {
                    let a: $int = NUM_A as $int;
                    let b: $int = NUM_B as $int;
                    let atomic = <$atomic>::new(a);
                    assert_eq!(Err(a), atomic.compare_exchange_weak(b, a, SeqCst, SeqCst));
                    assert_eq!(Ok(a), atomic.compare_exchange_weak(a, b, SeqCst, SeqCst));
                    assert_eq!(b, atomic.load(SeqCst));
                });
            }

            // Returning `None` from the closure aborts the update with Err.
            #[test]
            fn fetch_update() {
                loom::model(|| {
                    let a: $int = NUM_A as $int;
                    let b: $int = NUM_B as $int;
                    let atomic = <$atomic>::new(a);
                    assert_eq!(Ok(a), atomic.fetch_update(SeqCst, SeqCst, |_| Some(b)));
                    assert_eq!(Err(b), atomic.fetch_update(SeqCst, SeqCst, |_| None));
                    assert_eq!(b, atomic.load(SeqCst));
                });
            }
        }
    };
}

test_int!(atomic_u8, u8, AtomicU8);
test_int!(atomic_u16, u16, AtomicU16);
test_int!(atomic_u32, u32, AtomicU32);
test_int!(atomic_usize, usize, AtomicUsize);
test_int!(atomic_i8, i8, AtomicI8);
test_int!(atomic_i16, i16, AtomicI16);
test_int!(atomic_i32, i32, AtomicI32);
test_int!(atomic_isize, isize, AtomicIsize);
// 64-bit atomics are only exercised on 64-bit targets.
#[cfg(target_pointer_width = "64")]
test_int!(atomic_u64, u64, AtomicU64);
#[cfg(target_pointer_width = "64")]
test_int!(atomic_i64, i64, AtomicI64);

View file

@@ -0,0 +1,93 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::sync::Arc;
// Two threads increment via a CAS retry loop; both increments must land.
#[test]
fn compare_and_swap() {
    loom::model(|| {
        let num = Arc::new(AtomicUsize::new(0));
        let ths: Vec<_> = (0..2)
            .map(|_| {
                let num = num.clone();
                thread::spawn(move || {
                    let mut curr = num.load(Relaxed);
                    loop {
                        // On success the returned value equals `curr`;
                        // otherwise retry with the freshly observed value.
                        let actual = num.compare_and_swap(curr, curr + 1, Relaxed);
                        if actual == curr {
                            return;
                        }
                        curr = actual;
                    }
                })
            })
            .collect();
        for th in ths {
            th.join().unwrap();
        }
        assert_eq!(2, num.load(Relaxed));
    });
}

// Release store / Acquire load: observing the flag guarantees visibility
// of the preceding Relaxed store, so no execution fails.
#[test]
fn check_ordering_valid() {
    loom::model(|| {
        let n1 = Arc::new((AtomicUsize::new(0), AtomicUsize::new(0)));
        let n2 = n1.clone();
        thread::spawn(move || {
            n1.0.store(1, Relaxed);
            n1.1.store(1, Release);
        });
        if 1 == n2.1.load(Acquire) {
            assert_eq!(1, n2.0.load(Relaxed));
        }
    });
}

// Same shape but the flag is read with Relaxed: no synchronizes-with edge,
// so loom must find an execution where the assert fails.
#[test]
#[should_panic]
fn check_ordering_invalid_1() {
    loom::model(|| {
        let n1 = Arc::new((AtomicUsize::new(0), AtomicUsize::new(0)));
        let n2 = n1.clone();
        thread::spawn(move || {
            n1.0.store(1, Relaxed);
            n1.1.store(1, Release);
        });
        if 1 == n2.1.load(Relaxed) {
            assert_eq!(1, n2.0.load(Relaxed));
        }
    });
}

// Both the flag store and load are Relaxed: likewise no ordering, and loom
// must find the failing execution.
#[test]
#[should_panic]
fn check_ordering_invalid_2() {
    loom::model(|| {
        let n1 = Arc::new((AtomicUsize::new(0), AtomicUsize::new(0)));
        let n2 = n1.clone();
        thread::spawn(move || {
            n1.0.store(1, Relaxed);
            n1.1.store(1, Relaxed);
        });
        if 1 == n2.1.load(Relaxed) {
            assert_eq!(1, n2.0.load(Relaxed));
        }
    });
}

View file

@@ -0,0 +1,82 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::atomic::AtomicUsize;
use loom::sync::{Condvar, Mutex};
use loom::thread;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
// One incrementing thread, one waiter woken via `notify_one`.
#[test]
fn notify_one() {
    loom::model(|| {
        let inc = Arc::new(Inc::new());
        for _ in 0..1 {
            let inc = inc.clone();
            thread::spawn(move || inc.inc());
        }
        inc.wait();
    });
}

// Two waiters must both be released by a single `notify_all`.
#[test]
fn notify_all() {
    loom::model(|| {
        let inc = Arc::new(Inc::new());
        let mut waiters = Vec::new();
        for _ in 0..2 {
            let inc = inc.clone();
            waiters.push(thread::spawn(move || inc.wait()));
        }
        thread::spawn(move || inc.inc_all()).join().expect("inc");
        for th in waiters {
            th.join().expect("waiter");
        }
    });
}

// Condvar fixture: `num` is the condition, `mutex`/`condvar` coordinate
// the waiters.
struct Inc {
    num: AtomicUsize,
    mutex: Mutex<()>,
    condvar: Condvar,
}

impl Inc {
    fn new() -> Inc {
        Inc {
            num: AtomicUsize::new(0),
            mutex: Mutex::new(()),
            condvar: Condvar::new(),
        }
    }

    // Standard condvar wait loop: re-check the condition after each wakeup
    // to tolerate spurious or early wakes.
    fn wait(&self) {
        let mut guard = self.mutex.lock().unwrap();
        loop {
            let val = self.num.load(SeqCst);
            if 1 == val {
                break;
            }
            guard = self.condvar.wait(guard).unwrap();
        }
    }

    // Set the condition, pass through the mutex so the waiter cannot miss
    // the update, then wake one waiter.
    fn inc(&self) {
        self.num.store(1, SeqCst);
        drop(self.mutex.lock().unwrap());
        self.condvar.notify_one();
    }

    // Same as `inc` but wakes every waiter.
    fn inc_all(&self) {
        self.num.store(1, SeqCst);
        drop(self.mutex.lock().unwrap());
        self.condvar.notify_all();
    }
}

View file

@@ -0,0 +1,35 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::Mutex;
use loom::thread;
use std::rc::Rc;
/// Classic lock-order-inversion deadlock: one thread takes the locks in
/// the order first→second, the other second→first. Loom must find the
/// interleaving that deadlocks, so the model panics.
#[test]
#[should_panic]
fn two_mutexes_deadlock() {
    loom::model(|| {
        let first = Rc::new(Mutex::new(1));
        let second = Rc::new(Mutex::new(2));

        let handle_a = {
            let first = first.clone();
            let second = second.clone();
            thread::spawn(move || {
                // Acquisition order: first, then second.
                let got_first = first.lock().unwrap();
                let got_second = second.lock().unwrap();
                assert_eq!(*got_first + *got_second, 3);
            })
        };

        // Acquisition order inverted: second, then first.
        let handle_b = thread::spawn(move || {
            let got_second = second.lock().unwrap();
            let got_first = first.lock().unwrap();
            assert_eq!(*got_first + *got_second, 3);
        });

        handle_a.join().unwrap();
        handle_b.join().unwrap();
    });
}

View file

@@ -0,0 +1,13 @@
/// Blowing past `max_branches` while the model is already unwinding must
/// surface as a single clean panic (caught by `should_panic`), not as a
/// process-aborting double panic.
#[test]
#[should_panic]
fn double_panic_at_branch_max() {
    let mut model = loom::model::Builder::new();
    // Deliberately tiny budget: the three yields below exceed it.
    model.max_branches = 2;
    model.check(|| {
        let _arc = loom::sync::Arc::new(());
        for _ in 0..3 {
            loom::thread::yield_now();
        }
    });
}

217
third-party/vendor/loom/tests/fence.rs vendored Normal file
View file

@@ -0,0 +1,217 @@
#![deny(warnings, rust_2018_idioms)]
use loom::cell::UnsafeCell;
use loom::sync::atomic::{fence, AtomicBool};
use loom::thread;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use std::sync::Arc;
// Release fence + Relaxed store, paired with Relaxed load + Acquire fence:
// the classic fence-to-fence synchronizes-with edge.
#[test]
fn fence_sw_base() {
    loom::model(|| {
        let data = Arc::new(UnsafeCell::new(0));
        let flag = Arc::new(AtomicBool::new(false));
        let th = {
            let (data, flag) = (data.clone(), flag.clone());
            thread::spawn(move || {
                data.with_mut(|ptr| unsafe { *ptr = 42 });
                fence(Release);
                flag.store(true, Relaxed);
            })
        };
        if flag.load(Relaxed) {
            fence(Acquire);
            assert_eq!(42, data.with_mut(|ptr| unsafe { *ptr }));
        }
        th.join().unwrap();
    });
}

// Release *store* (no fence on the writer side) observed by a Relaxed load
// followed by an Acquire fence: fence-to-store synchronization.
#[test]
fn fence_sw_collapsed_store() {
    loom::model(|| {
        let data = Arc::new(UnsafeCell::new(0));
        let flag = Arc::new(AtomicBool::new(false));
        let th = {
            let (data, flag) = (data.clone(), flag.clone());
            thread::spawn(move || {
                data.with_mut(|ptr| unsafe { *ptr = 42 });
                flag.store(true, Release);
            })
        };
        if flag.load(Relaxed) {
            fence(Acquire);
            assert_eq!(42, data.with_mut(|ptr| unsafe { *ptr }));
        }
        th.join().unwrap();
    });
}

// Release fence + Relaxed store observed by an Acquire *load* (no fence on
// the reader side): store-to-fence synchronization.
#[test]
fn fence_sw_collapsed_load() {
    loom::model(|| {
        let data = Arc::new(UnsafeCell::new(0));
        let flag = Arc::new(AtomicBool::new(false));
        let th = {
            let (data, flag) = (data.clone(), flag.clone());
            thread::spawn(move || {
                data.with_mut(|ptr| unsafe { *ptr = 42 });
                fence(Release);
                flag.store(true, Relaxed);
            })
        };
        if flag.load(Acquire) {
            assert_eq!(42, data.with_mut(|ptr| unsafe { *ptr }));
        }
        th.join().unwrap();
    });
}

// SB+fences from the Promising Semantics paper (https://sf.snu.ac.kr/promise-concurrency/)
// Store buffering with SeqCst fences: at least one of the two threads must
// observe the other's store, so (a, b) == (false, false) is forbidden.
#[test]
fn sb_fences() {
    loom::model(|| {
        let x = Arc::new(AtomicBool::new(false));
        let y = Arc::new(AtomicBool::new(false));
        let a = {
            let (x, y) = (x.clone(), y.clone());
            thread::spawn(move || {
                x.store(true, Relaxed);
                fence(SeqCst);
                y.load(Relaxed)
            })
        };
        y.store(true, Relaxed);
        fence(SeqCst);
        let b = x.load(Relaxed);
        if !a.join().unwrap() {
            assert!(b);
        }
    });
}

// Hazard-pointer style protocol: SeqCst fences ensure the reclaimer and
// the protecting reader cannot both miss each other's flag, so a protected
// node is never freed while accessible.
#[test]
fn fence_hazard_pointer() {
    loom::model(|| {
        let reachable = Arc::new(AtomicBool::new(true));
        let protected = Arc::new(AtomicBool::new(false));
        let allocated = Arc::new(AtomicBool::new(true));
        let th = {
            let (reachable, protected, allocated) =
                (reachable.clone(), protected.clone(), allocated.clone());
            thread::spawn(move || {
                // put in protected list
                protected.store(true, Relaxed);
                fence(SeqCst);
                // validate, then access
                if reachable.load(Relaxed) {
                    assert!(allocated.load(Relaxed));
                }
            })
        };
        // unlink/retire
        reachable.store(false, Relaxed);
        fence(SeqCst);
        // reclaim unprotected
        if !protected.load(Relaxed) {
            allocated.store(false, Relaxed);
        }
        th.join().unwrap();
    });
}

// RWC+syncs from the SCFix paper (https://plv.mpi-sws.org/scfix/)
// The specified behavior was allowed in C/C++11, which later turned out to be too weak.
// C/C++20 and all the implementations of C/C++11 disallow this behavior.
#[test]
fn rwc_syncs() {
    // ... what else would you call them?
    #![allow(clippy::many_single_char_names)]
    loom::model(|| {
        let x = Arc::new(AtomicBool::new(false));
        let y = Arc::new(AtomicBool::new(false));
        let t2 = {
            let (x, y) = (x.clone(), y.clone());
            thread::spawn(move || {
                let a = x.load(Relaxed);
                fence(SeqCst);
                let b = y.load(Relaxed);
                (a, b)
            })
        };
        let t3 = {
            let x = x.clone();
            thread::spawn(move || {
                y.store(true, Relaxed);
                fence(SeqCst);
                x.load(Relaxed)
            })
        };
        x.store(true, Relaxed);
        let (a, b) = t2.join().unwrap();
        let c = t3.join().unwrap();
        // The combination (a, !b, !c) must be unreachable under the
        // strengthened (C++20) SC-fence semantics.
        if a && !b && !c {
            panic!();
        }
    });
}

// W+RWC from the SCFix paper (https://plv.mpi-sws.org/scfix/)
// The specified behavior was allowed in C/C++11, which later turned out to be too weak.
// C/C++20 and most of the implementations of C/C++11 disallow this behavior.
#[test]
fn w_rwc() {
    #![allow(clippy::many_single_char_names)]
    loom::model(|| {
        let x = Arc::new(AtomicBool::new(false));
        let y = Arc::new(AtomicBool::new(false));
        let z = Arc::new(AtomicBool::new(false));
        let t2 = {
            let (y, z) = (y.clone(), z.clone());
            thread::spawn(move || {
                let a = z.load(Acquire);
                fence(SeqCst);
                let b = y.load(Relaxed);
                (a, b)
            })
        };
        let t3 = {
            let x = x.clone();
            thread::spawn(move || {
                y.store(true, Relaxed);
                fence(SeqCst);
                x.load(Relaxed)
            })
        };
        x.store(true, Relaxed);
        z.store(true, Release);
        let (a, b) = t2.join().unwrap();
        let c = t3.join().unwrap();
        // Likewise forbidden under the strengthened semantics.
        if a && !b && !c {
            panic!();
        }
    });
}

View file

@@ -0,0 +1,92 @@
#![cfg(feature = "futures")]
#![deny(warnings, rust_2018_idioms)]
use loom::future::{block_on, AtomicWaker};
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use futures_util::future::poll_fn;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::Arc;
use std::task::Poll;
// Minimal channel-like fixture: a counter plus an `AtomicWaker` used to
// wake the polling task.
struct Chan {
    num: AtomicUsize,
    task: AtomicWaker,
}

// Register-then-check pattern: the future registers its waker, then
// re-checks the counter, so a wake between the two cannot be lost.
#[test]
fn atomic_waker_valid() {
    use std::task::Poll::*;

    const NUM_NOTIFY: usize = 2;

    loom::model(|| {
        let chan = Arc::new(Chan {
            num: AtomicUsize::new(0),
            task: AtomicWaker::new(),
        });
        for _ in 0..NUM_NOTIFY {
            let chan = chan.clone();
            thread::spawn(move || {
                chan.num.fetch_add(1, Relaxed);
                chan.task.wake();
            });
        }
        block_on(poll_fn(move |cx| {
            chan.task.register_by_ref(cx.waker());
            if NUM_NOTIFY == chan.num.load(Relaxed) {
                return Ready(());
            }
            Pending
        }));
    });
}

// Tests futures spuriously poll as this is a very common pattern
#[test]
fn spurious_poll() {
    use loom::sync::atomic::AtomicBool;
    use loom::sync::atomic::Ordering::{Acquire, Release};

    // Plain std atomic: records, across *all* explored executions, whether
    // some execution polled three times; checked after the model finishes.
    let poll_thrice = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
    let actual = poll_thrice.clone();

    loom::model(move || {
        let gate = Arc::new(AtomicBool::new(false));
        let mut cnt = 0;
        let num_poll = block_on(poll_fn(|cx| {
            // On the first poll, spawn the thread that opens the gate and
            // wakes us.
            if cnt == 0 {
                let gate = gate.clone();
                let waker = cx.waker().clone();
                thread::spawn(move || {
                    gate.store(true, Release);
                    waker.wake();
                });
            }
            cnt += 1;
            if gate.load(Acquire) {
                Poll::Ready(cnt)
            } else {
                Poll::Pending
            }
        }));
        if num_poll == 3 {
            poll_thrice.store(true, Release);
        }
        assert!(num_poll > 0 && num_poll <= 3, "actual = {}", num_poll);
    });

    // At least one interleaving must produce a third (spurious) poll.
    assert!(actual.load(Acquire));
}

59
third-party/vendor/loom/tests/litmus.rs vendored Normal file
View file

@@ -0,0 +1,59 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::collections::HashSet;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{Arc, Mutex};
// Loom currently does not support load buffering.
// Litmus test: the outcome a == 1 (the load observes the value forwarded
// through the other thread) requires load buffering; the cross-execution
// set would have to contain 1 for the final assert to pass.
#[test]
#[ignore]
fn load_buffering() {
    let values = Arc::new(Mutex::new(HashSet::new()));
    let values_ = values.clone();
    loom::model(move || {
        let x = Arc::new(AtomicUsize::new(0));
        let y = Arc::new(AtomicUsize::new(0));
        let th = {
            let (x, y) = (x.clone(), y.clone());
            thread::spawn(move || {
                x.store(y.load(Relaxed), Relaxed);
            })
        };
        let a = x.load(Relaxed);
        y.store(1, Relaxed);
        th.join().unwrap();
        // Collect the observed value from every explored execution.
        values.lock().unwrap().insert(a);
    });
    assert!(values_.lock().unwrap().contains(&1));
}

// Store buffering: with Relaxed ordering, the outcome where both threads
// miss each other's store — (a, b) == (0, 0) — must be explored by loom.
#[test]
fn store_buffering() {
    let values = Arc::new(Mutex::new(HashSet::new()));
    let values_ = values.clone();
    loom::model(move || {
        let x = Arc::new(AtomicUsize::new(0));
        let y = Arc::new(AtomicUsize::new(0));
        let a = {
            let (x, y) = (x.clone(), y.clone());
            thread::spawn(move || {
                x.store(1, Relaxed);
                y.load(Relaxed)
            })
        };
        y.store(1, Relaxed);
        let b = x.load(Relaxed);
        let a = a.join().unwrap();
        // Collect the (a, b) pair from every explored execution.
        values.lock().unwrap().insert((a, b));
    });
    assert!(values_.lock().unwrap().contains(&(0, 0)));
}

89
third-party/vendor/loom/tests/mpsc.rs vendored Normal file
View file

@@ -0,0 +1,89 @@
use loom::sync::mpsc::channel;
use loom::thread;
// Send then receive on the same thread.
#[test]
fn basic_sequential_usage() {
    loom::model(|| {
        let (s, r) = channel();
        s.send(5).unwrap();
        let val = r.recv().unwrap();
        assert_eq!(val, 5);
    });
}

// `recv` blocks until the spawned sender delivers.
#[test]
fn basic_parallel_usage() {
    loom::model(|| {
        let (s, r) = channel();
        thread::spawn(move || {
            s.send(5).unwrap();
        });
        let val = r.recv().unwrap();
        assert_eq!(val, 5);
    });
}

// Two senders racing: the sum of both received values is order-independent,
// so the model must pass in every interleaving.
#[test]
fn commutative_senders() {
    loom::model(|| {
        let (s, r) = channel();
        let s2 = s.clone();
        thread::spawn(move || {
            s.send(5).unwrap();
        });
        thread::spawn(move || {
            s2.send(6).unwrap();
        });
        let mut val = r.recv().unwrap();
        val += r.recv().unwrap();
        assert_eq!(val, 11);
    });
}

// Sends/recvs can fail once the other side hangs up during model shutdown;
// these tests only care about ordering, so errors are swallowed.
fn ignore_result<A, B>(_: Result<A, B>) {}

// Asserting a specific arrival order (5 first) must fail on some
// interleaving, since the two senders are unordered.
#[test]
#[should_panic]
fn non_commutative_senders1() {
    loom::model(|| {
        let (s, r) = channel();
        let s2 = s.clone();
        thread::spawn(move || {
            ignore_result(s.send(5));
        });
        thread::spawn(move || {
            ignore_result(s2.send(6));
        });
        let val = r.recv().unwrap();
        assert_eq!(val, 5);
        ignore_result(r.recv());
    });
}

// Mirror image: asserting 6 arrives first must also fail on some
// interleaving.
#[test]
#[should_panic]
fn non_commutative_senders2() {
    loom::model(|| {
        let (s, r) = channel();
        let s2 = s.clone();
        thread::spawn(move || {
            ignore_result(s.send(5));
        });
        thread::spawn(move || {
            ignore_result(s2.send(6));
        });
        let val = r.recv().unwrap();
        assert_eq!(val, 6);
        ignore_result(r.recv());
    });
}

// Dropping the receiver with a message still queued must not be reported
// as a leak or deadlock.
#[test]
fn drop_receiver() {
    loom::model(|| {
        let (s, r) = channel();
        s.send(1).unwrap();
        s.send(2).unwrap();
        assert_eq!(r.recv().unwrap(), 1);
    });
}

93
third-party/vendor/loom/tests/mutex.rs vendored Normal file
View file

@@ -0,0 +1,93 @@
#![deny(warnings, rust_2018_idioms)]
use loom::cell::UnsafeCell;
use loom::sync::atomic::AtomicUsize;
use loom::sync::Mutex;
use loom::thread;
use std::rc::Rc;
use std::sync::atomic::Ordering::SeqCst;
// While holding the lock, the counter must equal the lock-protected value:
// this only holds if no two threads are ever inside the critical section
// at once.
#[test]
fn mutex_enforces_mutal_exclusion() {
    loom::model(|| {
        let data = Rc::new((Mutex::new(0), AtomicUsize::new(0)));
        let ths: Vec<_> = (0..2)
            .map(|_| {
                let data = data.clone();
                thread::spawn(move || {
                    let mut locked = data.0.lock().unwrap();
                    let prev = data.1.fetch_add(1, SeqCst);
                    assert_eq!(prev, *locked);
                    *locked += 1;
                })
            })
            .collect();
        for th in ths {
            th.join().unwrap();
        }
        let locked = data.0.lock().unwrap();
        assert_eq!(*locked, data.1.load(SeqCst));
    });
}

// Acquiring the mutex must make the other thread's pre-unlock write to the
// unsynchronized cell visible.
#[test]
fn mutex_establishes_seq_cst() {
    loom::model(|| {
        struct Data {
            cell: UnsafeCell<usize>,
            flag: Mutex<bool>,
        }

        let data = Rc::new(Data {
            cell: UnsafeCell::new(0),
            flag: Mutex::new(false),
        });
        {
            let data = data.clone();
            thread::spawn(move || {
                unsafe { data.cell.with_mut(|v| *v = 1) };
                *data.flag.lock().unwrap() = true;
            });
        }
        let flag = *data.flag.lock().unwrap();
        if flag {
            let v = unsafe { data.cell.with(|v| *v) };
            assert_eq!(v, 1);
        }
    });
}

// After both increments are joined, unwrapping the Rc and consuming the
// mutex via `into_inner` must yield the final value.
#[test]
fn mutex_into_inner() {
    loom::model(|| {
        let lock = Rc::new(Mutex::new(0));
        let ths: Vec<_> = (0..2)
            .map(|_| {
                let lock = lock.clone();
                thread::spawn(move || {
                    *lock.lock().unwrap() += 1;
                })
            })
            .collect();
        for th in ths {
            th.join().unwrap();
        }
        let lock = Rc::try_unwrap(lock).unwrap().into_inner().unwrap();
        assert_eq!(lock, 2);
    })
}

102
third-party/vendor/loom/tests/rwlock.rs vendored Normal file
View file

@@ -0,0 +1,102 @@
use loom::sync::{Arc, RwLock};
use loom::thread;
use std::rc::Rc;
// A second reader can acquire the lock while the main thread holds a read
// guard.
#[test]
fn rwlock_read_one() {
    loom::model(|| {
        let lock = Arc::new(RwLock::new(1));
        let c_lock = lock.clone();
        let n = lock.read().unwrap();
        assert_eq!(*n, 1);
        thread::spawn(move || {
            let r = c_lock.read();
            assert!(r.is_ok());
        })
        .join()
        .unwrap();
    });
}

// Two readers and one writer contending; the yields widen the explored
// interleavings.
#[test]
fn rwlock_read_two_write_one() {
    loom::model(|| {
        let lock = Arc::new(RwLock::new(1));
        for _ in 0..2 {
            let lock = lock.clone();
            thread::spawn(move || {
                let _l = lock.read().unwrap();
                thread::yield_now();
            });
        }
        let _l = lock.write().unwrap();
        thread::yield_now();
    });
}

// `try_read` on an uncontended lock succeeds.
#[test]
fn rwlock_try_read() {
    loom::model(|| {
        let lock = RwLock::new(1);
        match lock.try_read() {
            Ok(n) => assert_eq!(*n, 1),
            Err(_) => unreachable!(),
        };
    });
}

// While a write guard is live, `try_read` must fail.
#[test]
fn rwlock_write() {
    loom::model(|| {
        let lock = RwLock::new(1);
        let mut n = lock.write().unwrap();
        *n = 2;
        assert!(lock.try_read().is_err());
    });
}

// While a read guard is live, `try_write` must fail.
#[test]
fn rwlock_try_write() {
    loom::model(|| {
        let lock = RwLock::new(1);
        let n = lock.read().unwrap();
        assert_eq!(*n, 1);
        assert!(lock.try_write().is_err());
    });
}

// After both writers are joined, `into_inner` must observe both increments.
#[test]
fn rwlock_into_inner() {
    loom::model(|| {
        let lock = Rc::new(RwLock::new(0));
        let ths: Vec<_> = (0..2)
            .map(|_| {
                let lock = lock.clone();
                thread::spawn(move || {
                    *lock.write().unwrap() += 1;
                })
            })
            .collect();
        for th in ths {
            th.join().unwrap();
        }
        let lock = Rc::try_unwrap(lock).unwrap().into_inner().unwrap();
        assert_eq!(lock, 2);
    })
}

View file

@ -0,0 +1,32 @@
use loom::{
sync::{
atomic::{AtomicUsize, Ordering},
Arc, RwLock,
},
thread,
};
// Two unjoined writer threads contending for the same `RwLock`; the model
// must explore their interleavings without deadlock or missed unlocks.
// Neither thread is joined, so the model also covers shutdown ordering.
#[test]
fn rwlock_two_writers() {
    loom::model(|| {
        let lock = Arc::new(RwLock::new(1));
        let c_lock = lock.clone();
        let c_lock2 = lock;
        let atomic = Arc::new(AtomicUsize::new(0));
        let c_atomic = atomic.clone();
        let c_atomic2 = atomic;
        thread::spawn(move || {
            let mut w = c_lock.write().unwrap();
            *w += 1;
            c_atomic.fetch_add(1, Ordering::Relaxed);
        });
        thread::spawn(move || {
            let mut w = c_lock2.write().unwrap();
            *w += 1;
            c_atomic2.fetch_add(1, Ordering::Relaxed);
        });
    });
}

45
third-party/vendor/loom/tests/smoke.rs vendored Normal file
View file

@@ -0,0 +1,45 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::sync::Arc;
// Smoke test: a deliberately racy load-then-store "increment" loses updates
// under some interleaving, and loom must find the execution where the
// final assert fails.
#[test]
#[should_panic]
fn checks_fail() {
    struct BuggyInc {
        num: AtomicUsize,
    }

    impl BuggyInc {
        fn new() -> BuggyInc {
            BuggyInc {
                num: AtomicUsize::new(0),
            }
        }

        // Not atomic as a whole: another thread can increment between the
        // load and the store, losing one of the increments.
        fn inc(&self) {
            let curr = self.num.load(Acquire);
            self.num.store(curr + 1, Release);
        }
    }

    loom::model(|| {
        let buggy_inc = Arc::new(BuggyInc::new());
        let ths: Vec<_> = (0..2)
            .map(|_| {
                let buggy_inc = buggy_inc.clone();
                thread::spawn(move || buggy_inc.inc())
            })
            .collect();
        for th in ths {
            th.join().unwrap();
        }
        assert_eq!(2, buggy_inc.num.load(Relaxed));
    });
}

163
third-party/vendor/loom/tests/spec.rs vendored Normal file
View file

@@ -0,0 +1,163 @@
//! These tests are converted from the [C11 memory ordering page][spec].
//!
//!
//! [spec]: https://en.cppreference.com/w/cpp/atomic/memory_order
/// https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
///
/// This test is ignored because loom cannot fully model `Ordering::Relaxed`.
#[test]
#[should_panic]
#[ignore]
fn relaxed() {
    use loom::sync::atomic::AtomicUsize;
    use loom::thread;
    use std::sync::atomic::Ordering::Relaxed;
    loom::model(|| {
        // Leaked so the references are 'static and can move into threads.
        let x1: &'static _ = Box::leak(Box::new(AtomicUsize::new(0)));
        let x2 = x1;
        let y1: &'static _ = Box::leak(Box::new(AtomicUsize::new(0)));
        let y2 = y1;
        let t1 = thread::spawn(move || {
            let r1 = y1.load(Relaxed);
            x1.store(r1, Relaxed);
            r1
        });
        let t2 = thread::spawn(move || {
            let r2 = x2.load(Relaxed);
            y2.store(42, Relaxed);
            r2
        });
        let r1 = t1.join().unwrap();
        let r2 = t2.join().unwrap();
        if r1 == 42 && r2 == 42 {
            panic!("This case is possible with Relaxed, so we should hit this panic.");
        }
    });
}

/// https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
///
/// This is the SeqCst example modified to use AcqRel to see that we indeed exercise all the
/// possible executions.
#[test]
fn acq_rel() {
    use loom::sync::atomic::AtomicBool;
    use loom::thread;
    use std::sync::atomic::Ordering;
    let mut builder = loom::model::Builder::new();
    // The yield loop makes loom really sad without this:
    builder.preemption_bound = Some(1);
    // std Mutex (not loom's): accumulates the z values seen across all
    // explored executions so they can be checked after the model finishes.
    let seen: &'static _ = Box::leak(Box::new(std::sync::Mutex::new(
        std::collections::HashSet::new(),
    )));
    builder.check(move || {
        let x: &'static _ = Box::leak(Box::new(AtomicBool::new(false)));
        let y: &'static _ = Box::leak(Box::new(AtomicBool::new(false)));
        let z: &'static _ = Box::leak(Box::new(std::sync::atomic::AtomicUsize::new(0)));
        // NOTE: done in this thread after spawning
        // thread::spawn(move || {
        //     x.store(true, Ordering::Release);
        // });
        thread::spawn(move || {
            y.store(true, Ordering::Release);
        });
        let t1 = thread::spawn(move || {
            while !x.load(Ordering::Acquire) {
                loom::thread::yield_now();
            }
            if y.load(Ordering::Acquire) {
                z.fetch_add(1, Ordering::Relaxed);
            }
        });
        let t2 = thread::spawn(move || {
            while !y.load(Ordering::Acquire) {
                loom::thread::yield_now();
            }
            if x.load(Ordering::Acquire) {
                z.fetch_add(1, Ordering::Relaxed);
            }
        });
        x.store(true, Ordering::Release);
        t1.join().unwrap();
        t2.join().unwrap();
        // Read z but not while holding the lock, since the read goes into loom innards.
        let z = z.load(Ordering::SeqCst);
        seen.lock().unwrap().insert(z);
    });
    // With AcqRel, z == 0 is additionally reachable, unlike under SeqCst.
    let seen = seen.lock().unwrap();
    assert!(seen.contains(&0));
    assert!(seen.contains(&1));
    assert!(seen.contains(&2));
    assert_eq!(seen.len(), 3);
}

/// https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
///
/// This test currently fails because loom executes a permutation that isn't legal under `SeqCst`
/// according to the spec in which `z == 0`.
#[test]
#[ignore]
fn test_seq_cst() {
    use loom::sync::atomic::AtomicBool;
    use loom::thread;
    use std::sync::atomic::Ordering;
    let mut builder = loom::model::Builder::new();
    // The yield loop makes loom really sad without this:
    builder.preemption_bound = Some(1);
    // Cross-execution result collection, as in `acq_rel` above.
    let seen: &'static _ = Box::leak(Box::new(std::sync::Mutex::new(
        std::collections::HashSet::new(),
    )));
    builder.check(move || {
        let x: &'static _ = Box::leak(Box::new(AtomicBool::new(false)));
        let y: &'static _ = Box::leak(Box::new(AtomicBool::new(false)));
        let z: &'static _ = Box::leak(Box::new(std::sync::atomic::AtomicUsize::new(0)));
        // NOTE: done in this thread after spawning
        // thread::spawn(move || {
        //     x.store(true, Ordering::SeqCst);
        // });
        thread::spawn(move || {
            y.store(true, Ordering::SeqCst);
        });
        let t1 = thread::spawn(move || {
            while !x.load(Ordering::SeqCst) {
                loom::thread::yield_now();
            }
            if y.load(Ordering::SeqCst) {
                z.fetch_add(1, Ordering::Relaxed);
            }
        });
        let t2 = thread::spawn(move || {
            while !y.load(Ordering::SeqCst) {
                loom::thread::yield_now();
            }
            if x.load(Ordering::SeqCst) {
                z.fetch_add(1, Ordering::Relaxed);
            }
        });
        x.store(true, Ordering::SeqCst);
        t1.join().unwrap();
        t2.join().unwrap();
        // Read z but not while holding the lock, since the read goes into loom innards.
        let z = z.load(Ordering::SeqCst);
        assert_ne!(z, 0, "z == 0 is not possible with SeqCst");
        seen.lock().unwrap().insert(z);
    });
    let seen = seen.lock().unwrap();
    assert!(seen.contains(&1));
    assert!(seen.contains(&2));
    assert_eq!(seen.len(), 2);
}

View file

@@ -0,0 +1,125 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::mpsc::channel;
use loom::thread;
#[test]
fn initial_thread() {
loom::model(|| {
thread::current().id(); // can call id()
assert_eq!(None, thread::current().name());
});
}
#[test]
fn many_joins() {
loom::model(|| {
let mut handles = vec![];
let mutex = loom::sync::Arc::new(loom::sync::Mutex::new(()));
let lock = mutex.lock().unwrap();
for _ in 1..3 {
let mutex = mutex.clone();
handles.push(thread::spawn(move || {
mutex.lock().unwrap();
}));
}
std::mem::drop(lock);
for handle in handles.into_iter() {
let _ = handle.join();
}
})
}
#[test]
fn alt_join() {
loom::model(|| {
use loom::sync::{Arc, Mutex};
let arcmut: Arc<Mutex<Option<thread::JoinHandle<()>>>> = Arc::new(Mutex::new(None));
let lock = arcmut.lock().unwrap();
let arcmut2 = arcmut.clone();
let th1 = thread::spawn(|| {});
let th2 = thread::spawn(move || {
arcmut2.lock().unwrap();
let _ = th1.join();
});
let th3 = thread::spawn(move || {});
std::mem::drop(lock);
let _ = th3.join();
let _ = th2.join();
})
}
#[test]
fn threads_have_unique_ids() {
loom::model(|| {
let (tx, rx) = channel();
let th1 = thread::spawn(move || tx.send(thread::current().id()));
let thread_id_1 = rx.recv().unwrap();
assert_eq!(th1.thread().id(), thread_id_1);
assert_ne!(thread::current().id(), thread_id_1);
let _ = th1.join();
let (tx, rx) = channel();
let th2 = thread::spawn(move || tx.send(thread::current().id()));
let thread_id_2 = rx.recv().unwrap();
assert_eq!(th2.thread().id(), thread_id_2);
assert_ne!(thread::current().id(), thread_id_2);
assert_ne!(thread_id_1, thread_id_2);
let _ = th2.join();
})
}
#[test]
fn thread_names() {
loom::model(|| {
let (tx, rx) = channel();
let th = thread::spawn(move || tx.send(thread::current().name().map(|s| s.to_string())));
assert_eq!(None, rx.recv().unwrap());
assert_eq!(None, th.thread().name());
let _ = th.join();
let (tx, rx) = channel();
let th = thread::Builder::new()
.spawn(move || tx.send(thread::current().name().map(|s| s.to_string())))
.unwrap();
assert_eq!(None, rx.recv().unwrap());
assert_eq!(None, th.thread().name());
let _ = th.join();
let (tx, rx) = channel();
let th = thread::Builder::new()
.name("foobar".to_string())
.spawn(move || tx.send(thread::current().name().map(|s| s.to_string())))
.unwrap();
assert_eq!(Some("foobar".to_string()), rx.recv().unwrap());
assert_eq!(Some("foobar"), th.thread().name());
let _ = th.join();
})
}
#[test]
fn park_unpark_loom() {
loom::model(|| {
println!("unpark");
thread::current().unpark();
println!("park");
thread::park();
println!("it did not deadlock");
});
}
#[test]
fn park_unpark_std() {
println!("unpark");
std::thread::current().unpark();
println!("park");
std::thread::park();
println!("it did not deadlock");
}

View file

@@ -0,0 +1,109 @@
#![deny(warnings, rust_2018_idioms)]
use loom::thread;
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};
#[test]
fn thread_local() {
    loom::thread_local! {
        static THREAD_LOCAL: RefCell<usize> = RefCell::new(1);
    }

    // Every thread (including the model's main thread) gets its own copy
    // initialized to 1; writes made by one thread are invisible to others.
    fn exercise(val: usize) {
        THREAD_LOCAL.with(|cell| {
            assert_eq!(*cell.borrow(), 1);
        });
        THREAD_LOCAL.with(|cell| {
            assert_eq!(*cell.borrow(), 1);
            *cell.borrow_mut() = val;
            assert_eq!(*cell.borrow(), val);
        });
        THREAD_LOCAL.with(|cell| {
            assert_eq!(*cell.borrow(), val);
        });
    }

    loom::model(|| {
        let a = thread::spawn(|| exercise(2));
        let b = thread::spawn(|| exercise(3));
        exercise(4);
        a.join().unwrap();
        b.join().unwrap();
    });
}
#[test]
fn nested_with() {
    loom::thread_local! {
        static LOCAL1: RefCell<usize> = RefCell::new(1);
        static LOCAL2: RefCell<usize> = RefCell::new(2);
    }

    loom::model(|| {
        // Accessing one thread-local from inside another's `with` closure
        // must work (no deadlock, no panic).
        LOCAL1.with(|outer| {
            let inner_value = LOCAL2.with(|inner| *inner.borrow());
            *outer.borrow_mut() = inner_value;
        });
    });
}
#[test]
fn drop() {
    // Verifies that each thread's copy of a loom thread-local is dropped
    // when that thread terminates, and that the model main thread's copy is
    // dropped when the model closure finishes.

    // Global drop counter shared by every thread's copy of the local.
    static DROPS: AtomicUsize = AtomicUsize::new(0);

    struct CountDrops {
        drops: &'static AtomicUsize,
        // No semantic meaning; read in the closures below purely to force
        // lazy initialization of the thread-local.
        dummy: bool,
    }

    impl Drop for CountDrops {
        fn drop(&mut self) {
            self.drops.fetch_add(1, Ordering::Release);
        }
    }

    impl CountDrops {
        fn new(drops: &'static AtomicUsize) -> Self {
            Self { drops, dummy: true }
        }
    }

    loom::thread_local! {
        static DROPPED_LOCAL: CountDrops = CountDrops::new(&DROPS);
    }

    loom::model(|| {
        // Nothing has touched the local yet, so nothing has been dropped.
        assert_eq!(DROPS.load(Ordering::Acquire), 0);

        thread::spawn(|| {
            // force access to the thread local so that it's initialized.
            DROPPED_LOCAL.with(|local| assert!(local.dummy));
            assert_eq!(DROPS.load(Ordering::Acquire), 0);
        })
        .join()
        .unwrap();

        // When the first spawned thread completed, its copy of the thread local
        // should have been dropped.
        assert_eq!(DROPS.load(Ordering::Acquire), 1);

        thread::spawn(|| {
            // force access to the thread local so that it's initialized.
            DROPPED_LOCAL.with(|local| assert!(local.dummy));
            assert_eq!(DROPS.load(Ordering::Acquire), 1);
        })
        .join()
        .unwrap();

        // When the second spawned thread completed, its copy of the thread local
        // should have been dropped as well.
        assert_eq!(DROPS.load(Ordering::Acquire), 2);

        // force access to the thread local so that it's initialized.
        DROPPED_LOCAL.with(|local| assert!(local.dummy));
    });

    // Finally, when the model's "main thread" completes, its copy of the local
    // should also be dropped.
    assert_eq!(DROPS.load(Ordering::Acquire), 3);
}

View file

@ -0,0 +1,335 @@
#![deny(warnings, rust_2018_idioms)]
use loom::cell::UnsafeCell;
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::sync::atomic::Ordering::{Acquire, Release};
use std::sync::Arc;
#[test]
fn atomic_causality_success() {
    // One-shot publication cell: `set` writes `data`, then release-stores
    // `guard`; `get` only reads `data` after an acquire-load observes the
    // flag, so the read is properly ordered after the write (no race).
    struct Chan {
        data: UnsafeCell<usize>,
        guard: AtomicUsize,
    }

    impl Chan {
        fn set(&self) {
            unsafe {
                self.data.with_mut(|v| {
                    *v += 123;
                });
            }
            // Release: the data write above happens-before any acquire-load
            // that observes this store.
            self.guard.store(1, Release);
        }

        fn get(&self) {
            // Bail out unless the flag has been observed; this is what makes
            // the subsequent read race-free.
            if 0 == self.guard.load(Acquire) {
                return;
            }
            unsafe {
                self.data.with(|v| {
                    assert_eq!(*v, 123);
                });
            }
        }
    }

    loom::model(|| {
        let chan = Arc::new(Chan {
            data: UnsafeCell::new(0),
            guard: AtomicUsize::new(0),
        });

        let th = {
            let chan = chan.clone();
            thread::spawn(move || {
                chan.set();
            })
        };

        // Try getting before joining
        chan.get();
        th.join().unwrap();
        // After join, the writer thread's effects are visible, so this call
        // sees guard == 1 and asserts on the published value.
        chan.get();
    });
}
#[test]
#[should_panic]
fn atomic_causality_fail() {
    // Same shape as `atomic_causality_success`, but `get` skips the guard
    // check, so its read of `data` is unordered with respect to `set`'s
    // write. Loom must detect the data race — the resulting panic is what
    // makes this `should_panic` test pass.
    struct Chan {
        data: UnsafeCell<usize>,
        guard: AtomicUsize,
    }

    impl Chan {
        fn set(&self) {
            unsafe {
                self.data.with_mut(|v| {
                    *v += 123;
                });
            }
            self.guard.store(1, Release);
        }

        fn get(&self) {
            // No acquire-load of `guard` here: this read is not ordered
            // after the writer's store — that is the race.
            unsafe {
                self.data.with(|v| {
                    assert_eq!(*v, 123);
                });
            }
        }
    }

    loom::model(|| {
        let chan = Arc::new(Chan {
            data: UnsafeCell::new(0),
            guard: AtomicUsize::new(0),
        });

        let th = {
            let chan = chan.clone();
            thread::spawn(move || chan.set())
        };

        // Try getting before joining
        chan.get();
        th.join().unwrap();
        chan.get();
    });
}
/// Clonable handle to a single loom-tracked `UnsafeCell<usize>`; clones
/// share the same cell through the `Arc`.
#[derive(Clone)]
struct Data(Arc<UnsafeCell<usize>>);
impl Data {
    /// Wrap `v` in a shared, loom-tracked cell.
    fn new(v: usize) -> Self {
        let cell = UnsafeCell::new(v);
        Data(Arc::new(cell))
    }

    /// Immutable (read-only) access: return the current value.
    fn get(&self) -> usize {
        self.0.with(|ptr| unsafe { *ptr })
    }

    /// Mutable access: bump the value and return the result.
    fn inc(&self) -> usize {
        self.0.with_mut(|ptr| unsafe {
            *ptr += 1;
            *ptr
        })
    }
}
#[test]
#[should_panic]
fn unsafe_cell_race_mut_mut_1() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        // Mutable access in the spawned thread races with the unsynchronized
        // mutable access on the main thread (mut/mut race → loom panics).
        let th1 = thread::spawn(move || x.inc());
        y.inc();
        th1.join().unwrap();
        // 1 → 2 (thread) → 3 (main) → 4 here, were the race not detected.
        assert_eq!(4, y.inc());
    });
}
#[test]
#[should_panic]
fn unsafe_cell_race_mut_mut_2() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        let z = x.clone();
        // Two spawned threads both mutate the same cell with no ordering
        // between them (mut/mut race → loom panics).
        let th1 = thread::spawn(move || x.inc());
        let th2 = thread::spawn(move || y.inc());
        th1.join().unwrap();
        th2.join().unwrap();
        assert_eq!(4, z.inc());
    });
}
#[test]
#[should_panic]
fn unsafe_cell_race_mut_immut_1() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        // Mutable access in the spawned thread races with the concurrent
        // immutable read on the main thread (mut/immut race → loom panics).
        let th1 = thread::spawn(move || assert_eq!(2, x.inc()));
        y.get();
        th1.join().unwrap();
        assert_eq!(3, y.inc());
    });
}
#[test]
#[should_panic]
fn unsafe_cell_race_mut_immut_2() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        // Immutable read in the spawned thread races with the mutable
        // access on the main thread before the join (mut/immut race).
        let th1 = thread::spawn(move || x.get());
        assert_eq!(2, y.inc());
        th1.join().unwrap();
        assert_eq!(3, y.inc());
    });
}
#[test]
#[should_panic]
fn unsafe_cell_race_mut_immut_3() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        let z = x.clone();
        // One spawned thread mutates while the other reads, with no
        // ordering between the two threads (mut/immut race → loom panics).
        let th1 = thread::spawn(move || assert_eq!(2, x.inc()));
        let th2 = thread::spawn(move || y.get());
        th1.join().unwrap();
        th2.join().unwrap();
        assert_eq!(3, z.inc());
    });
}
#[test]
#[should_panic]
fn unsafe_cell_race_mut_immut_4() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        let z = x.clone();
        // Mirror of the previous case: reader spawned first, writer second;
        // still unordered, still a mut/immut race.
        let th1 = thread::spawn(move || x.get());
        let th2 = thread::spawn(move || assert_eq!(2, y.inc()));
        th1.join().unwrap();
        th2.join().unwrap();
        assert_eq!(3, z.inc());
    });
}
#[test]
#[should_panic]
fn unsafe_cell_race_mut_immut_5() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        let z = x.clone();
        let th1 = thread::spawn(move || x.get());
        let th2 = thread::spawn(move || {
            // The read here is fine against th1's read (immut/immut), but
            // the following mutable access races with th1's read.
            assert_eq!(1, y.get());
            assert_eq!(2, y.inc());
        });
        th1.join().unwrap();
        th2.join().unwrap();
        assert_eq!(3, z.inc());
    });
}
#[test]
fn unsafe_cell_ok_1() {
    loom::model(|| {
        // The cell is handed from the main thread to the worker and back
        // through the join handle, so every mutable access is ordered by
        // spawn/join edges — no race.
        let data = Data::new(1);
        assert_eq!(2, data.inc());

        let worker = thread::spawn(move || {
            assert_eq!(3, data.inc());
            data
        });

        let data = worker.join().unwrap();
        assert_eq!(4, data.inc());
    });
}
#[test]
fn unsafe_cell_ok_2() {
    loom::model(|| {
        // Same spawn/join hand-off as above, with immutable reads
        // interleaved; all accesses remain fully ordered.
        let data = Data::new(1);
        assert_eq!(1, data.get());
        assert_eq!(2, data.inc());

        let worker = thread::spawn(move || {
            assert_eq!(2, data.get());
            assert_eq!(3, data.inc());
            data
        });

        let data = worker.join().unwrap();
        assert_eq!(3, data.get());
        assert_eq!(4, data.inc());
    });
}
#[test]
fn unsafe_cell_ok_3() {
    loom::model(|| {
        let x = Data::new(1);
        let y = x.clone();
        // Concurrent *immutable* reads (main thread, th1, and nested th2)
        // are all allowed; the single mutable access happens only after
        // every reader has been joined, so it is ordered after all reads.
        let th1 = thread::spawn(move || {
            assert_eq!(1, x.get());
            let z = x.clone();
            let th2 = thread::spawn(move || {
                assert_eq!(1, z.get());
            });
            assert_eq!(1, x.get());
            th2.join().unwrap();
        });
        assert_eq!(1, y.get());
        th1.join().unwrap();
        assert_eq!(2, y.inc());
    });
}
#[test]
#[should_panic]
fn unsafe_cell_access_after_sync() {
    loom::model(|| {
        let s1 = Arc::new((AtomicUsize::new(0), UnsafeCell::new(0)));
        let s2 = s1.clone();
        // The writer release-stores the flag *before* writing the cell, so
        // a reader that observes flag == 1 is NOT ordered after the cell
        // write — the two `with_mut` accesses race and loom must panic.
        thread::spawn(move || {
            s1.0.store(1, Release);
            s1.1.with_mut(|ptr| unsafe { *ptr = 1 });
        });
        if 1 == s2.0.load(Acquire) {
            s2.1.with_mut(|ptr| unsafe { *ptr = 2 });
        }
    });
}

29
third-party/vendor/loom/tests/yield.rs vendored Normal file
View file

@ -0,0 +1,29 @@
#![deny(warnings, rust_2018_idioms)]
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::Arc;
#[test]
fn yield_completes() {
    loom::model(|| {
        let flag = Arc::new(AtomicUsize::new(0));

        {
            let flag = flag.clone();
            thread::spawn(move || {
                flag.store(1, Relaxed);
            });
        }

        // Spin until the spawned thread flips the flag; yielding on every
        // miss lets the model schedule the writer so the loop terminates.
        while flag.load(Relaxed) != 1 {
            loom::thread::yield_now();
        }
    });
}