Vendor dependencies

Let's see how I like this workflow.
This commit is contained in:
John Doty 2022-12-19 08:27:18 -08:00
parent 34d1830413
commit 9c435dc440
7500 changed files with 1665121 additions and 99 deletions

2
vendor/tokio/tests/_require_full.rs vendored Normal file
View file

@@ -0,0 +1,2 @@
// Guard file: fails the build with a clear message when the integration test
// suite is compiled without `--features full` (unless targeting Wasm, where
// the reduced feature set is expected).
#![cfg(not(any(feature = "full", tokio_wasm)))]
compile_error!("run main Tokio tests with `--features full`");

724
vendor/tokio/tests/async_send_sync.rs vendored Normal file
View file

@@ -0,0 +1,724 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![allow(clippy::type_complexity, clippy::diverging_sub_expression)]
use std::cell::Cell;
use std::future::Future;
use std::io::SeekFrom;
use std::net::SocketAddr;
use std::pin::Pin;
use std::rc::Rc;
use tokio::net::TcpStream;
use tokio::time::{Duration, Instant};
// The names of these structs behave better when sorted.
// Probe types with known auto-trait properties, used to instantiate the
// generic types under test.
// Send: Yes, Sync: Yes
#[derive(Clone)]
struct YY {}
// Send: Yes, Sync: No (`Cell` is Send but not Sync)
#[derive(Clone)]
struct YN {
_value: Cell<u8>,
}
// Send: No, Sync: No (`Rc` is neither Send nor Sync)
#[derive(Clone)]
struct NN {
_value: Rc<u8>,
}
// Boxed-future and boxed-IO aliases with successively weaker auto-trait
// bounds; used as argument types to drive the assertions below.
#[allow(dead_code)]
type BoxFutureSync<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send + Sync>>;
#[allow(dead_code)]
type BoxFutureSend<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send>>;
#[allow(dead_code)]
type BoxFuture<T> = std::pin::Pin<Box<dyn std::future::Future<Output = T>>>;
#[allow(dead_code)]
type BoxAsyncRead = std::pin::Pin<Box<dyn tokio::io::AsyncBufRead + Send + Sync>>;
#[allow(dead_code)]
type BoxAsyncSeek = std::pin::Pin<Box<dyn tokio::io::AsyncSeek + Send + Sync>>;
#[allow(dead_code)]
type BoxAsyncWrite = std::pin::Pin<Box<dyn tokio::io::AsyncWrite + Send + Sync>>;
// Positive assertions: these only compile when `T` actually has the trait.
#[allow(dead_code)]
fn require_send<T: Send>(_t: &T) {}
#[allow(dead_code)]
fn require_sync<T: Sync>(_t: &T) {}
#[allow(dead_code)]
fn require_unpin<T: Unpin>(_t: &T) {}
// Negative assertions rely on method-resolution ambiguity. Each
// `AmbiguousIf*` trait gets a blanket impl for every type (with `A = ()`)
// plus a second impl gated on the auto trait (with `A = Invalid`). Calling
// `AmbiguousIf*::some_item(&value)` therefore fails to compile (ambiguous)
// when the type HAS the trait, and compiles only when it does NOT.
#[allow(dead_code)]
struct Invalid;
trait AmbiguousIfSend<A> {
fn some_item(&self) {}
}
impl<T: ?Sized> AmbiguousIfSend<()> for T {}
impl<T: ?Sized + Send> AmbiguousIfSend<Invalid> for T {}
trait AmbiguousIfSync<A> {
fn some_item(&self) {}
}
impl<T: ?Sized> AmbiguousIfSync<()> for T {}
impl<T: ?Sized + Sync> AmbiguousIfSync<Invalid> for T {}
trait AmbiguousIfUnpin<A> {
fn some_item(&self) {}
}
impl<T: ?Sized> AmbiguousIfUnpin<()> for T {}
impl<T: ?Sized + Unpin> AmbiguousIfUnpin<Invalid> for T {}
// Produces a value of an arbitrary type without constructing one: `todo!()`
// never returns, so this type-checks for any `$typ`. Only ever used inside
// `const _: fn()` bodies that are type-checked but never executed.
macro_rules! into_todo {
($typ:ty) => {{
let x: $typ = todo!();
x
}};
}
// The three helper macros below dispatch on the `Send & Sync & Unpin` token
// pattern: an un-negated trait selects the positive `require_*` assertion,
// while a leading `!` selects the ambiguity-based negative assertion. The
// `$(!)?` fragments swallow the negation markers of the traits a given
// helper does not care about.
macro_rules! async_assert_fn_send {
(Send & $(!)?Sync & $(!)?Unpin, $value:expr) => {
require_send(&$value);
};
(!Send & $(!)?Sync & $(!)?Unpin, $value:expr) => {
AmbiguousIfSend::some_item(&$value);
};
}
macro_rules! async_assert_fn_sync {
($(!)?Send & Sync & $(!)?Unpin, $value:expr) => {
require_sync(&$value);
};
($(!)?Send & !Sync & $(!)?Unpin, $value:expr) => {
AmbiguousIfSync::some_item(&$value);
};
}
macro_rules! async_assert_fn_unpin {
($(!)?Send & $(!)?Sync & Unpin, $value:expr) => {
require_unpin(&$value);
};
($(!)?Send & $(!)?Sync & !Unpin, $value:expr) => {
AmbiguousIfUnpin::some_item(&$value);
};
}
// Asserts the Send/Sync/Unpin properties of the future returned by calling
// `$f` with placeholder arguments (built via `into_todo!`). The whole call
// lives in a `const _: fn()` closure, so it is type-checked at compile time
// but never run.
macro_rules! async_assert_fn {
($($f:ident $(< $($generic:ty),* > )? )::+($($arg:ty),*): $($tok:tt)*) => {
#[allow(unreachable_code)]
#[allow(unused_variables)]
const _: fn() = || {
let f = $($f $(::<$($generic),*>)? )::+( $( into_todo!($arg) ),* );
async_assert_fn_send!($($tok)*, f);
async_assert_fn_sync!($($tok)*, f);
async_assert_fn_unpin!($($tok)*, f);
};
};
}
// Same idea for a plain value of type `$type` rather than a returned future.
macro_rules! assert_value {
($type:ty: $($tok:tt)*) => {
#[allow(unreachable_code)]
#[allow(unused_variables)]
const _: fn() = || {
let f: $type = todo!();
async_assert_fn_send!($($tok)*, f);
async_assert_fn_sync!($($tok)*, f);
async_assert_fn_unpin!($($tok)*, f);
};
};
}
// Emits the wrapped items only when NOT compiling for the `tokio_wasi`
// target (mirrors the cfg macros used inside the tokio crate itself).
macro_rules! cfg_not_wasi {
($($item:item)*) => {
$(
#[cfg(not(tokio_wasi))]
$item
)*
}
}
// Hand-rolled equivalent of `async_assert_fn` for `poll_fn`: the macro
// cannot express this case because the closure argument is too involved to
// build with `into_todo!`.
const _: fn() = || {
    let marker = std::marker::PhantomPinned;
    let future = tokio::macros::support::poll_fn(move |_| {
        // Capture `marker` by value so the future holds a `PhantomPinned`
        // and is therefore `!Unpin`.
        let _ = &marker;
        std::task::Poll::Pending::<()>
    });
    // Expected properties: Send & Sync & !Unpin.
    require_send(&future);
    require_sync(&future);
    AmbiguousIfUnpin::some_item(&future);
};
// Filesystem types and free functions. Compiled out on WASI targets
// (`cfg_not_wasi!` expands each item under `#[cfg(not(tokio_wasi))]`).
cfg_not_wasi! {
mod fs {
use super::*;
assert_value!(tokio::fs::DirBuilder: Send & Sync & Unpin);
assert_value!(tokio::fs::DirEntry: Send & Sync & Unpin);
assert_value!(tokio::fs::File: Send & Sync & Unpin);
assert_value!(tokio::fs::OpenOptions: Send & Sync & Unpin);
assert_value!(tokio::fs::ReadDir: Send & Sync & Unpin);
async_assert_fn!(tokio::fs::canonicalize(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::copy(&str, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::create_dir(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::create_dir_all(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::hard_link(&str, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::metadata(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read_dir(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read_link(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::read_to_string(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::remove_dir(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::remove_dir_all(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::remove_file(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::rename(&str, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::set_permissions(&str, std::fs::Permissions): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::symlink_metadata(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::write(&str, Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::ReadDir::next_entry(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::OpenOptions::open(_, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::DirBuilder::create(_, &str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::DirEntry::metadata(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::DirEntry::file_type(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::open(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::create(&str): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::sync_all(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::sync_data(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::set_len(_, u64): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::metadata(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::try_clone(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::fs::File::into_std(_): Send & Sync & !Unpin);
async_assert_fn!(
tokio::fs::File::set_permissions(_, std::fs::Permissions): Send & Sync & !Unpin
);
}
}
// TCP. Socket creation and connecting are gated off on WASI; the stream and
// listener types themselves are asserted unconditionally below.
cfg_not_wasi! {
assert_value!(tokio::net::TcpSocket: Send & Sync & Unpin);
async_assert_fn!(tokio::net::TcpListener::bind(SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::connect(SocketAddr): Send & Sync & !Unpin);
}
assert_value!(tokio::net::TcpListener: Send & Sync & Unpin);
assert_value!(tokio::net::TcpStream: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::OwnedReadHalf: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::OwnedWriteHalf: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::ReadHalf<'_>: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::ReuniteError: Send & Sync & Unpin);
assert_value!(tokio::net::tcp::WriteHalf<'_>: Send & Sync & Unpin);
async_assert_fn!(tokio::net::TcpListener::accept(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::peek(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::readable(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::TcpStream::writable(_): Send & Sync & !Unpin);
// Wasi does not support UDP
cfg_not_wasi! {
mod udp_socket {
use super::*;
assert_value!(tokio::net::UdpSocket: Send & Sync & Unpin);
async_assert_fn!(tokio::net::UdpSocket::bind(SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::connect(_, SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::peek_from(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::readable(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::recv(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::recv_from(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::send(_, &[u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::send_to(_, &[u8], SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::UdpSocket::writable(_): Send & Sync & !Unpin);
}
}
// DNS lookup and the borrowed TCP read-half peek.
async_assert_fn!(tokio::net::lookup_host(SocketAddr): Send & Sync & !Unpin);
async_assert_fn!(tokio::net::tcp::ReadHalf::peek(_, &mut [u8]): Send & Sync & !Unpin);
// Unix-domain sockets (datagram and stream), Unix targets only.
#[cfg(unix)]
mod unix_datagram {
use super::*;
use tokio::net::*;
assert_value!(UnixDatagram: Send & Sync & Unpin);
assert_value!(UnixListener: Send & Sync & Unpin);
assert_value!(UnixStream: Send & Sync & Unpin);
assert_value!(unix::OwnedReadHalf: Send & Sync & Unpin);
assert_value!(unix::OwnedWriteHalf: Send & Sync & Unpin);
assert_value!(unix::ReadHalf<'_>: Send & Sync & Unpin);
assert_value!(unix::ReuniteError: Send & Sync & Unpin);
assert_value!(unix::SocketAddr: Send & Sync & Unpin);
assert_value!(unix::UCred: Send & Sync & Unpin);
assert_value!(unix::WriteHalf<'_>: Send & Sync & Unpin);
async_assert_fn!(UnixDatagram::readable(_): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::recv(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::recv_from(_, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::send(_, &[u8]): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::send_to(_, &[u8], &str): Send & Sync & !Unpin);
async_assert_fn!(UnixDatagram::writable(_): Send & Sync & !Unpin);
async_assert_fn!(UnixListener::accept(_): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::connect(&str): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::readable(_): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(UnixStream::writable(_): Send & Sync & !Unpin);
}
// Windows named pipes, Windows targets only.
#[cfg(windows)]
mod windows_named_pipe {
use super::*;
use tokio::net::windows::named_pipe::*;
assert_value!(ClientOptions: Send & Sync & Unpin);
assert_value!(NamedPipeClient: Send & Sync & Unpin);
assert_value!(NamedPipeServer: Send & Sync & Unpin);
assert_value!(PipeEnd: Send & Sync & Unpin);
assert_value!(PipeInfo: Send & Sync & Unpin);
assert_value!(PipeMode: Send & Sync & Unpin);
assert_value!(ServerOptions: Send & Sync & Unpin);
async_assert_fn!(NamedPipeClient::readable(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeClient::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeClient::writable(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::connect(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::readable(_): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::ready(_, tokio::io::Interest): Send & Sync & !Unpin);
async_assert_fn!(NamedPipeServer::writable(_): Send & Sync & !Unpin);
}
// Child processes and Ctrl-C handling; neither is available on WASI.
cfg_not_wasi! {
mod test_process {
use super::*;
assert_value!(tokio::process::Child: Send & Sync & Unpin);
assert_value!(tokio::process::ChildStderr: Send & Sync & Unpin);
assert_value!(tokio::process::ChildStdin: Send & Sync & Unpin);
assert_value!(tokio::process::ChildStdout: Send & Sync & Unpin);
assert_value!(tokio::process::Command: Send & Sync & Unpin);
async_assert_fn!(tokio::process::Child::kill(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::process::Child::wait(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::process::Child::wait_with_output(_): Send & Sync & !Unpin);
}
async_assert_fn!(tokio::signal::ctrl_c(): Send & Sync & !Unpin);
}
// Platform-specific signal handling types.
#[cfg(unix)]
mod unix_signal {
use super::*;
assert_value!(tokio::signal::unix::Signal: Send & Sync & Unpin);
assert_value!(tokio::signal::unix::SignalKind: Send & Sync & Unpin);
async_assert_fn!(tokio::signal::unix::Signal::recv(_): Send & Sync & !Unpin);
}
#[cfg(windows)]
mod windows_signal {
use super::*;
assert_value!(tokio::signal::windows::CtrlC: Send & Sync & Unpin);
assert_value!(tokio::signal::windows::CtrlBreak: Send & Sync & Unpin);
async_assert_fn!(tokio::signal::windows::CtrlC::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::signal::windows::CtrlBreak::recv(_): Send & Sync & !Unpin);
}
// tokio::sync and tokio::task values. The YY/YN/NN probe types (defined at
// the top of the file) show how each container's auto-trait properties
// follow from its payload's Send/Sync properties.
assert_value!(tokio::sync::AcquireError: Send & Sync & Unpin);
assert_value!(tokio::sync::Barrier: Send & Sync & Unpin);
assert_value!(tokio::sync::BarrierWaitResult: Send & Sync & Unpin);
assert_value!(tokio::sync::MappedMutexGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::MappedMutexGuard<'_, YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::MappedMutexGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::Mutex<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::Mutex<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::Mutex<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::MutexGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::MutexGuard<'_, YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::MutexGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::Notify: Send & Sync & Unpin);
assert_value!(tokio::sync::OnceCell<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OnceCell<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::OnceCell<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedMutexGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedMutexGuard<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedMutexGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockMappedWriteGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockReadGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockReadGuard<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockReadGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockWriteGuard<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockWriteGuard<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::OwnedRwLockWriteGuard<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::OwnedSemaphorePermit: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLock<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLock<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLock<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockMappedWriteGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLockReadGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockReadGuard<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockReadGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::RwLockWriteGuard<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockWriteGuard<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::RwLockWriteGuard<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::Semaphore: Send & Sync & Unpin);
assert_value!(tokio::sync::SemaphorePermit<'_>: Send & Sync & Unpin);
assert_value!(tokio::sync::TryAcquireError: Send & Sync & Unpin);
assert_value!(tokio::sync::TryLockError: Send & Sync & Unpin);
// Channel endpoints (broadcast, mpsc, oneshot, watch).
assert_value!(tokio::sync::broadcast::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::broadcast::Receiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::broadcast::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::broadcast::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::broadcast::Sender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::broadcast::Sender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::futures::Notified<'_>: Send & Sync & !Unpin);
assert_value!(tokio::sync::mpsc::OwnedPermit<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::OwnedPermit<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::OwnedPermit<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Permit<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::Permit<'_, YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Permit<'_, YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::Receiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::Sender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::Sender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedReceiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedReceiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedReceiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedSender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedSender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::UnboundedSender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendError<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendError<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendError<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendTimeoutError<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::SendTimeoutError<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::TrySendError<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::TrySendError<YN>: Send & !Sync & Unpin);
assert_value!(tokio::sync::mpsc::error::TrySendError<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::oneshot::Receiver<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::oneshot::Sender<YN>: Send & Sync & Unpin);
assert_value!(tokio::sync::oneshot::Sender<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::watch::Receiver<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Receiver<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Receiver<YY>: Send & Sync & Unpin);
assert_value!(tokio::sync::watch::Ref<'_, NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Ref<'_, YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Ref<'_, YY>: !Send & Sync & Unpin);
assert_value!(tokio::sync::watch::Sender<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Sender<YN>: !Send & !Sync & Unpin);
assert_value!(tokio::sync::watch::Sender<YY>: Send & Sync & Unpin);
// Task handles and sets.
assert_value!(tokio::task::JoinError: Send & Sync & Unpin);
assert_value!(tokio::task::JoinHandle<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::task::JoinHandle<YN>: Send & Sync & Unpin);
assert_value!(tokio::task::JoinHandle<YY>: Send & Sync & Unpin);
assert_value!(tokio::task::JoinSet<NN>: !Send & !Sync & Unpin);
assert_value!(tokio::task::JoinSet<YN>: Send & Sync & Unpin);
assert_value!(tokio::task::JoinSet<YY>: Send & Sync & Unpin);
assert_value!(tokio::task::LocalSet: !Send & !Sync & Unpin);
// Futures returned by tokio::sync and tokio::task async methods. Each line
// asserts the Send/Sync/Unpin properties of the returned future for the
// given payload probe type.
async_assert_fn!(tokio::sync::Barrier::wait(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<NN>::lock(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<NN>::lock_owned(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YN>::lock(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YN>::lock_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YY>::lock(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Mutex<YY>::lock_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Notify::notified(_): Send & Sync & !Unpin);
// OnceCell initializer futures: the result also depends on the Send/Sync
// bounds of the boxed future the caller supplies.
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = NN>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<NN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<NN>>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YN>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send + Sync>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>> + Send>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YN>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YN>>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send + Sync>>): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY> + Send>>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_init( _, fn() -> Pin<Box<dyn Future<Output = YY>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send + Sync>>): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>> + Send>>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::OnceCell<YY>::get_or_try_init( _, fn() -> Pin<Box<dyn Future<Output = std::io::Result<YY>>>>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<NN>::read(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<NN>::write(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YN>::read(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YN>::write(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YY>::read(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::RwLock<YY>::write(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire_many(_, u32): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire_many_owned(_, u32): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::Semaphore::acquire_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::broadcast::Receiver<NN>::recv(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::broadcast::Receiver<YN>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::broadcast::Receiver<YY>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Receiver<NN>::recv(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Receiver<YN>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Receiver<YY>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::reserve_owned(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send(_, NN): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<NN>::send_timeout(_, NN, Duration): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::reserve_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send(_, YN): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YN>::send_timeout(_, YN, Duration): Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::reserve_owned(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send(_, YY): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::Sender<YY>::send_timeout(_, YY, Duration): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<NN>::recv(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YN>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedReceiver<YY>::recv(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedSender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YN>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::mpsc::UnboundedSender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::oneshot::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::oneshot::Sender<YN>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::oneshot::Sender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Receiver<NN>::changed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Receiver<YN>::changed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Receiver<YY>::changed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Sender<NN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Sender<YN>::closed(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::sync::watch::Sender<YY>::closed(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::JoinSet<Cell<u32>>::join_next(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::JoinSet<Cell<u32>>::shutdown(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::JoinSet<Rc<u32>>::join_next(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::JoinSet<Rc<u32>>::shutdown(_): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::JoinSet<u32>::join_next(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::JoinSet<u32>::shutdown(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Cell<u32>>::scope(_, Cell<u32>, BoxFutureSync<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSend<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<Rc<u32>>::scope(_, Rc<u32>, BoxFutureSync<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSync<()>): Send & Sync & !Unpin);
async_assert_fn!(tokio::task::LocalSet::run_until(_, BoxFutureSync<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::task::unconstrained(BoxFuture<()>): !Send & !Sync & Unpin);
async_assert_fn!(tokio::task::unconstrained(BoxFutureSend<()>): Send & !Sync & Unpin);
async_assert_fn!(tokio::task::unconstrained(BoxFutureSync<()>): Send & Sync & Unpin);
// Runtime handles and tokio::time types / futures.
assert_value!(tokio::runtime::Builder: Send & Sync & Unpin);
assert_value!(tokio::runtime::EnterGuard<'_>: Send & Sync & Unpin);
assert_value!(tokio::runtime::Handle: Send & Sync & Unpin);
assert_value!(tokio::runtime::Runtime: Send & Sync & Unpin);
assert_value!(tokio::time::Interval: Send & Sync & Unpin);
assert_value!(tokio::time::Instant: Send & Sync & Unpin);
assert_value!(tokio::time::Sleep: Send & Sync & !Unpin);
// Timeout wraps the inner future, so it inherits that future's bounds.
assert_value!(tokio::time::Timeout<BoxFutureSync<()>>: Send & Sync & !Unpin);
assert_value!(tokio::time::Timeout<BoxFutureSend<()>>: Send & !Sync & !Unpin);
assert_value!(tokio::time::Timeout<BoxFuture<()>>: !Send & !Sync & !Unpin);
assert_value!(tokio::time::error::Elapsed: Send & Sync & Unpin);
assert_value!(tokio::time::error::Error: Send & Sync & Unpin);
async_assert_fn!(tokio::time::advance(Duration): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::sleep(Duration): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::sleep_until(Instant): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSync<()>): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::timeout(Duration, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::timeout(Duration, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSync<()>): Send & Sync & !Unpin);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFutureSend<()>): Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::timeout_at(Instant, BoxFuture<()>): !Send & !Sync & !Unpin);
async_assert_fn!(tokio::time::Interval::tick(_): Send & Sync & !Unpin);
// tokio::io wrapper types and extension-trait futures (this list continues
// past the end of this chunk in the full file).
assert_value!(tokio::io::BufReader<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::BufStream<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::BufWriter<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::DuplexStream: Send & Sync & Unpin);
assert_value!(tokio::io::Empty: Send & Sync & Unpin);
assert_value!(tokio::io::Interest: Send & Sync & Unpin);
assert_value!(tokio::io::Lines<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::ReadBuf<'_>: Send & Sync & Unpin);
assert_value!(tokio::io::ReadHalf<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::Ready: Send & Sync & Unpin);
assert_value!(tokio::io::Repeat: Send & Sync & Unpin);
assert_value!(tokio::io::Sink: Send & Sync & Unpin);
assert_value!(tokio::io::Split<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::Stderr: Send & Sync & Unpin);
assert_value!(tokio::io::Stdin: Send & Sync & Unpin);
assert_value!(tokio::io::Stdout: Send & Sync & Unpin);
assert_value!(tokio::io::Take<TcpStream>: Send & Sync & Unpin);
assert_value!(tokio::io::WriteHalf<TcpStream>: Send & Sync & Unpin);
async_assert_fn!(tokio::io::copy(&mut TcpStream, &mut TcpStream): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::copy_bidirectional(&mut TcpStream, &mut TcpStream): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::copy_buf(&mut tokio::io::BufReader<TcpStream>, &mut TcpStream): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::empty(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::repeat(u8): Send & Sync & Unpin);
async_assert_fn!(tokio::io::sink(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::split(TcpStream): Send & Sync & Unpin);
async_assert_fn!(tokio::io::stderr(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::stdin(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::stdout(): Send & Sync & Unpin);
async_assert_fn!(tokio::io::Split<tokio::io::BufReader<TcpStream>>::next_segment(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::Lines<tokio::io::BufReader<TcpStream>>::next_line(_): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncBufReadExt::read_until(&mut BoxAsyncRead, u8, &mut Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncBufReadExt::read_line(&mut BoxAsyncRead, &mut String): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncBufReadExt::fill_buf(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read(&mut BoxAsyncRead, &mut [u8]): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_buf(&mut BoxAsyncRead, &mut Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncReadExt::read_exact(&mut BoxAsyncRead, &mut [u8]): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncReadExt::read_u8(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i8(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u16(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i16(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u32(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i32(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u64(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i64(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u128(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i128(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_f32(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_f64(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u16_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i16_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_u128_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_i128_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_f32_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_f64_le(&mut BoxAsyncRead): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncReadExt::read_to_end(&mut BoxAsyncRead, &mut Vec<u8>): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncReadExt::read_to_string(&mut BoxAsyncRead, &mut String): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncSeekExt::seek(&mut BoxAsyncSeek, SeekFrom): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncSeekExt::stream_position(&mut BoxAsyncSeek): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write(&mut BoxAsyncWrite, &[u8]): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_vectored(&mut BoxAsyncWrite, _): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_buf(&mut BoxAsyncWrite, &mut bytes::Bytes): Send
& Sync
& !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_all_buf(&mut BoxAsyncWrite, &mut bytes::Bytes): Send
& Sync
& !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_all(&mut BoxAsyncWrite, &[u8]): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncWriteExt::write_u8(&mut BoxAsyncWrite, u8): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::write_i8(&mut BoxAsyncWrite, i8): Send & Sync & !Unpin);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u16(&mut BoxAsyncWrite, u16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i16(&mut BoxAsyncWrite, i16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u32(&mut BoxAsyncWrite, u32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i32(&mut BoxAsyncWrite, i32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u64(&mut BoxAsyncWrite, u64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i64(&mut BoxAsyncWrite, i64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u128(&mut BoxAsyncWrite, u128): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i128(&mut BoxAsyncWrite, i128): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_f32(&mut BoxAsyncWrite, f32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_f64(&mut BoxAsyncWrite, f64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u16_le(&mut BoxAsyncWrite, u16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i16_le(&mut BoxAsyncWrite, i16): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u32_le(&mut BoxAsyncWrite, u32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i32_le(&mut BoxAsyncWrite, i32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u64_le(&mut BoxAsyncWrite, u64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i64_le(&mut BoxAsyncWrite, i64): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_u128_le(&mut BoxAsyncWrite, u128): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_i128_le(&mut BoxAsyncWrite, i128): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_f32_le(&mut BoxAsyncWrite, f32): Send & Sync & !Unpin
);
async_assert_fn!(
tokio::io::AsyncWriteExt::write_f64_le(&mut BoxAsyncWrite, f64): Send & Sync & !Unpin
);
async_assert_fn!(tokio::io::AsyncWriteExt::flush(&mut BoxAsyncWrite): Send & Sync & !Unpin);
async_assert_fn!(tokio::io::AsyncWriteExt::shutdown(&mut BoxAsyncWrite): Send & Sync & !Unpin);
#[cfg(unix)]
mod unix_asyncfd {
    use super::*;
    use tokio::io::unix::*;
    // Type-level stub used only to satisfy the AsRawFd bound; these values
    // are never constructed, so `as_raw_fd` is unreachable.
    struct ImplsFd<T> {
        _t: T,
    }
    impl<T> std::os::unix::io::AsRawFd for ImplsFd<T> {
        fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
            unreachable!()
        }
    }
    // AsyncFd and its guards inherit Send/Sync from the wrapped type:
    // YY = Send + Sync, YN = Send only, NN = neither (see top of file).
    assert_value!(AsyncFd<ImplsFd<YY>>: Send & Sync & Unpin);
    assert_value!(AsyncFd<ImplsFd<YN>>: Send & !Sync & Unpin);
    assert_value!(AsyncFd<ImplsFd<NN>>: !Send & !Sync & Unpin);
    // The shared-reference guard requires Sync for Send.
    assert_value!(AsyncFdReadyGuard<'_, ImplsFd<YY>>: Send & Sync & Unpin);
    assert_value!(AsyncFdReadyGuard<'_, ImplsFd<YN>>: !Send & !Sync & Unpin);
    assert_value!(AsyncFdReadyGuard<'_, ImplsFd<NN>>: !Send & !Sync & Unpin);
    // The mutable guard only needs the inner type to be Send to be Send.
    assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<YY>>: Send & Sync & Unpin);
    assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<YN>>: Send & !Sync & Unpin);
    assert_value!(AsyncFdReadyMutGuard<'_, ImplsFd<NN>>: !Send & !Sync & Unpin);
    assert_value!(TryIoError: Send & Sync & Unpin);
    // readable()/writable() futures borrow the AsyncFd, so their auto traits
    // follow the guard's rules above.
    async_assert_fn!(AsyncFd<ImplsFd<YY>>::readable(_): Send & Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<YY>>::readable_mut(_): Send & Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<YY>>::writable(_): Send & Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<YY>>::writable_mut(_): Send & Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<YN>>::readable(_): !Send & !Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<YN>>::readable_mut(_): Send & !Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<YN>>::writable(_): !Send & !Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<YN>>::writable_mut(_): Send & !Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<NN>>::readable(_): !Send & !Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<NN>>::readable_mut(_): !Send & !Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<NN>>::writable(_): !Send & !Sync & !Unpin);
    async_assert_fn!(AsyncFd<ImplsFd<NN>>::writable_mut(_): !Send & !Sync & !Unpin);
}

50
vendor/tokio/tests/buffered.rs vendored Normal file
View file

@ -0,0 +1,50 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support bind()
use tokio::net::TcpListener;
use tokio_test::assert_ok;
use std::io::prelude::*;
use std::net::TcpStream;
use std::thread;
#[tokio::test]
async fn echo_server() {
    // Number of times the message is pushed through the server.
    const N: usize = 1024;

    let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let addr = assert_ok!(srv.local_addr());

    let msg = "foo bar baz";

    // Writer thread: connects as the first peer and writes `msg` N times,
    // spawning a reader thread that connects as the second peer and reads
    // everything the server copies across.
    let t = thread::spawn(move || {
        let mut s = assert_ok!(TcpStream::connect(&addr));

        let t2 = thread::spawn(move || {
            let mut s = assert_ok!(TcpStream::connect(&addr));
            let mut b = vec![0; msg.len() * N];
            assert_ok!(s.read_exact(&mut b));
            b
        });

        let mut expected = Vec::<u8>::new();
        for _i in 0..N {
            expected.extend(msg.as_bytes());
            let res = assert_ok!(s.write(msg.as_bytes()));
            assert_eq!(res, msg.len());
        }

        (expected, t2)
    });

    // Accept both peers and copy everything from the first to the second.
    let (mut a, _) = assert_ok!(srv.accept().await);
    let (mut b, _) = assert_ok!(srv.accept().await);
    let n = assert_ok!(tokio::io::copy(&mut a, &mut b).await);

    let (expected, t2) = t.join().unwrap();
    let actual = t2.join().unwrap();

    assert!(expected == actual);
    // Use `N` instead of repeating the literal 1024 so the expected byte
    // count stays in sync with the loop above.
    assert_eq!(n, (msg.len() * N) as u64);
}

20
vendor/tokio/tests/fs.rs vendored Normal file
View file

@ -0,0 +1,20 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations
use tokio::fs;
use tokio_test::assert_ok;
#[tokio::test]
async fn path_read_write() {
    // Round-trip a small payload through tokio's async fs API.
    let scratch = tempdir();
    let target = scratch.path().join("bar");

    assert_ok!(fs::write(&target, b"bytes").await);

    let contents = assert_ok!(fs::read(&target).await);
    assert_eq!(contents, b"bytes");
}
/// Create a fresh temporary directory, panicking on failure.
fn tempdir() -> tempfile::TempDir {
    let dir = tempfile::tempdir();
    dir.unwrap()
}

39
vendor/tokio/tests/fs_copy.rs vendored Normal file
View file

@ -0,0 +1,39 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations
use tempfile::tempdir;
use tokio::fs;
#[tokio::test]
async fn copy() {
    // Copying a file must reproduce its contents exactly.
    let tmp = tempdir().unwrap();
    let src = tmp.path().join("foo.txt");
    let dst = tmp.path().join("bar.txt");

    fs::write(&src, b"Hello File!").await.unwrap();
    fs::copy(&src, &dst).await.unwrap();

    let original = fs::read(&src).await.unwrap();
    let copied = fs::read(&dst).await.unwrap();
    assert_eq!(original, copied);
}
#[tokio::test]
async fn copy_permissions() {
    // `fs::copy` must carry the source file's permission bits to the target.
    let tmp = tempdir().unwrap();
    let src = tmp.path().join("foo.txt");
    let dst = tmp.path().join("bar.txt");

    let file = tokio::fs::File::create(&src).await.unwrap();
    let mut perms = file.metadata().await.unwrap().permissions();
    perms.set_readonly(true);
    file.set_permissions(perms.clone()).await.unwrap();

    tokio::fs::copy(src, &dst).await.unwrap();

    let copied_perms = tokio::fs::metadata(dst).await.unwrap().permissions();
    assert_eq!(perms, copied_perms);
}

87
vendor/tokio/tests/fs_dir.rs vendored Normal file
View file

@ -0,0 +1,87 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support directory operations
use tokio::fs;
use tokio_test::{assert_err, assert_ok};
use std::sync::{Arc, Mutex};
use tempfile::tempdir;
#[tokio::test]
async fn create_dir() {
    // A directory created through the async API must exist afterwards.
    let base = tempdir().unwrap();
    let target = base.path().join("foo");
    let probe = target.clone();

    assert_ok!(fs::create_dir(target).await);
    assert!(probe.is_dir());
}
#[tokio::test]
async fn create_all() {
    // `create_dir_all` must create missing intermediate directories.
    let base = tempdir().unwrap();
    let target = base.path().join("foo").join("bar");
    let probe = target.clone();

    assert_ok!(fs::create_dir_all(target).await);
    assert!(probe.is_dir());
}
#[tokio::test]
async fn build_dir() {
let base_dir = tempdir().unwrap();
let new_dir = base_dir.path().join("foo").join("bar");
let new_dir_2 = new_dir.clone();
assert_ok!(fs::DirBuilder::new().recursive(true).create(new_dir).await);
assert!(new_dir_2.is_dir());
assert_err!(
fs::DirBuilder::new()
.recursive(false)
.create(new_dir_2)
.await
);
}
#[tokio::test]
async fn remove() {
    // Removing an existing directory must make it disappear.
    let base = tempdir().unwrap();
    let target = base.path().join("foo");
    let probe = target.clone();

    std::fs::create_dir(target.clone()).unwrap();
    assert_ok!(fs::remove_dir(target).await);
    assert!(!probe.exists());
}
// Iterate a directory with `fs::read_dir` / `next_entry` and verify every
// entry is observed (sorted, since iteration order is unspecified).
#[tokio::test]
async fn read_inherent() {
    let base_dir = tempdir().unwrap();
    let p = base_dir.path();
    std::fs::create_dir(p.join("aa")).unwrap();
    std::fs::create_dir(p.join("bb")).unwrap();
    std::fs::create_dir(p.join("cc")).unwrap();
    // NOTE(review): Arc<Mutex<..>> is not needed here — everything runs on a
    // single task — a plain Vec would do; kept to avoid touching behavior.
    let files = Arc::new(Mutex::new(Vec::new()));
    let f = files.clone();
    let p = p.to_path_buf();
    let mut entries = fs::read_dir(p).await.unwrap();
    while let Some(e) = assert_ok!(entries.next_entry().await) {
        let s = e.file_name().to_str().unwrap().to_string();
        f.lock().unwrap().push(s);
    }
    let mut files = files.lock().unwrap();
    files.sort(); // because the order is not guaranteed
    assert_eq!(
        *files,
        vec!["aa".to_string(), "bb".to_string(), "cc".to_string()]
    );
}

112
vendor/tokio/tests/fs_file.rs vendored Normal file
View file

@ -0,0 +1,112 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations
use std::io::prelude::*;
use tempfile::NamedTempFile;
use tokio::fs::File;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, SeekFrom};
use tokio_test::task;
// Payload written and read back by the tests in this file.
const HELLO: &[u8] = b"hello world...";
#[tokio::test]
async fn basic_read() {
    // Write with std, then read back through tokio::fs::File.
    let mut scratch = tempfile();
    scratch.write_all(HELLO).unwrap();

    let mut file = File::open(scratch.path()).await.unwrap();

    let mut buf = [0; 1024];
    let n = file.read(&mut buf).await.unwrap();

    assert_eq!(n, HELLO.len());
    assert_eq!(&buf[..n], HELLO);
}
#[tokio::test]
async fn basic_write() {
    // Write with tokio (plus an explicit flush), verify with std.
    let scratch = tempfile();

    let mut file = File::create(scratch.path()).await.unwrap();
    file.write_all(HELLO).await.unwrap();
    file.flush().await.unwrap();

    let written = std::fs::read(scratch.path()).unwrap();
    assert_eq!(written, HELLO);
}
#[tokio::test]
async fn basic_write_and_shutdown() {
    // Like basic_write, but terminating the writer with shutdown().
    let scratch = tempfile();

    let mut file = File::create(scratch.path()).await.unwrap();
    file.write_all(HELLO).await.unwrap();
    file.shutdown().await.unwrap();

    let written = std::fs::read(scratch.path()).unwrap();
    assert_eq!(written, HELLO);
}
#[tokio::test]
async fn rewind_seek_position() {
    // `rewind` must reset the cursor to the start of the file.
    let scratch = tempfile();
    let mut file = File::create(scratch.path()).await.unwrap();

    file.seek(SeekFrom::Current(10)).await.unwrap();
    file.rewind().await.unwrap();

    assert_eq!(file.stream_position().await.unwrap(), 0);
}
// Verify cooperative scheduling: an endless read/seek loop on a file must
// eventually return Pending (yield) instead of starving the executor.
#[tokio::test]
async fn coop() {
    let mut tempfile = tempfile();
    tempfile.write_all(HELLO).unwrap();
    let mut task = task::spawn(async {
        let mut file = File::open(tempfile.path()).await.unwrap();
        let mut buf = [0; 1024];
        // Infinite loop — only tokio's coop budget can force a yield.
        loop {
            let _ = file.read(&mut buf).await.unwrap();
            file.seek(std::io::SeekFrom::Start(0)).await.unwrap();
        }
    });
    // If the budget mechanism works, one of these polls returns Pending.
    for _ in 0..1_000 {
        if task.poll().is_pending() {
            return;
        }
    }
    panic!("did not yield");
}
/// Create a fresh named temporary file, panicking on failure.
fn tempfile() -> NamedTempFile {
    let file = NamedTempFile::new();
    file.unwrap()
}
#[tokio::test]
#[cfg(unix)]
async fn unix_fd() {
    use std::os::unix::io::AsRawFd;
    // A freshly created file must expose a usable raw descriptor.
    let scratch = tempfile();
    let file = File::create(scratch.path()).await.unwrap();
    assert!(file.as_raw_fd() as u64 > 0);
}
#[tokio::test]
#[cfg(windows)]
async fn windows_handle() {
    use std::os::windows::io::AsRawHandle;
    // A freshly created file must expose a usable raw OS handle.
    let scratch = tempfile();
    let file = File::create(scratch.path()).await.unwrap();
    assert!(file.as_raw_handle() as u64 > 0);
}

68
vendor/tokio/tests/fs_link.rs vendored Normal file
View file

@ -0,0 +1,68 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations
use tokio::fs;
use std::io::prelude::*;
use std::io::BufReader;
use tempfile::tempdir;
#[tokio::test]
async fn test_hard_link() {
    // A hard link must expose the same contents as the original file.
    let dir = tempdir().unwrap();
    let src = dir.path().join("src.txt");
    let dst = dir.path().join("dst.txt");

    // The temporary File handle is dropped (closed) at end of statement.
    std::fs::File::create(&src)
        .unwrap()
        .write_all(b"hello")
        .unwrap();

    let link_path = dst.clone();
    assert!(fs::hard_link(src, link_path.clone()).await.is_ok());

    let mut content = String::new();
    BufReader::new(std::fs::File::open(dst).unwrap())
        .read_to_string(&mut content)
        .unwrap();

    assert!(content == "hello");
}
#[cfg(unix)]
#[tokio::test]
async fn test_symlink() {
    // A symlink must read through to its target, resolve back to the source
    // path via read_link, and report a symlink file type.
    let dir = tempdir().unwrap();
    let src = dir.path().join("src.txt");
    let dst = dir.path().join("dst.txt");

    // The temporary File handle is dropped (closed) at end of statement.
    std::fs::File::create(&src)
        .unwrap()
        .write_all(b"hello")
        .unwrap();

    assert!(fs::symlink(src.clone(), dst.clone()).await.is_ok());

    let mut content = String::new();
    BufReader::new(std::fs::File::open(dst.clone()).unwrap())
        .read_to_string(&mut content)
        .unwrap();
    assert!(content == "hello");

    let resolved = fs::read_link(dst.clone()).await.unwrap();
    assert!(resolved == src);

    let meta = fs::symlink_metadata(dst.clone()).await.unwrap();
    assert!(meta.file_type().is_symlink());
}

601
vendor/tokio/tests/io_async_fd.rs vendored Normal file
View file

@ -0,0 +1,601 @@
#![warn(rust_2018_idioms)]
#![cfg(all(unix, feature = "full"))]
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::time::Duration;
use std::{
future::Future,
io::{self, ErrorKind, Read, Write},
task::{Context, Waker},
};
use nix::unistd::{close, read, write};
use futures::poll;
use tokio::io::unix::{AsyncFd, AsyncFdReadyGuard};
use tokio_test::{assert_err, assert_pending};
/// Waker wrapper that records whether it has been woken.
struct TestWaker {
    inner: Arc<TestWakerInner>,
    waker: Waker,
}
#[derive(Default)]
struct TestWakerInner {
    // Set to true by wake_by_ref; consumed by TestWaker::awoken.
    awoken: AtomicBool,
}
impl futures::task::ArcWake for TestWakerInner {
    fn wake_by_ref(arc_self: &Arc<Self>) {
        arc_self.awoken.store(true, Ordering::SeqCst);
    }
}
impl TestWaker {
    fn new() -> Self {
        let inner: Arc<TestWakerInner> = Default::default();
        Self {
            inner: inner.clone(),
            waker: futures::task::waker(inner),
        }
    }
    // Returns whether a wake happened since the last call, resetting the flag.
    fn awoken(&self) -> bool {
        self.inner.awoken.swap(false, Ordering::SeqCst)
    }
    // A poll Context whose waker is this test waker.
    fn context(&self) -> Context<'_> {
        Context::from_waker(&self.waker)
    }
}
/// Minimal owned-fd wrapper used as the AsyncFd inner type in these tests:
/// raw read/write via nix, and close-on-drop.
#[derive(Debug)]
struct FileDescriptor {
    fd: RawFd,
}
impl AsRawFd for FileDescriptor {
    fn as_raw_fd(&self) -> RawFd {
        self.fd
    }
}
// Read/Write are implemented on &FileDescriptor so shared references can
// perform I/O (as required by AsyncFd guards); the owned impls delegate.
impl Read for &FileDescriptor {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        read(self.fd, buf).map_err(io::Error::from)
    }
}
impl Read for FileDescriptor {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        (self as &Self).read(buf)
    }
}
impl Write for &FileDescriptor {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        write(self.fd, buf).map_err(io::Error::from)
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
impl Write for FileDescriptor {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        (self as &Self).write(buf)
    }
    fn flush(&mut self) -> io::Result<()> {
        (self as &Self).flush()
    }
}
impl Drop for FileDescriptor {
    fn drop(&mut self) {
        // Best-effort close; errors on close are ignored.
        let _ = close(self.fd);
    }
}
/// Put `fd` into non-blocking mode by OR-ing O_NONBLOCK into its
/// file-status flags.
fn set_nonblocking(fd: RawFd) {
    use nix::fcntl::{OFlag, F_GETFL, F_SETFL};

    // `nix` maps a -1 libc return to Err, so `expect` is the failure path.
    // (The messages previously said F_GETFD/F_SETFD, which are different
    // fcntl commands — fixed to match the actual calls.)
    let flags = nix::fcntl::fcntl(fd, F_GETFL).expect("fcntl(F_GETFL)");

    // Defensive check; with `nix` this should be unreachable because a
    // negative return is already surfaced as Err above.
    if flags < 0 {
        panic!(
            "bad return value from fcntl(F_GETFL): {} ({:?})",
            flags,
            nix::Error::last()
        );
    }

    // Preserve existing flags and add O_NONBLOCK.
    let flags = OFlag::from_bits_truncate(flags) | OFlag::O_NONBLOCK;

    nix::fcntl::fcntl(fd, F_SETFL(flags)).expect("fcntl(F_SETFL)");
}
/// Create a connected pair of Unix stream sockets, both set non-blocking,
/// wrapped in FileDescriptor so they close on drop.
fn socketpair() -> (FileDescriptor, FileDescriptor) {
    use nix::sys::socket::{self, AddressFamily, SockFlag, SockType};
    let (fd_a, fd_b) = socket::socketpair(
        AddressFamily::Unix,
        SockType::Stream,
        None,
        SockFlag::empty(),
    )
    .expect("socketpair");
    let fds = (FileDescriptor { fd: fd_a }, FileDescriptor { fd: fd_b });
    // Non-blocking is required for readiness-based I/O through AsyncFd.
    set_nonblocking(fds.0.fd);
    set_nonblocking(fds.1.fd);
    fds
}
/// Read and discard everything currently buffered on `fd`, stopping at the
/// first WouldBlock. Panics on EOF or any other error.
fn drain(mut fd: &FileDescriptor) {
    let mut buf = [0u8; 512];
    loop {
        match fd.read(&mut buf[..]) {
            Err(e) if e.kind() == ErrorKind::WouldBlock => break,
            Ok(0) => panic!("unexpected EOF"),
            Err(e) => panic!("unexpected error: {:?}", e),
            Ok(_) => continue,
        }
    }
}
// A fresh socketpair starts out writable on both ends and readable on
// neither (nothing has been written yet).
#[tokio::test]
async fn initially_writable() {
    let (a, b) = socketpair();
    let afd_a = AsyncFd::new(a).unwrap();
    let afd_b = AsyncFd::new(b).unwrap();
    afd_a.writable().await.unwrap().clear_ready();
    afd_b.writable().await.unwrap().clear_ready();
    // Neither side should become readable within the timeout.
    tokio::select! {
        biased;
        _ = tokio::time::sleep(Duration::from_millis(10)) => {},
        _ = afd_a.readable() => panic!("Unexpected readable state"),
        _ = afd_b.readable() => panic!("Unexpected readable state"),
    }
}
// Readiness is sticky until explicitly cleared: after draining the socket,
// readable() still resolves until `clear_ready` is called on the guard.
#[tokio::test]
async fn reset_readable() {
    let (a, mut b) = socketpair();
    let afd_a = AsyncFd::new(a).unwrap();
    let readable = afd_a.readable();
    tokio::pin!(readable);
    // Nothing written yet: must stay pending.
    tokio::select! {
        _ = readable.as_mut() => panic!(),
        _ = tokio::time::sleep(Duration::from_millis(10)) => {}
    }
    b.write_all(b"0").unwrap();
    let mut guard = readable.await.unwrap();
    guard
        .try_io(|_| afd_a.get_ref().read(&mut [0]))
        .unwrap()
        .unwrap();
    // `a` is not readable, but the reactor still thinks it is
    // (because we have not observed a not-ready error yet)
    afd_a.readable().await.unwrap().retain_ready();
    // Explicitly clear the ready state
    guard.clear_ready();
    let readable = afd_a.readable();
    tokio::pin!(readable);
    tokio::select! {
        _ = readable.as_mut() => panic!(),
        _ = tokio::time::sleep(Duration::from_millis(10)) => {}
    }
    b.write_all(b"0").unwrap();
    // We can observe the new readable event
    afd_a.readable().await.unwrap().clear_ready();
}
// Filling the send buffer clears writable readiness; draining the peer
// restores it.
#[tokio::test]
async fn reset_writable() {
    let (a, b) = socketpair();
    let afd_a = AsyncFd::new(a).unwrap();
    let mut guard = afd_a.writable().await.unwrap();
    // Write until we get a WouldBlock. This also clears the ready state.
    while guard
        .try_io(|_| afd_a.get_ref().write(&[0; 512][..]))
        .is_ok()
    {}
    // Writable state should be cleared now.
    let writable = afd_a.writable();
    tokio::pin!(writable);
    tokio::select! {
        _ = writable.as_mut() => panic!(),
        _ = tokio::time::sleep(Duration::from_millis(10)) => {}
    }
    // Read from the other side; we should become writable now.
    drain(&b);
    let _ = writable.await.unwrap();
}
// Newtype over Arc<T> that forwards AsRawFd; used to show AsyncFd's drop
// behavior delegates to the wrapped type (see drop_closes below).
#[derive(Debug)]
struct ArcFd<T>(Arc<T>);
impl<T: AsRawFd> AsRawFd for ArcFd<T> {
    fn as_raw_fd(&self) -> RawFd {
        self.0.as_raw_fd()
    }
}
// Dropping an AsyncFd drops (and thus closes) the wrapped fd, observed as
// EOF on the peer; into_inner() hands the fd back without closing it; and
// an Arc-held fd stays open while other Arc clones exist.
#[tokio::test]
async fn drop_closes() {
    let (a, mut b) = socketpair();
    let afd_a = AsyncFd::new(a).unwrap();
    assert_eq!(
        ErrorKind::WouldBlock,
        b.read(&mut [0]).err().unwrap().kind()
    );
    std::mem::drop(afd_a);
    // Peer sees EOF (read of 0 bytes) once `a` is closed.
    assert_eq!(0, b.read(&mut [0]).unwrap());
    // into_inner does not close the fd
    let (a, mut b) = socketpair();
    let afd_a = AsyncFd::new(a).unwrap();
    let _a: FileDescriptor = afd_a.into_inner();
    assert_eq!(
        ErrorKind::WouldBlock,
        b.read(&mut [0]).err().unwrap().kind()
    );
    // Drop closure behavior is delegated to the inner object
    let (a, mut b) = socketpair();
    let arc_fd = Arc::new(a);
    let afd_a = AsyncFd::new(ArcFd(arc_fd.clone())).unwrap();
    std::mem::drop(afd_a);
    // The fd is still open because `arc_fd` holds another reference.
    assert_eq!(
        ErrorKind::WouldBlock,
        b.read(&mut [0]).err().unwrap().kind()
    );
    std::mem::drop(arc_fd); // suppress unnecessary clone clippy warning
}
#[tokio::test]
async fn reregister() {
    // An fd recovered via into_inner() can be registered a second time.
    let (sock, _peer) = socketpair();
    let wrapped = AsyncFd::new(sock).unwrap();
    let sock = wrapped.into_inner();
    AsyncFd::new(sock).unwrap();
}
// try_io keeps the ready state on Ok or non-WouldBlock errors and clears it
// only when the closure reports WouldBlock.
#[tokio::test]
async fn try_io() {
    let (a, mut b) = socketpair();
    b.write_all(b"0").unwrap();
    let afd_a = AsyncFd::new(a).unwrap();
    let mut guard = afd_a.readable().await.unwrap();
    afd_a.get_ref().read_exact(&mut [0]).unwrap();
    // Should not clear the readable state
    let _ = guard.try_io(|_| Ok(()));
    // Still readable...
    let _ = afd_a.readable().await.unwrap();
    // Should clear the readable state
    let _ = guard.try_io(|_| io::Result::<()>::Err(ErrorKind::WouldBlock.into()));
    // Assert not readable
    let readable = afd_a.readable();
    tokio::pin!(readable);
    tokio::select! {
        _ = readable.as_mut() => panic!(),
        _ = tokio::time::sleep(Duration::from_millis(10)) => {}
    }
    // Write something down b again and make sure we're reawoken
    b.write_all(b"0").unwrap();
    let _ = readable.await.unwrap();
}
// Ten tasks wait on readable() for the same AsyncFd; one write must wake
// them all (each clears readiness after a yield so the others also run).
#[tokio::test]
async fn multiple_waiters() {
    let (a, mut b) = socketpair();
    let afd_a = Arc::new(AsyncFd::new(a).unwrap());
    // 10 waiter tasks + this test task.
    let barrier = Arc::new(tokio::sync::Barrier::new(11));
    let mut tasks = Vec::new();
    for _ in 0..10 {
        let afd_a = afd_a.clone();
        let barrier = barrier.clone();
        let f = async move {
            // Signals arrival at the barrier, then never completes — the
            // select below must finish via the readable() arm instead.
            let notify_barrier = async {
                barrier.wait().await;
                futures::future::pending::<()>().await;
            };
            tokio::select! {
                biased;
                guard = afd_a.readable() => {
                    tokio::task::yield_now().await;
                    guard.unwrap().clear_ready()
                },
                _ = notify_barrier => unreachable!(),
            }
            std::mem::drop(afd_a);
        };
        tasks.push(tokio::spawn(f));
    }
    let mut all_tasks = futures::future::try_join_all(tasks);
    // Wait until every task is parked on readable(); they must not finish
    // before anything is written.
    tokio::select! {
        r = std::pin::Pin::new(&mut all_tasks) => {
            r.unwrap(); // propagate panic
            panic!("Tasks exited unexpectedly")
        },
        _ = barrier.wait() => {}
    };
    b.write_all(b"0").unwrap();
    all_tasks.await.unwrap();
}
// Exercises the poll_read_ready/poll_write_ready API directly: only the
// most recently registered waker per direction is woken, and waking the
// read side must not disturb the write-side registration.
#[tokio::test]
async fn poll_fns() {
    let (a, b) = socketpair();
    let afd_a = Arc::new(AsyncFd::new(a).unwrap());
    let afd_b = Arc::new(AsyncFd::new(b).unwrap());
    // Fill up the write side of A
    while afd_a.get_ref().write(&[0; 512]).is_ok() {}
    let waker = TestWaker::new();
    assert_pending!(afd_a.as_ref().poll_read_ready(&mut waker.context()));
    let afd_a_2 = afd_a.clone();
    let r_barrier = Arc::new(tokio::sync::Barrier::new(2));
    let barrier_clone = r_barrier.clone();
    let read_fut = tokio::spawn(async move {
        // Move waker onto this task first
        assert_pending!(poll!(futures::future::poll_fn(|cx| afd_a_2
            .as_ref()
            .poll_read_ready(cx))));
        barrier_clone.wait().await;
        let _ = futures::future::poll_fn(|cx| afd_a_2.as_ref().poll_read_ready(cx)).await;
    });
    let afd_a_2 = afd_a.clone();
    let w_barrier = Arc::new(tokio::sync::Barrier::new(2));
    let barrier_clone = w_barrier.clone();
    let mut write_fut = tokio::spawn(async move {
        // Move waker onto this task first
        assert_pending!(poll!(futures::future::poll_fn(|cx| afd_a_2
            .as_ref()
            .poll_write_ready(cx))));
        barrier_clone.wait().await;
        let _ = futures::future::poll_fn(|cx| afd_a_2.as_ref().poll_write_ready(cx)).await;
    });
    r_barrier.wait().await;
    w_barrier.wait().await;
    let readable = afd_a.readable();
    tokio::pin!(readable);
    tokio::select! {
        _ = &mut readable => unreachable!(),
        _ = tokio::task::yield_now() => {}
    }
    // Make A readable. We expect that 'readable' and 'read_fut' will both complete quickly
    afd_b.get_ref().write_all(b"0").unwrap();
    let _ = tokio::join!(readable, read_fut);
    // Our original waker should _not_ be awoken (poll_read_ready retains only the last context)
    assert!(!waker.awoken());
    // The writable side should not be awoken
    tokio::select! {
        _ = &mut write_fut => unreachable!(),
        _ = tokio::time::sleep(Duration::from_millis(5)) => {}
    }
    // Make it writable now
    drain(afd_b.get_ref());
    // now we should be writable (ie - the waker for poll_write should still be registered after we wake the read side)
    let _ = write_fut.await;
}
// Pin `f`, assert it is pending under a no-op waker, and hand the pinned
// future back so the caller can complete it later.
fn assert_pending<T: std::fmt::Debug, F: Future<Output = T>>(f: F) -> std::pin::Pin<Box<F>> {
    let mut pinned = Box::pin(f);
    assert_pending!(pinned
        .as_mut()
        .poll(&mut Context::from_waker(futures::task::noop_waker_ref())));
    pinned
}
/// Build a current-thread runtime with all drivers enabled.
fn rt() -> tokio::runtime::Runtime {
    let mut builder = tokio::runtime::Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}
// Dropping the runtime (and with it the I/O driver) must wake and fail any
// readiness future — whether it was created before or after the drop.
#[test]
fn driver_shutdown_wakes_currently_pending() {
    let rt = rt();
    let (a, _b) = socketpair();
    let afd_a = {
        let _enter = rt.enter();
        AsyncFd::new(a).unwrap()
    };
    let readable = assert_pending(afd_a.readable());
    std::mem::drop(rt);
    // The future was initialized **before** dropping the rt
    assert_err!(futures::executor::block_on(readable));
    // The future is initialized **after** dropping the rt.
    assert_err!(futures::executor::block_on(afd_a.readable()));
}
// Same as above but the future is only created after the runtime is gone.
#[test]
fn driver_shutdown_wakes_future_pending() {
    let rt = rt();
    let (a, _b) = socketpair();
    let afd_a = {
        let _enter = rt.enter();
        AsyncFd::new(a).unwrap()
    };
    std::mem::drop(rt);
    assert_err!(futures::executor::block_on(afd_a.readable()));
}
// Race the runtime drop (on another thread) against a waiter; the waiter is
// always awoken, and a retry after shutdown always errors.
#[test]
fn driver_shutdown_wakes_pending_race() {
    // TODO: make this a loom test
    for _ in 0..100 {
        let rt = rt();
        let (a, _b) = socketpair();
        let afd_a = {
            let _enter = rt.enter();
            AsyncFd::new(a).unwrap()
        };
        let _ = std::thread::spawn(move || std::mem::drop(rt));
        // This may or may not return an error (but will be awoken)
        let _ = futures::executor::block_on(afd_a.readable());
        // However retrying will always return an error
        assert_err!(futures::executor::block_on(afd_a.readable()));
    }
}
// Helper: await read readiness through the raw poll_read_ready API.
async fn poll_readable<T: AsRawFd>(fd: &AsyncFd<T>) -> std::io::Result<AsyncFdReadyGuard<'_, T>> {
    futures::future::poll_fn(|cx| fd.poll_read_ready(cx)).await
}
// Helper: await write readiness through the raw poll_write_ready API.
async fn poll_writable<T: AsRawFd>(fd: &AsyncFd<T>) -> std::io::Result<AsyncFdReadyGuard<'_, T>> {
    futures::future::poll_fn(|cx| fd.poll_write_ready(cx)).await
}
// As with the future-based variants above, dropping the runtime must wake
// and fail raw poll_*_ready waiters that were already pending.
#[test]
fn driver_shutdown_wakes_currently_pending_polls() {
    let rt = rt();
    let (a, _b) = socketpair();
    let afd_a = {
        let _enter = rt.enter();
        AsyncFd::new(a).unwrap()
    };
    while afd_a.get_ref().write(&[0; 512]).is_ok() {} // make not writable
    let readable = assert_pending(poll_readable(&afd_a));
    let writable = assert_pending(poll_writable(&afd_a));
    std::mem::drop(rt);
    // Attempting to poll readiness when the rt is dropped is an error
    assert_err!(futures::executor::block_on(readable));
    assert_err!(futures::executor::block_on(writable));
}
// Polling readiness after the runtime is gone errors immediately.
#[test]
fn driver_shutdown_wakes_poll() {
    let rt = rt();
    let (a, _b) = socketpair();
    let afd_a = {
        let _enter = rt.enter();
        AsyncFd::new(a).unwrap()
    };
    std::mem::drop(rt);
    assert_err!(futures::executor::block_on(poll_readable(&afd_a)));
    assert_err!(futures::executor::block_on(poll_writable(&afd_a)));
}
// Race the runtime drop against raw polls; both directions must error.
#[test]
fn driver_shutdown_wakes_poll_race() {
    // TODO: make this a loom test
    for _ in 0..100 {
        let rt = rt();
        let (a, _b) = socketpair();
        let afd_a = {
            let _enter = rt.enter();
            AsyncFd::new(a).unwrap()
        };
        while afd_a.get_ref().write(&[0; 512]).is_ok() {} // make not writable
        let _ = std::thread::spawn(move || std::mem::drop(rt));
        // The poll variants will always return an error in this case
        assert_err!(futures::executor::block_on(poll_readable(&afd_a)));
        assert_err!(futures::executor::block_on(poll_writable(&afd_a)));
    }
}

10
vendor/tokio/tests/io_async_read.rs vendored Normal file
View file

@ -0,0 +1,10 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::AsyncRead;
#[test]
fn assert_obj_safe() {
    // This compiles only if `AsyncRead` can be used as a trait object.
    fn accepts_type<T>() {}
    accepts_type::<Box<dyn AsyncRead>>();
}

379
vendor/tokio/tests/io_buf_reader.rs vendored Normal file
View file

@ -0,0 +1,379 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
// https://github.com/rust-lang/futures-rs/blob/1803948ff091b4eabf7f3bf39e16bbbdefca5cc8/futures/tests/io_buf_reader.rs
use futures::task::{noop_waker_ref, Context, Poll};
use std::cmp;
use std::io::{self, Cursor};
use std::pin::Pin;
use tokio::io::{
AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt,
BufReader, ReadBuf, SeekFrom,
};
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready};
// Repeatedly poll `poll_fill_buf` with a no-op waker until it is Ready,
// evaluating to the ready `io::Result<&[u8]>`.
macro_rules! run_fill_buf {
    ($reader:expr) => {{
        let mut cx = Context::from_waker(noop_waker_ref());
        loop {
            if let Poll::Ready(x) = Pin::new(&mut $reader).poll_fill_buf(&mut cx) {
                break x;
            }
        }
    }};
}
// Reader over a byte slice that alternates between `Poll::Pending` and a
// successful result on consecutive calls, to exercise BufReader's handling
// of not-ready inner readers.
struct MaybePending<'a> {
    inner: &'a [u8],
    // When false, the next `poll_read` returns Pending (then flips to true).
    ready_read: bool,
    // When false, the next `poll_fill_buf` returns Pending (then flips).
    ready_fill_buf: bool,
}
impl<'a> MaybePending<'a> {
fn new(inner: &'a [u8]) -> Self {
Self {
inner,
ready_read: false,
ready_fill_buf: false,
}
}
}
impl AsyncRead for MaybePending<'_> {
    // Alternates: first call returns Pending (and self-wakes), second call
    // delegates to the underlying slice, and so on.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        if self.ready_read {
            self.ready_read = false;
            Pin::new(&mut self.inner).poll_read(cx, buf)
        } else {
            self.ready_read = true;
            // Wake immediately so the task is polled again.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}
impl AsyncBufRead for MaybePending<'_> {
    // Alternates Pending/Ready like `poll_read`. When ready, exposes at most
    // 2 bytes so callers see short fills. NOTE: does not wake the task on
    // Pending; run_fill_buf!'s busy loop re-polls regardless.
    fn poll_fill_buf(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<&[u8]>> {
        if self.ready_fill_buf {
            self.ready_fill_buf = false;
            if self.inner.is_empty() {
                return Poll::Ready(Ok(&[]));
            }
            let len = cmp::min(2, self.inner.len());
            Poll::Ready(Ok(&self.inner[0..len]))
        } else {
            self.ready_fill_buf = true;
            Poll::Pending
        }
    }
    // Drops `amt` bytes from the front of the remaining slice.
    fn consume(mut self: Pin<&mut Self>, amt: usize) {
        self.inner = &self.inner[amt..];
    }
}
// Exercises BufReader with a 2-byte buffer: large reads bypass the buffer,
// small reads drain and refill it; checks the exact buffered bytes each step.
#[tokio::test]
async fn test_buffered_reader() {
    let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
    let mut reader = BufReader::with_capacity(2, inner);
    // Read larger than capacity: goes straight to the inner reader.
    let mut buf = [0, 0, 0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 3);
    assert_eq!(buf, [5, 6, 7]);
    assert_eq!(reader.buffer(), []);
    // Read equal to capacity: also bypasses the buffer.
    let mut buf = [0, 0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 2);
    assert_eq!(buf, [0, 1]);
    assert_eq!(reader.buffer(), []);
    // Read smaller than capacity: fills the buffer, leaves one byte behind.
    let mut buf = [0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 1);
    assert_eq!(buf, [2]);
    assert_eq!(reader.buffer(), [3]);
    // Subsequent reads drain the buffered byte before touching the source.
    let mut buf = [0, 0, 0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 1);
    assert_eq!(buf, [3, 0, 0]);
    assert_eq!(reader.buffer(), []);
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 1);
    assert_eq!(buf, [4, 0, 0]);
    assert_eq!(reader.buffer(), []);
    // Source exhausted: EOF reads return 0.
    assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
}
// Seeking a BufReader must account for (and invalidate/adjust) buffered data,
// and a failed seek must leave the buffer intact.
#[tokio::test]
async fn test_buffered_reader_seek() {
    let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
    let mut reader = BufReader::with_capacity(2, Cursor::new(inner));
    assert_eq!(reader.seek(SeekFrom::Start(3)).await.unwrap(), 3);
    assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
    // Overflowing seek fails; the buffered bytes must survive unchanged.
    assert!(reader.seek(SeekFrom::Current(i64::MIN)).await.is_err());
    assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
    assert_eq!(reader.seek(SeekFrom::Current(1)).await.unwrap(), 4);
    assert_eq!(run_fill_buf!(reader).unwrap(), &[1, 2][..]);
    // Consuming a buffered byte shifts the logical position the next
    // relative seek must start from.
    Pin::new(&mut reader).consume(1);
    assert_eq!(reader.seek(SeekFrom::Current(-2)).await.unwrap(), 3);
}
// Relative seeks near the u64 boundary: BufReader may need to split one
// logical seek into two underlying seeks to avoid i64 overflow.
#[tokio::test]
async fn test_buffered_reader_seek_underflow() {
    // gimmick reader that yields its position modulo 256 for each byte
    struct PositionReader {
        pos: u64,
    }
    impl AsyncRead for PositionReader {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            // Fill the entire unfilled region; each byte is the low 8 bits
            // of the running position.
            let b = buf.initialize_unfilled();
            let len = b.len();
            for x in b {
                *x = self.pos as u8;
                self.pos = self.pos.wrapping_add(1);
            }
            buf.advance(len);
            Poll::Ready(Ok(()))
        }
    }
    impl AsyncSeek for PositionReader {
        // Seeks wrap rather than error, so positions near u64::MAX are
        // representable for the underflow scenario below.
        fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
            match pos {
                SeekFrom::Start(n) => {
                    self.pos = n;
                }
                SeekFrom::Current(n) => {
                    self.pos = self.pos.wrapping_add(n as u64);
                }
                SeekFrom::End(n) => {
                    self.pos = u64::MAX.wrapping_add(n as u64);
                }
            }
            Ok(())
        }
        fn poll_complete(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<u64>> {
            Poll::Ready(Ok(self.pos))
        }
    }
    let mut reader = BufReader::with_capacity(5, PositionReader { pos: 0 });
    assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1, 2, 3, 4][..]);
    assert_eq!(reader.seek(SeekFrom::End(-5)).await.unwrap(), u64::MAX - 5);
    assert_eq!(run_fill_buf!(reader).unwrap().len(), 5);
    // the following seek will require two underlying seeks
    let expected = 9_223_372_036_854_775_802;
    assert_eq!(
        reader.seek(SeekFrom::Current(i64::MIN)).await.unwrap(),
        expected
    );
    assert_eq!(run_fill_buf!(reader).unwrap().len(), 5);
    // seeking to 0 should empty the buffer.
    assert_eq!(reader.seek(SeekFrom::Current(0)).await.unwrap(), expected);
    assert_eq!(reader.get_ref().pos, expected);
}
// BufReader must propagate each short read from the inner reader verbatim
// (including interior zero-length reads) instead of coalescing them.
#[tokio::test]
async fn test_short_reads() {
    /// A dummy reader intended at testing short-reads propagation.
    struct ShortReader {
        lengths: Vec<usize>,
    }
    impl AsyncRead for ShortReader {
        // Each call advances by the next scripted length; once the script is
        // exhausted, returns 0-byte reads (EOF).
        fn poll_read(
            mut self: Pin<&mut Self>,
            _: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            if !self.lengths.is_empty() {
                buf.advance(self.lengths.remove(0));
            }
            Poll::Ready(Ok(()))
        }
    }
    let inner = ShortReader {
        lengths: vec![0, 1, 2, 0, 1, 0],
    };
    let mut reader = BufReader::new(inner);
    let mut buf = [0, 0];
    // Read lengths mirror the script exactly, one call per entry.
    assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
    assert_eq!(reader.read(&mut buf).await.unwrap(), 1);
    assert_eq!(reader.read(&mut buf).await.unwrap(), 2);
    assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
    assert_eq!(reader.read(&mut buf).await.unwrap(), 1);
    assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
    assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
}
// Same scenario as `test_buffered_reader`, but the inner reader interleaves
// `Poll::Pending` before every successful read — results must be identical.
#[tokio::test]
async fn maybe_pending() {
    let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
    let mut reader = BufReader::with_capacity(2, MaybePending::new(inner));
    let mut buf = [0, 0, 0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 3);
    assert_eq!(buf, [5, 6, 7]);
    assert_eq!(reader.buffer(), []);
    let mut buf = [0, 0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 2);
    assert_eq!(buf, [0, 1]);
    assert_eq!(reader.buffer(), []);
    let mut buf = [0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 1);
    assert_eq!(buf, [2]);
    assert_eq!(reader.buffer(), [3]);
    let mut buf = [0, 0, 0];
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 1);
    assert_eq!(buf, [3, 0, 0]);
    assert_eq!(reader.buffer(), []);
    let nread = reader.read(&mut buf).await.unwrap();
    assert_eq!(nread, 1);
    assert_eq!(buf, [4, 0, 0]);
    assert_eq!(reader.buffer(), []);
    assert_eq!(reader.read(&mut buf).await.unwrap(), 0);
}
// read_until must tolerate an inner AsyncBufRead that returns Pending and
// short (2-byte) fills; delimiters absent from the input drain to EOF.
#[tokio::test]
async fn maybe_pending_buf_read() {
    let inner = MaybePending::new(&[0, 1, 2, 3, 1, 0]);
    let mut reader = BufReader::with_capacity(2, inner);
    let mut v = Vec::new();
    reader.read_until(3, &mut v).await.unwrap();
    assert_eq!(v, [0, 1, 2, 3]);
    v.clear();
    reader.read_until(1, &mut v).await.unwrap();
    assert_eq!(v, [1]);
    v.clear();
    // 8 never occurs: reads the remaining byte through EOF.
    reader.read_until(8, &mut v).await.unwrap();
    assert_eq!(v, [0]);
    v.clear();
    // Already at EOF: nothing is appended.
    reader.read_until(9, &mut v).await.unwrap();
    assert_eq!(v, []);
}
// https://github.com/rust-lang/futures-rs/pull/1573#discussion_r281162309
// BufReader seek handling when the inner seeker's `poll_complete` alternates
// Pending/Ready; mirrors `test_buffered_reader_seek` under those conditions.
#[tokio::test]
async fn maybe_pending_seek() {
    struct MaybePendingSeek<'a> {
        inner: Cursor<&'a [u8]>,
        // When true, the next `poll_complete` completes; when false it
        // returns Pending and self-wakes.
        ready: bool,
        // Deferred result of the last `start_seek`, surfaced from
        // `poll_complete` (split-seek protocol of AsyncSeek).
        seek_res: Option<io::Result<()>>,
    }
    impl<'a> MaybePendingSeek<'a> {
        fn new(inner: &'a [u8]) -> Self {
            Self {
                inner: Cursor::new(inner),
                ready: true,
                seek_res: None,
            }
        }
    }
    impl AsyncRead for MaybePendingSeek<'_> {
        fn poll_read(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            Pin::new(&mut self.inner).poll_read(cx, buf)
        }
    }
    impl AsyncBufRead for MaybePendingSeek<'_> {
        fn poll_fill_buf(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<io::Result<&[u8]>> {
            // SAFETY: raw-pointer round trip to return a buffer borrowed from
            // `self.inner` despite going through `Pin<&mut Self>`; `this`
            // points at `*self`, which outlives the returned borrow for this
            // call. Copied from the upstream futures-rs test.
            let this: *mut Self = &mut *self as *mut _;
            Pin::new(&mut unsafe { &mut *this }.inner).poll_fill_buf(cx)
        }
        fn consume(mut self: Pin<&mut Self>, amt: usize) {
            Pin::new(&mut self.inner).consume(amt)
        }
    }
    impl AsyncSeek for MaybePendingSeek<'_> {
        fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
            // Stash the inner seek result; report it later from poll_complete.
            self.seek_res = Some(Pin::new(&mut self.inner).start_seek(pos));
            Ok(())
        }
        fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
            if self.ready {
                self.ready = false;
                self.seek_res.take().unwrap_or(Ok(()))?;
                Pin::new(&mut self.inner).poll_complete(cx)
            } else {
                self.ready = true;
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        }
    }
    let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4];
    let mut reader = BufReader::with_capacity(2, MaybePendingSeek::new(inner));
    assert_eq!(reader.seek(SeekFrom::Current(3)).await.unwrap(), 3);
    assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
    // Failed (overflowing) seek must not disturb buffered data.
    assert!(reader.seek(SeekFrom::Current(i64::MIN)).await.is_err());
    assert_eq!(run_fill_buf!(reader).unwrap(), &[0, 1][..]);
    assert_eq!(reader.seek(SeekFrom::Current(1)).await.unwrap(), 4);
    assert_eq!(run_fill_buf!(reader).unwrap(), &[1, 2][..]);
    Pin::new(&mut reader).consume(1);
    assert_eq!(reader.seek(SeekFrom::Current(-2)).await.unwrap(), 3);
}
// This tests the AsyncBufReadExt::fill_buf wrapper.
#[tokio::test]
async fn test_fill_buf_wrapper() {
    let (mut write, read) = tokio::io::duplex(16);
    let mut read = BufReader::new(read);
    write.write_all(b"hello world").await.unwrap();
    assert_eq!(read.fill_buf().await.unwrap(), b"hello world");
    read.consume(b"hello ".len());
    // fill_buf without consume is idempotent: same bytes twice.
    assert_eq!(read.fill_buf().await.unwrap(), b"world");
    assert_eq!(read.fill_buf().await.unwrap(), b"world");
    read.consume(b"world".len());
    // Buffer empty and no data queued: fill_buf must park until a write.
    let mut fill = spawn(read.fill_buf());
    assert_pending!(fill.poll());
    write.write_all(b"foo bar").await.unwrap();
    assert_eq!(assert_ready!(fill.poll()).unwrap(), b"foo bar");
    drop(fill);
    drop(write);
    // Data buffered before the writer dropped is still readable...
    assert_eq!(read.fill_buf().await.unwrap(), b"foo bar");
    read.consume(b"foo bar".len());
    // ...and after draining it, a closed duplex yields EOF (empty slice).
    assert_eq!(read.fill_buf().await.unwrap(), b"");
}

537
vendor/tokio/tests/io_buf_writer.rs vendored Normal file
View file

@ -0,0 +1,537 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
// https://github.com/rust-lang/futures-rs/blob/1803948ff091b4eabf7f3bf39e16bbbdefca5cc8/futures/tests/io_buf_writer.rs
use futures::task::{Context, Poll};
use std::io::{self, Cursor};
use std::pin::Pin;
use tokio::io::{AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt, BufWriter, SeekFrom};
use futures::future;
use tokio_test::assert_ok;
use std::cmp;
use std::io::IoSlice;
mod support {
pub(crate) mod io_vec;
}
use support::io_vec::IoBufs;
// Writer wrapping a Vec that alternates Pending/Ready on `poll_write`,
// to exercise BufWriter against a not-always-ready sink.
struct MaybePending {
    inner: Vec<u8>,
    // When false, the next `poll_write` returns Pending (then flips to true).
    ready: bool,
}
impl MaybePending {
fn new(inner: Vec<u8>) -> Self {
Self {
inner,
ready: false,
}
}
}
impl AsyncWrite for MaybePending {
    // Alternates: first call returns Pending (and self-wakes so the task is
    // re-polled), second call writes into the Vec, and so on.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        if self.ready {
            self.ready = false;
            Pin::new(&mut self.inner).poll_write(cx, buf)
        } else {
            self.ready = true;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
    // Flush/shutdown forward directly; only writes are throttled.
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}
/// Drives a single `poll_write_vectored` call on `writer` to completion and
/// returns how many bytes the writer reported consuming.
async fn write_vectored<W>(writer: &mut W, bufs: &[IoSlice<'_>]) -> io::Result<usize>
where
    W: AsyncWrite + Unpin,
{
    future::poll_fn(|cx| Pin::new(&mut *writer).poll_write_vectored(cx, bufs)).await
}
// BufWriter with a 2-byte buffer: writes at/above capacity bypass the buffer,
// smaller writes accumulate until full, and flush drains everything.
#[tokio::test]
async fn buf_writer() {
    let mut writer = BufWriter::with_capacity(2, Vec::new());
    // Write of exactly capacity goes straight through.
    assert_eq!(writer.write(&[0, 1]).await.unwrap(), 2);
    assert_eq!(writer.buffer(), []);
    assert_eq!(*writer.get_ref(), [0, 1]);
    // Sub-capacity writes are buffered, not yet visible in the sink.
    assert_eq!(writer.write(&[2]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [2]);
    assert_eq!(*writer.get_ref(), [0, 1]);
    assert_eq!(writer.write(&[3]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [2, 3]);
    assert_eq!(*writer.get_ref(), [0, 1]);
    // flush empties the buffer into the sink.
    writer.flush().await.unwrap();
    assert_eq!(writer.buffer(), []);
    assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
    assert_eq!(writer.write(&[4]).await.unwrap(), 1);
    assert_eq!(writer.write(&[5]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [4, 5]);
    assert_eq!(*writer.get_ref(), [0, 1, 2, 3]);
    // A write that would overflow a full buffer first drains the buffer.
    assert_eq!(writer.write(&[6]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [6]);
    assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]);
    assert_eq!(writer.write(&[7, 8]).await.unwrap(), 2);
    assert_eq!(writer.buffer(), []);
    assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]);
    // Over-capacity writes flush then bypass the buffer entirely.
    assert_eq!(writer.write(&[9, 10, 11]).await.unwrap(), 3);
    assert_eq!(writer.buffer(), []);
    assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
    writer.flush().await.unwrap();
    assert_eq!(writer.buffer(), []);
    assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
}
// Buffered data is not in the sink until flush; after flush, into_inner
// yields the sink containing exactly the written bytes.
#[tokio::test]
async fn buf_writer_inner_flushes() {
    let mut w = BufWriter::with_capacity(3, Vec::new());
    assert_eq!(w.write(&[0, 1]).await.unwrap(), 2);
    // Still buffered: the inner Vec is empty.
    assert_eq!(*w.get_ref(), []);
    w.flush().await.unwrap();
    let w = w.into_inner();
    assert_eq!(w, [0, 1]);
}
// Seeking a BufWriter must flush pending bytes first so the reported
// position and subsequent writes land at the correct offsets.
#[tokio::test]
async fn buf_writer_seek() {
    let mut w = BufWriter::with_capacity(3, Cursor::new(Vec::new()));
    w.write_all(&[0, 1, 2, 3, 4, 5]).await.unwrap();
    w.write_all(&[6, 7]).await.unwrap();
    // Current(0) position query: implies a flush of the 2 buffered bytes.
    assert_eq!(w.seek(SeekFrom::Current(0)).await.unwrap(), 8);
    assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]);
    assert_eq!(w.seek(SeekFrom::Start(2)).await.unwrap(), 2);
    // Overwrite two bytes in the middle of the stream.
    w.write_all(&[8, 9]).await.unwrap();
    w.flush().await.unwrap();
    assert_eq!(&w.into_inner().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]);
}
// Same scenario as `buf_writer`, but the sink returns Pending before every
// accepted write — the observable results must be identical.
#[tokio::test]
async fn maybe_pending_buf_writer() {
    let mut writer = BufWriter::with_capacity(2, MaybePending::new(Vec::new()));
    assert_eq!(writer.write(&[0, 1]).await.unwrap(), 2);
    assert_eq!(writer.buffer(), []);
    assert_eq!(&writer.get_ref().inner, &[0, 1]);
    assert_eq!(writer.write(&[2]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [2]);
    assert_eq!(&writer.get_ref().inner, &[0, 1]);
    assert_eq!(writer.write(&[3]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [2, 3]);
    assert_eq!(&writer.get_ref().inner, &[0, 1]);
    writer.flush().await.unwrap();
    assert_eq!(writer.buffer(), []);
    assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]);
    assert_eq!(writer.write(&[4]).await.unwrap(), 1);
    assert_eq!(writer.write(&[5]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [4, 5]);
    assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]);
    assert_eq!(writer.write(&[6]).await.unwrap(), 1);
    assert_eq!(writer.buffer(), [6]);
    assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5]);
    assert_eq!(writer.write(&[7, 8]).await.unwrap(), 2);
    assert_eq!(writer.buffer(), []);
    assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8]);
    assert_eq!(writer.write(&[9, 10, 11]).await.unwrap(), 3);
    assert_eq!(writer.buffer(), []);
    assert_eq!(
        writer.get_ref().inner,
        &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    );
    writer.flush().await.unwrap();
    assert_eq!(writer.buffer(), []);
    assert_eq!(
        &writer.get_ref().inner,
        &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    );
}
// `buf_writer_inner_flushes` against a sink that alternates Pending/Ready.
#[tokio::test]
async fn maybe_pending_buf_writer_inner_flushes() {
    let mut w = BufWriter::with_capacity(3, MaybePending::new(Vec::new()));
    assert_eq!(w.write(&[0, 1]).await.unwrap(), 2);
    // Still buffered: nothing reached the inner Vec yet.
    assert_eq!(&w.get_ref().inner, &[]);
    w.flush().await.unwrap();
    let w = w.into_inner().inner;
    assert_eq!(w, [0, 1]);
}
// `buf_writer_seek` against a sink whose writes AND seek completions
// alternate Pending/Ready (split start_seek/poll_complete protocol).
#[tokio::test]
async fn maybe_pending_buf_writer_seek() {
    struct MaybePendingSeek {
        inner: Cursor<Vec<u8>>,
        // Throttle flags: each op returns Pending first, then succeeds.
        ready_write: bool,
        ready_seek: bool,
        // Deferred result of the last `start_seek`, reported by
        // `poll_complete`.
        seek_res: Option<io::Result<()>>,
    }
    impl MaybePendingSeek {
        fn new(inner: Vec<u8>) -> Self {
            Self {
                inner: Cursor::new(inner),
                ready_write: false,
                ready_seek: false,
                seek_res: None,
            }
        }
    }
    impl AsyncWrite for MaybePendingSeek {
        fn poll_write(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            if self.ready_write {
                self.ready_write = false;
                Pin::new(&mut self.inner).poll_write(cx, buf)
            } else {
                self.ready_write = true;
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        }
        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Pin::new(&mut self.inner).poll_flush(cx)
        }
        fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Pin::new(&mut self.inner).poll_shutdown(cx)
        }
    }
    impl AsyncSeek for MaybePendingSeek {
        fn start_seek(mut self: Pin<&mut Self>, pos: SeekFrom) -> io::Result<()> {
            self.seek_res = Some(Pin::new(&mut self.inner).start_seek(pos));
            Ok(())
        }
        fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
            if self.ready_seek {
                self.ready_seek = false;
                self.seek_res.take().unwrap_or(Ok(()))?;
                Pin::new(&mut self.inner).poll_complete(cx)
            } else {
                self.ready_seek = true;
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        }
    }
    let mut w = BufWriter::with_capacity(3, MaybePendingSeek::new(Vec::new()));
    w.write_all(&[0, 1, 2, 3, 4, 5]).await.unwrap();
    w.write_all(&[6, 7]).await.unwrap();
    // Position query forces a flush despite the throttled sink.
    assert_eq!(w.seek(SeekFrom::Current(0)).await.unwrap(), 8);
    assert_eq!(
        &w.get_ref().inner.get_ref()[..],
        &[0, 1, 2, 3, 4, 5, 6, 7][..]
    );
    assert_eq!(w.seek(SeekFrom::Start(2)).await.unwrap(), 2);
    w.write_all(&[8, 9]).await.unwrap();
    w.flush().await.unwrap();
    assert_eq!(
        &w.into_inner().inner.into_inner()[..],
        &[0, 1, 8, 9, 4, 5, 6, 7]
    );
}
// Test sink that accepts at most `write_len` bytes per (vectored) write
// call and records everything written into `data`.
struct MockWriter {
    data: Vec<u8>,
    // Per-call byte cap, simulating a slow/partial writer.
    write_len: usize,
    // Value returned by `is_write_vectored()`.
    vectored: bool,
}
impl MockWriter {
fn new(write_len: usize) -> Self {
MockWriter {
data: Vec::new(),
write_len,
vectored: false,
}
}
fn vectored(write_len: usize) -> Self {
MockWriter {
data: Vec::new(),
write_len,
vectored: true,
}
}
fn write_up_to(&mut self, buf: &[u8], limit: usize) -> usize {
let len = cmp::min(buf.len(), limit);
self.data.extend_from_slice(&buf[..len]);
len
}
}
impl AsyncWrite for MockWriter {
    // Always ready; accepts at most `write_len` bytes of `buf`.
    fn poll_write(
        self: Pin<&mut Self>,
        _: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        let this = self.get_mut();
        let n = this.write_up_to(buf, this.write_len);
        Ok(n).into()
    }
    // Accepts bytes across the slices in order, stopping once `write_len`
    // total bytes have been taken for this call.
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        _: &mut Context<'_>,
        bufs: &[IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        let this = self.get_mut();
        let mut total_written = 0;
        for buf in bufs {
            // Remaining allowance for this call shrinks as slices are taken.
            let n = this.write_up_to(buf, this.write_len - total_written);
            total_written += n;
            if total_written == this.write_len {
                break;
            }
        }
        Ok(total_written).into()
    }
    fn is_write_vectored(&self) -> bool {
        self.vectored
    }
    // Flush/shutdown are no-ops: data lives in memory.
    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Ok(()).into()
    }
    fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Ok(()).into()
    }
}
// Vectored writes with no data (empty slice list or all-empty slices) must
// report 0 bytes and leave the non-vectored sink untouched.
#[tokio::test]
async fn write_vectored_empty_on_non_vectored() {
    let mut w = BufWriter::new(MockWriter::new(4));
    let n = assert_ok!(write_vectored(&mut w, &[]).await);
    assert_eq!(n, 0);
    let io_vec = [IoSlice::new(&[]); 3];
    let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
    assert_eq!(n, 0);
    assert_ok!(w.flush().await);
    assert!(w.get_ref().data.is_empty());
}
// Same as the non-vectored variant, but the sink advertises native
// vectored-write support.
#[tokio::test]
async fn write_vectored_empty_on_vectored() {
    let mut w = BufWriter::new(MockWriter::vectored(4));
    let n = assert_ok!(write_vectored(&mut w, &[]).await);
    assert_eq!(n, 0);
    let io_vec = [IoSlice::new(&[]); 3];
    let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
    assert_eq!(n, 0);
    assert_ok!(w.flush().await);
    assert!(w.get_ref().data.is_empty());
}
// A vectored write fitting in BufWriter's (default, large) buffer is fully
// buffered in one call; flush pushes it through the 4-bytes-per-call sink.
#[tokio::test]
async fn write_vectored_basic_on_non_vectored() {
    let msg = b"foo bar baz";
    let bufs = [
        IoSlice::new(&msg[0..4]),
        IoSlice::new(&msg[4..8]),
        IoSlice::new(&msg[8..]),
    ];
    let mut w = BufWriter::new(MockWriter::new(4));
    let n = assert_ok!(write_vectored(&mut w, &bufs).await);
    assert_eq!(n, msg.len());
    assert!(w.buffer() == &msg[..]);
    assert_ok!(w.flush().await);
    assert_eq!(w.get_ref().data, msg);
}
// Same as the non-vectored variant; a vectored-capable sink must produce
// identical observable results.
#[tokio::test]
async fn write_vectored_basic_on_vectored() {
    let msg = b"foo bar baz";
    let bufs = [
        IoSlice::new(&msg[0..4]),
        IoSlice::new(&msg[4..8]),
        IoSlice::new(&msg[8..]),
    ];
    let mut w = BufWriter::new(MockWriter::vectored(4));
    let n = assert_ok!(write_vectored(&mut w, &bufs).await);
    assert_eq!(n, msg.len());
    assert!(w.buffer() == &msg[..]);
    assert_ok!(w.flush().await);
    assert_eq!(w.get_ref().data, msg);
}
// Total payload exceeds the 8-byte BufWriter capacity: the first call only
// buffers up to capacity; the caller re-submits the advanced remainder.
#[tokio::test]
async fn write_vectored_large_total_on_non_vectored() {
    let msg = b"foo bar baz";
    let mut bufs = [
        IoSlice::new(&msg[0..4]),
        IoSlice::new(&msg[4..8]),
        IoSlice::new(&msg[8..]),
    ];
    let io_vec = IoBufs::new(&mut bufs);
    let mut w = BufWriter::with_capacity(8, MockWriter::new(4));
    let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
    assert_eq!(n, 8);
    assert!(w.buffer() == &msg[..8]);
    let io_vec = io_vec.advance(n);
    // Second call flushes the full buffer to the sink, then buffers the tail.
    let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
    assert_eq!(n, 3);
    assert!(w.get_ref().data.as_slice() == &msg[..8]);
    assert!(w.buffer() == &msg[8..]);
}
// With a vectored-capable sink accepting 10 bytes/call, BufWriter passes the
// oversized vectored write straight through instead of buffering it.
#[tokio::test]
async fn write_vectored_large_total_on_vectored() {
    let msg = b"foo bar baz";
    let mut bufs = [
        IoSlice::new(&msg[0..4]),
        IoSlice::new(&msg[4..8]),
        IoSlice::new(&msg[8..]),
    ];
    let io_vec = IoBufs::new(&mut bufs);
    let mut w = BufWriter::with_capacity(8, MockWriter::vectored(10));
    // Direct pass-through: the sink takes its full 10-byte allowance.
    let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
    assert_eq!(n, 10);
    assert!(w.buffer().is_empty());
    let io_vec = io_vec.advance(n);
    // The 1-byte remainder now fits in the buffer.
    let n = assert_ok!(write_vectored(&mut w, &io_vec).await);
    assert_eq!(n, 1);
    assert!(w.get_ref().data.as_slice() == &msg[..10]);
    assert!(w.buffer() == &msg[10..]);
}
// Drives repeated vectored writes through a BufWriter over a MockWriter,
// checking the buffer never exceeds its configured capacity.
struct VectoredWriteHarness {
    writer: BufWriter<MockWriter>,
    // Capacity used to construct `writer`, kept for the invariant check.
    buf_capacity: usize,
}
impl VectoredWriteHarness {
    // Harness over a non-vectored sink (4 bytes per call).
    fn new(buf_capacity: usize) -> Self {
        VectoredWriteHarness {
            writer: BufWriter::with_capacity(buf_capacity, MockWriter::new(4)),
            buf_capacity,
        }
    }
    // Harness over a vectored-capable sink (4 bytes per call).
    fn with_vectored_backend(buf_capacity: usize) -> Self {
        VectoredWriteHarness {
            writer: BufWriter::with_capacity(buf_capacity, MockWriter::vectored(4)),
            buf_capacity,
        }
    }
    // Submits `io_vec` repeatedly, advancing past accepted bytes, until all
    // data is written; asserts forward progress and the capacity invariant.
    async fn write_all<'a, 'b>(&mut self, mut io_vec: IoBufs<'a, 'b>) -> usize {
        let mut total_written = 0;
        while !io_vec.is_empty() {
            let n = assert_ok!(write_vectored(&mut self.writer, &io_vec).await);
            // Each call must accept at least one byte (no livelock)...
            assert!(n != 0);
            // ...and buffered data must never exceed the configured capacity.
            assert!(self.writer.buffer().len() <= self.buf_capacity);
            total_written += n;
            io_vec = io_vec.advance(n);
        }
        total_written
    }
    // Flushes remaining buffered bytes and exposes everything the sink saw.
    async fn flush(&mut self) -> &[u8] {
        assert_ok!(self.writer.flush().await);
        &self.writer.get_ref().data
    }
}
// Irregular slice layout (empty slice in the middle, uneven lengths) on a
// non-vectored sink: all bytes must arrive, in order.
#[tokio::test]
async fn write_vectored_odd_on_non_vectored() {
    let msg = b"foo bar baz";
    let mut bufs = [
        IoSlice::new(&msg[0..4]),
        IoSlice::new(&[]),
        IoSlice::new(&msg[4..9]),
        IoSlice::new(&msg[9..]),
    ];
    let mut h = VectoredWriteHarness::new(8);
    let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
    assert_eq!(bytes_written, msg.len());
    assert_eq!(h.flush().await, msg);
}
// Same irregular layout against the vectored-capable sink.
#[tokio::test]
async fn write_vectored_odd_on_vectored() {
    let msg = b"foo bar baz";
    let mut bufs = [
        IoSlice::new(&msg[0..4]),
        IoSlice::new(&[]),
        IoSlice::new(&msg[4..9]),
        IoSlice::new(&msg[9..]),
    ];
    let mut h = VectoredWriteHarness::with_vectored_backend(8);
    let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
    assert_eq!(bytes_written, msg.len());
    assert_eq!(h.flush().await, msg);
}
// A single slice larger than the 8-byte buffer capacity (after a leading
// empty slice) on a non-vectored sink: all bytes must still arrive in order.
#[tokio::test]
async fn write_vectored_large_slice_on_non_vectored() {
    let msg = b"foo bar baz";
    let mut bufs = [
        IoSlice::new(&[]),
        IoSlice::new(&msg[..9]),
        IoSlice::new(&msg[9..]),
    ];
    let mut h = VectoredWriteHarness::new(8);
    let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
    assert_eq!(bytes_written, msg.len());
    assert_eq!(h.flush().await, msg);
}
// Same oversized-slice layout against the vectored-capable sink.
#[tokio::test]
async fn write_vectored_large_slice_on_vectored() {
    let msg = b"foo bar baz";
    let mut bufs = [
        IoSlice::new(&[]),
        IoSlice::new(&msg[..9]),
        IoSlice::new(&msg[9..]),
    ];
    let mut h = VectoredWriteHarness::with_vectored_backend(8);
    let bytes_written = h.write_all(IoBufs::new(&mut bufs)).await;
    assert_eq!(bytes_written, msg.len());
    assert_eq!(h.flush().await, msg);
}

16
vendor/tokio/tests/io_chain.rs vendored Normal file
View file

@ -0,0 +1,16 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::AsyncReadExt;
use tokio_test::assert_ok;
// AsyncReadExt::chain: reading to end yields the first reader's bytes
// followed by the second's.
#[tokio::test]
async fn chain() {
    let mut buf = Vec::new();
    let rd1: &[u8] = b"hello ";
    let rd2: &[u8] = b"world";
    let mut rd = rd1.chain(rd2);
    assert_ok!(rd.read_to_end(&mut buf).await);
    assert_eq!(buf, b"hello world");
}

87
vendor/tokio/tests/io_copy.rs vendored Normal file
View file

@ -0,0 +1,87 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use bytes::BytesMut;
use futures::ready;
use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio_test::assert_ok;
use std::pin::Pin;
use std::task::{Context, Poll};
// io::copy transfers a reader's contents into a writer and returns the byte
// count; the reader yields once then signals EOF with a 0-byte read.
#[tokio::test]
async fn copy() {
    // One-shot reader: produces "hello world" on the first call, EOF after.
    struct Rd(bool);
    impl AsyncRead for Rd {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            if self.0 {
                buf.put_slice(b"hello world");
                self.0 = false;
                Poll::Ready(Ok(()))
            } else {
                // Ready with nothing written == EOF for AsyncRead.
                Poll::Ready(Ok(()))
            }
        }
    }
    let mut rd = Rd(true);
    let mut wr = Vec::new();
    let n = assert_ok!(io::copy(&mut rd, &mut wr).await);
    assert_eq!(n, 11);
    assert_eq!(wr, b"hello world");
}
// io::copy through a writer that buffers internally and only forwards on
// flush — verifies copy's final flush pushes all bytes to the real sink.
#[tokio::test]
async fn proxy() {
    struct BufferedWd {
        buf: BytesMut,
        writer: io::DuplexStream,
    }
    impl AsyncWrite for BufferedWd {
        // Writes only accumulate in the local buffer.
        fn poll_write(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            self.get_mut().buf.extend_from_slice(buf);
            Poll::Ready(Ok(buf.len()))
        }
        // Flush drains the local buffer into the duplex stream, then
        // flushes the stream itself.
        fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            let this = self.get_mut();
            while !this.buf.is_empty() {
                let n = ready!(Pin::new(&mut this.writer).poll_write(cx, &this.buf))?;
                let _ = this.buf.split_to(n);
            }
            Pin::new(&mut this.writer).poll_flush(cx)
        }
        fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Pin::new(&mut self.writer).poll_shutdown(cx)
        }
    }
    let (rd, wd) = io::duplex(1024);
    // Cap the read side so io::copy terminates after exactly 1024 bytes.
    let mut rd = rd.take(1024);
    let mut wd = BufferedWd {
        buf: BytesMut::new(),
        writer: wd,
    };
    // write start bytes
    assert_ok!(wd.write_all(&[0x42; 512]).await);
    assert_ok!(wd.flush().await);
    let n = assert_ok!(io::copy(&mut rd, &mut wd).await);
    assert_eq!(n, 1024);
}

View file

@ -0,0 +1,140 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support bind()
use std::time::Duration;
use tokio::io::{self, copy_bidirectional, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::task::JoinHandle;
/// Creates a connected pair of TCP streams over an ephemeral loopback port.
async fn make_socketpair() -> (TcpStream, TcpStream) {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();
    // Drive connect and accept concurrently so neither side blocks the other.
    let (connected, accepted) = tokio::join!(TcpStream::connect(addr), listener.accept());
    (connected.unwrap(), accepted.unwrap().0)
}
// Writes zeros into `s` until the socket stops accepting data promptly
// (write stalls past a 10ms sleep), filling the kernel send buffer.
// Returns the number of bytes written.
async fn block_write(s: &mut TcpStream) -> usize {
    static BUF: [u8; 2048] = [0; 2048];
    let mut copied = 0;
    loop {
        tokio::select! {
            result = s.write(&BUF) => {
                copied += result.expect("write error")
            },
            // If no write completes within 10ms, the buffer is full enough.
            _ = tokio::time::sleep(Duration::from_millis(10)) => {
                break;
            }
        }
    }
    copied
}
// Runs `cb` against a copy_bidirectional task twice — once with each
// argument order — to verify the two directions behave symmetrically.
// `cb` receives the task handle plus the two far ends of the socket pairs.
async fn symmetric<F, Fut>(mut cb: F)
where
    F: FnMut(JoinHandle<io::Result<(u64, u64)>>, TcpStream, TcpStream) -> Fut,
    Fut: std::future::Future<Output = ()>,
{
    // We run the test twice, with streams passed to copy_bidirectional in
    // different orders, in order to ensure that the two arguments are
    // interchangeable.
    let (a, mut a1) = make_socketpair().await;
    let (b, mut b1) = make_socketpair().await;
    let handle = tokio::spawn(async move { copy_bidirectional(&mut a1, &mut b1).await });
    cb(handle, a, b).await;
    let (a, mut a1) = make_socketpair().await;
    let (b, mut b1) = make_socketpair().await;
    let handle = tokio::spawn(async move { copy_bidirectional(&mut b1, &mut a1).await });
    cb(handle, b, a).await;
}
// Bytes written into one side of the proxied pair come out the other side.
#[tokio::test]
async fn test_basic_transfer() {
    symmetric(|_handle, mut a, mut b| async move {
        a.write_all(b"test").await.unwrap();
        let mut tmp = [0; 4];
        b.read_exact(&mut tmp).await.unwrap();
        assert_eq!(&tmp[..], b"test");
    })
    .await
}
// Shutting down one direction must not stop the other: data still flows
// b→a after a's write side closes, and copy_bidirectional completes with
// per-direction byte counts once both sides are closed.
#[tokio::test]
async fn test_transfer_after_close() {
    symmetric(|handle, mut a, mut b| async move {
        AsyncWriteExt::shutdown(&mut a).await.unwrap();
        // a closed its write side: b observes EOF on read.
        b.read_to_end(&mut Vec::new()).await.unwrap();
        // The reverse direction is still open.
        b.write_all(b"quux").await.unwrap();
        let mut tmp = [0; 4];
        a.read_exact(&mut tmp).await.unwrap();
        assert_eq!(&tmp[..], b"quux");
        // Once both are closed, we should have our handle back
        drop(b);
        // (0 bytes a→b, 4 bytes b→a).
        assert_eq!(handle.await.unwrap().unwrap(), (0, 4));
    })
    .await
}
// Saturating one direction's send buffer must not stall the other
// direction of copy_bidirectional.
#[tokio::test]
async fn blocking_one_side_does_not_block_other() {
    symmetric(|handle, mut a, mut b| async move {
        // Fill a's send buffer so the a→b path is blocked on write.
        block_write(&mut a).await;
        // The b→a path must still deliver data.
        b.write_all(b"quux").await.unwrap();
        let mut tmp = [0; 4];
        a.read_exact(&mut tmp).await.unwrap();
        assert_eq!(&tmp[..], b"quux");
        AsyncWriteExt::shutdown(&mut a).await.unwrap();
        // Drain everything a pushed through, unblocking the copy task.
        let mut buf = Vec::new();
        b.read_to_end(&mut buf).await.unwrap();
        drop(b);
        assert_eq!(handle.await.unwrap().unwrap(), (buf.len() as u64, 4));
    })
    .await
}
// A write error on either mocked stream must make copy_bidirectional
// return Err promptly rather than wait for the other direction.
#[tokio::test]
async fn immediate_exit_on_write_error() {
    let payload = b"here, take this";
    let error = || io::Error::new(io::ErrorKind::Other, "no thanks!");
    let mut a = tokio_test::io::Builder::new()
        .read(payload)
        .write_error(error())
        .build();
    let mut b = tokio_test::io::Builder::new()
        .read(payload)
        .write_error(error())
        .build();
    assert!(copy_bidirectional(&mut a, &mut b).await.is_err());
}
// A read error on either mocked stream must likewise fail the copy.
#[tokio::test]
async fn immediate_exit_on_read_error() {
    let error = || io::Error::new(io::ErrorKind::Other, "got nothing!");
    let mut a = tokio_test::io::Builder::new().read_error(error()).build();
    let mut b = tokio_test::io::Builder::new().read_error(error()).build();
    assert!(copy_bidirectional(&mut a, &mut b).await.is_err());
}

100
vendor/tokio/tests/io_driver.rs vendored Normal file
View file

@ -0,0 +1,100 @@
#![warn(rust_2018_idioms)]
// Wasi does not support panic recovery or threading
#![cfg(all(feature = "full", not(tokio_wasi)))]
use tokio::net::TcpListener;
use tokio::runtime;
use tokio_test::{assert_ok, assert_pending};
use futures::task::{waker_ref, ArcWake};
use std::future::Future;
use std::net::TcpStream;
use std::pin::Pin;
use std::sync::{mpsc, Arc, Mutex};
use std::task::Context;
// Minimal hand-rolled task: a pinned, boxed future behind a Mutex so it can
// be polled manually from the test.
struct Task<T> {
    future: Mutex<Pin<Box<T>>>,
}
impl<T: Send> ArcWake for Task<T> {
    // Deliberately ignores wakes: simulates an executor that is shutting
    // down and will never re-poll the task.
    fn wake_by_ref(_: &Arc<Self>) {
        // Do nothing...
    }
}
impl<T> Task<T> {
    /// Boxes and pins `future` and wraps it for manual polling.
    fn new(future: T) -> Task<T> {
        let future = Mutex::new(Box::pin(future));
        Task { future }
    }
}
#[test]
fn test_drop_on_notify() {
    // When the reactor receives a kernel notification, it notifies the
    // task that holds the associated socket. If this notification results in
    // the task being dropped, the socket will also be dropped.
    //
    // Previously, there was a deadlock scenario where the reactor, while
    // notifying, held a lock and the task being dropped attempted to acquire
    // that same lock in order to clean up state.
    //
    // To simulate this case, we create a fake executor that does nothing when
    // the task is notified. This simulates an executor in the process of
    // shutting down. Then, when the task handle is dropped, the task itself is
    // dropped.
    let rt = runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();
    let (addr_tx, addr_rx) = mpsc::channel();
    // Define a task that just drains the listener
    let task = Arc::new(Task::new(async move {
        // Create a listener
        let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
        // Send the address
        let addr = listener.local_addr().unwrap();
        addr_tx.send(addr).unwrap();
        loop {
            let _ = listener.accept().await;
        }
    }));
    {
        // Poll once inside the runtime context so the listener registers
        // with the IO driver; the accept loop then parks as Pending.
        let _enter = rt.enter();
        let waker = waker_ref(&task);
        let mut cx = Context::from_waker(&waker);
        assert_pending!(task.future.lock().unwrap().as_mut().poll(&mut cx));
    }
    // Get the address
    let addr = addr_rx.recv().unwrap();
    // Drop the task (and with it the registered listener) while the driver
    // may still notify it — this is the scenario that used to deadlock.
    drop(task);
    // Establish a connection to the acceptor
    let _s = TcpStream::connect(&addr).unwrap();
    // Force the reactor to turn
    rt.block_on(async {});
}
#[test]
#[should_panic(
expected = "A Tokio 1.x context was found, but IO is disabled. Call `enable_io` on the runtime builder to enable IO."
)]
fn panics_when_io_disabled() {
let rt = runtime::Builder::new_current_thread().build().unwrap();
rt.block_on(async {
let _ =
tokio::net::TcpListener::from_std(std::net::TcpListener::bind("127.0.0.1:0").unwrap());
});
}

54
vendor/tokio/tests/io_driver_drop.rs vendored Normal file
View file

@ -0,0 +1,54 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support bind
use tokio::net::TcpListener;
use tokio::runtime;
use tokio_test::{assert_err, assert_pending, assert_ready, task};
// Accepting on a listener whose runtime (and IO driver) is already gone
// must fail immediately rather than block forever.
#[test]
fn tcp_doesnt_block() {
    let rt = rt();
    let listener = {
        // Registration requires an active runtime context.
        let _enter = rt.enter();
        let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
        TcpListener::from_std(listener).unwrap()
    };
    drop(rt);
    let mut task = task::spawn(async move {
        assert_err!(listener.accept().await);
    });
    // Ready on the first poll: no driver means an immediate error.
    assert_ready!(task.poll());
}
// A task parked on accept() must be woken when the runtime is dropped, and
// the re-poll must complete with an error.
#[test]
fn drop_wakes() {
    let rt = rt();
    let listener = {
        let _enter = rt.enter();
        let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
        TcpListener::from_std(listener).unwrap()
    };
    let mut task = task::spawn(async move {
        assert_err!(listener.accept().await);
    });
    // No connection pending: the accept parks.
    assert_pending!(task.poll());
    // Driver shutdown must wake the parked task...
    drop(rt);
    assert!(task.is_woken());
    // ...and the next poll observes the shutdown error.
    assert_ready!(task.poll());
}
/// Build the single-threaded runtime used by these tests, with the IO and
/// time drivers enabled.
fn rt() -> runtime::Runtime {
    let mut builder = runtime::Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}

34
vendor/tokio/tests/io_fill_buf.rs vendored Normal file
View file

@ -0,0 +1,34 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support file operations
use tempfile::NamedTempFile;
use tokio::fs::File;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio_test::assert_ok;
// Reads a file to completion using only the `fill_buf`/`consume` pair,
// exercising BufReader's buffer management over `tokio::fs::File`.
#[tokio::test]
async fn fill_buf_file() {
    let file = NamedTempFile::new().unwrap();

    assert_ok!(std::fs::write(file.path(), b"hello"));

    let file = assert_ok!(File::open(file.path()).await);
    let mut file = BufReader::new(file);

    let mut contents = Vec::new();

    loop {
        // Scope the borrow of the internal buffer so `consume` can take
        // `&mut self` afterwards.
        let consumed = {
            let buffer = assert_ok!(file.fill_buf().await);
            // An empty buffer from fill_buf signals EOF.
            if buffer.is_empty() {
                break;
            }
            contents.extend_from_slice(buffer);
            buffer.len()
        };

        file.consume(consumed);
    }

    assert_eq!(contents, b"hello");
}

19
vendor/tokio/tests/io_lines.rs vendored Normal file
View file

@ -0,0 +1,19 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::AsyncBufReadExt;
use tokio_test::assert_ok;
// Exercises `AsyncBufReadExt::lines` on an in-memory byte slice: CRLF is
// stripped along with LF, empty lines are yielded, and EOF returns None.
#[tokio::test]
async fn lines_inherent() {
    let rd: &[u8] = b"hello\r\nworld\n\n";
    let mut st = rd.lines();

    for expected in ["hello", "world", ""] {
        let line = assert_ok!(st.next_line().await).unwrap();
        assert_eq!(line, expected);
    }

    assert!(assert_ok!(st.next_line().await).is_none());
}

121
vendor/tokio/tests/io_mem_stream.rs vendored Normal file
View file

@ -0,0 +1,121 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
/// Round-trip a four-byte message in each direction across a duplex pipe.
#[tokio::test]
async fn ping_pong() {
    let (mut left, mut right) = duplex(32);
    let mut scratch = [0u8; 4];

    left.write_all(b"ping").await.unwrap();
    right.read_exact(&mut scratch).await.unwrap();
    assert_eq!(&scratch, b"ping");

    right.write_all(b"pong").await.unwrap();
    left.read_exact(&mut scratch).await.unwrap();
    assert_eq!(&scratch, b"pong");
}
// The two halves of a duplex stream can be moved into separate spawned
// tasks and still exchange data.
#[tokio::test]
async fn across_tasks() {
    let (mut a, mut b) = duplex(32);

    let t1 = tokio::spawn(async move {
        a.write_all(b"ping").await.unwrap();
        let mut buf = [0u8; 4];
        a.read_exact(&mut buf).await.unwrap();
        assert_eq!(&buf, b"pong");
    });

    let t2 = tokio::spawn(async move {
        let mut buf = [0u8; 4];
        b.read_exact(&mut buf).await.unwrap();
        assert_eq!(&buf, b"ping");
        b.write_all(b"pong").await.unwrap();
    });

    t1.await.unwrap();
    t2.await.unwrap();
}
// Dropping the writer half after a write: the reader first drains the
// buffered "ping", then observes EOF as a zero-length read.
#[tokio::test]
async fn disconnect() {
    let (mut a, mut b) = duplex(32);

    let t1 = tokio::spawn(async move {
        a.write_all(b"ping").await.unwrap();
        // and dropped
    });

    let t2 = tokio::spawn(async move {
        let mut buf = [0u8; 32];
        let n = b.read(&mut buf).await.unwrap();
        assert_eq!(&buf[..n], b"ping");

        // Peer is gone and the buffer is drained: EOF.
        let n = b.read(&mut buf).await.unwrap();
        assert_eq!(n, 0);
    });

    t1.await.unwrap();
    t2.await.unwrap();
}
// Dropping the reader while the writer is blocked on a full (capacity 2)
// buffer must fail the pending write with an error.
#[tokio::test]
async fn disconnect_reader() {
    let (a, mut b) = duplex(2);

    let t1 = tokio::spawn(async move {
        // this will block, as not all data fits into duplex
        b.write_all(b"ping").await.unwrap_err();
    });

    let t2 = tokio::spawn(async move {
        // here we drop the reader side, and we expect the writer in the other
        // task to exit with an error
        drop(a);
    });

    t2.await.unwrap();
    t1.await.unwrap();
}
// Writes are capped at the duplex capacity (32): a 64-byte write accepts
// 32 bytes; a second write only proceeds once the reader frees space,
// accepting exactly the 4 drained bytes.
#[tokio::test]
async fn max_write_size() {
    let (mut a, mut b) = duplex(32);

    let t1 = tokio::spawn(async move {
        let n = a.write(&[0u8; 64]).await.unwrap();
        assert_eq!(n, 32);
        let n = a.write(&[0u8; 64]).await.unwrap();
        assert_eq!(n, 4);
    });

    let mut buf = [0u8; 4];
    b.read_exact(&mut buf).await.unwrap();

    t1.await.unwrap();

    // drop b only after task t1 finishes writing
    drop(b);
}
// The busy write/read loop never yields on its own, so it must be stopped
// by tokio's cooperative budget; otherwise the biased select would never
// reach `yield_now` and this test would hang.
#[tokio::test]
async fn duplex_is_cooperative() {
    let (mut tx, mut rx) = tokio::io::duplex(1024 * 8);

    tokio::select! {
        biased;

        _ = async {
            loop {
                let buf = [3u8; 4096];
                tx.write_all(&buf).await.unwrap();
                let mut buf = [0u8; 4096];
                let _ = rx.read(&mut buf).await.unwrap();
            }
        } => {},
        _ = tokio::task::yield_now() => {}
    }
}

176
vendor/tokio/tests/io_panic.rs vendored Normal file
View file

@ -0,0 +1,176 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support panic recovery
use std::task::{Context, Poll};
use std::{error::Error, pin::Pin};
use tokio::io::{self, split, AsyncRead, AsyncWrite, ReadBuf};
mod support {
pub mod panic;
}
use support::panic::test_panic;
/// Minimal duplex stub: every read yields a single `b'z'`, every write
/// reports one byte accepted, and flush/shutdown complete immediately.
struct RW;

impl AsyncRead for RW {
    fn poll_read(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        buf.put_slice(&[b'z']);
        Poll::Ready(Ok(()))
    }
}

impl AsyncWrite for RW {
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        _buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        // Always claim exactly one byte was written.
        Poll::Ready(Ok(1))
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }
}
#[cfg(unix)]
mod unix {
    use std::os::unix::prelude::{AsRawFd, RawFd};

    /// Fake file descriptor (always fd 0) used to construct `AsyncFd`
    /// without opening a real resource.
    pub struct MockFd;

    impl AsRawFd for MockFd {
        fn as_raw_fd(&self) -> RawFd {
            0
        }
    }
}
// `initialize_unfilled_to` past the buffer's capacity must panic, and the
// panic must be attributed to the caller's file (track_caller behavior).
#[test]
fn read_buf_initialize_unfilled_to_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        // Zero-capacity buffer, so asking for 2 initialized bytes panics.
        let mut buffer = Vec::<u8>::new();
        let mut read_buf = ReadBuf::new(&mut buffer);

        read_buf.initialize_unfilled_to(2);
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}
// `advance` past the initialized region must panic, attributed to the
// caller's file.
#[test]
fn read_buf_advance_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let mut buffer = Vec::<u8>::new();
        let mut read_buf = ReadBuf::new(&mut buffer);

        read_buf.advance(2);
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}
// `set_filled` beyond the initialized region must panic, attributed to
// the caller's file.
#[test]
fn read_buf_set_filled_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let mut buffer = Vec::<u8>::new();
        let mut read_buf = ReadBuf::new(&mut buffer);

        read_buf.set_filled(2);
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}
// `put_slice` into a buffer with insufficient remaining capacity must
// panic, attributed to the caller's file.
#[test]
fn read_buf_put_slice_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let mut buffer = Vec::<u8>::new();
        let mut read_buf = ReadBuf::new(&mut buffer);

        let new_slice = [0x40_u8, 0x41_u8];

        read_buf.put_slice(&new_slice);
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}
// `unsplit` on halves from two different `split` calls must panic,
// attributed to the caller's file.
#[test]
fn unsplit_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let (r1, _w1) = split(RW);
        let (_r2, w2) = split(RW);
        // Mismatched pair: r1 belongs with _w1, not w2.
        r1.unsplit(w2);
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}
// `AsyncFd::new` panics when the runtime has no IO driver; the panic must
// be attributed to the caller's file.
#[test]
#[cfg(unix)]
fn async_fd_new_panic_caller() -> Result<(), Box<dyn Error>> {
    use tokio::io::unix::AsyncFd;
    use tokio::runtime::Builder;

    let panic_location_file = test_panic(|| {
        // Runtime without `enable_io` so it has no IO driver set.
        let rt = Builder::new_current_thread().build().unwrap();
        rt.block_on(async {
            let fd = unix::MockFd;

            let _ = AsyncFd::new(fd);
        });
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}
// Same as `async_fd_new_panic_caller`, but via the `with_interest`
// constructor.
#[test]
#[cfg(unix)]
fn async_fd_with_interest_panic_caller() -> Result<(), Box<dyn Error>> {
    use tokio::io::unix::AsyncFd;
    use tokio::io::Interest;
    use tokio::runtime::Builder;

    let panic_location_file = test_panic(|| {
        // Runtime without `enable_io` so it has no IO driver set.
        let rt = Builder::new_current_thread().build().unwrap();
        rt.block_on(async {
            let fd = unix::MockFd;

            let _ = AsyncFd::with_interest(fd, Interest::READABLE);
        });
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}

375
vendor/tokio/tests/io_poll_aio.rs vendored Normal file
View file

@ -0,0 +1,375 @@
#![warn(rust_2018_idioms)]
#![cfg(all(target_os = "freebsd", feature = "net"))]
use mio_aio::{AioCb, AioFsyncMode, LioCb};
use std::{
future::Future,
mem,
os::unix::io::{AsRawFd, RawFd},
pin::Pin,
task::{Context, Poll},
};
use tempfile::tempfile;
use tokio::io::bsd::{Aio, AioSource};
use tokio_test::assert_pending;
mod aio {
use super::*;
/// Adapts mio_aio::AioCb (which implements mio::event::Source) to AioSource
struct WrappedAioCb<'a>(AioCb<'a>);

impl<'a> AioSource for WrappedAioCb<'a> {
    // Forward kqueue registration straight to the inner AioCb.
    fn register(&mut self, kq: RawFd, token: usize) {
        self.0.register_raw(kq, token)
    }
    fn deregister(&mut self) {
        self.0.deregister_raw()
    }
}
/// A very crude implementation of an AIO-based future
struct FsyncFut(Aio<WrappedAioCb<'static>>);

impl Future for FsyncFut {
    type Output = std::io::Result<()>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let poll_result = self.0.poll_ready(cx);
        match poll_result {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
            Poll::Ready(Ok(_ev)) => {
                // At this point, we could clear readiness. But there's no
                // point, since we're about to drop the Aio.
                // Fetch the operation's final status from the control block.
                let result = (*self.0).0.aio_return();
                match result {
                    Ok(_) => Poll::Ready(Ok(())),
                    Err(e) => Poll::Ready(Err(e.into())),
                }
            }
        }
    }
}
/// Low-level AIO Source
///
/// An example bypassing mio_aio and Nix to demonstrate how the kevent
/// registration actually works, under the hood.
struct LlSource(Pin<Box<libc::aiocb>>);

impl AioSource for LlSource {
    fn register(&mut self, kq: RawFd, token: usize) {
        // SIGEV_KEVENT delivers completion through the kqueue fd supplied
        // by tokio; the token round-trips via sigev_value.
        let mut sev: libc::sigevent = unsafe { mem::MaybeUninit::zeroed().assume_init() };
        sev.sigev_notify = libc::SIGEV_KEVENT;
        sev.sigev_signo = kq;
        sev.sigev_value = libc::sigval {
            sival_ptr: token as *mut libc::c_void,
        };
        self.0.aio_sigevent = sev;
    }

    fn deregister(&mut self) {
        // A zeroed sigevent disables notification.
        unsafe {
            self.0.aio_sigevent = mem::zeroed();
        }
    }
}
/// Future driving the raw-`libc::aiocb` source to completion.
struct LlFut(Aio<LlSource>);

impl Future for LlFut {
    type Output = std::io::Result<()>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let poll_result = self.0.poll_ready(cx);
        match poll_result {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
            Poll::Ready(Ok(_ev)) => {
                // Collect the final status directly via libc.
                let r = unsafe { libc::aio_return(self.0 .0.as_mut().get_unchecked_mut()) };
                assert_eq!(0, r);
                Poll::Ready(Ok(()))
            }
        }
    }
}
/// A very simple object that can implement AioSource and can be reused.
///
/// mio_aio normally assumes that each AioCb will be consumed on completion.
/// This somewhat contrived example shows how an Aio object can be reused
/// anyway.
struct ReusableFsyncSource {
    aiocb: Pin<Box<AioCb<'static>>>,
    // Registration parameters saved by `register` so later submissions
    // can re-register with the same kqueue/token.
    fd: RawFd,
    token: usize,
}

impl ReusableFsyncSource {
    fn fsync(&mut self) {
        // Re-register with the saved kqueue fd/token, then submit.
        self.aiocb.register_raw(self.fd, self.token);
        self.aiocb.fsync(AioFsyncMode::O_SYNC).unwrap();
    }
    fn new(aiocb: AioCb<'static>) -> Self {
        ReusableFsyncSource {
            aiocb: Box::pin(aiocb),
            fd: 0,
            token: 0,
        }
    }
    fn reset(&mut self, aiocb: AioCb<'static>) {
        self.aiocb = Box::pin(aiocb);
    }
}
impl AioSource for ReusableFsyncSource {
    fn register(&mut self, kq: RawFd, token: usize) {
        // Only record the parameters; the actual registration happens on
        // each `fsync` call.
        self.fd = kq;
        self.token = token;
    }
    fn deregister(&mut self) {
        self.fd = 0;
    }
}
/// Future borrowing a reusable fsync source; unlike `FsyncFut` it must
/// clear readiness because the Aio lives on after completion.
struct ReusableFsyncFut<'a>(&'a mut Aio<ReusableFsyncSource>);
impl<'a> Future for ReusableFsyncFut<'a> {
    type Output = std::io::Result<()>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let poll_result = self.0.poll_ready(cx);
        match poll_result {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
            Poll::Ready(Ok(ev)) => {
                // Since this future uses a reusable Aio, we must clear
                // its readiness here. That makes the future
                // non-idempotent; the caller can't poll it repeatedly after
                // it has already returned Ready. But that's ok; most
                // futures behave this way.
                self.0.clear_ready(ev);
                let result = (*self.0).aiocb.aio_return();
                match result {
                    Ok(_) => Poll::Ready(Ok(())),
                    Err(e) => Poll::Ready(Err(e.into())),
                }
            }
        }
    }
}
// End-to-end: submit an asynchronous fsync through the Aio wrapper and
// await its completion.
#[tokio::test]
async fn fsync() {
    let f = tempfile().unwrap();
    let fd = f.as_raw_fd();
    let aiocb = AioCb::from_fd(fd, 0);
    let source = WrappedAioCb(aiocb);
    let mut poll_aio = Aio::new_for_aio(source).unwrap();
    // Kick off the operation before constructing the future.
    (*poll_aio).0.fsync(AioFsyncMode::O_SYNC).unwrap();
    let fut = FsyncFut(poll_aio);
    fut.await.unwrap();
}
// Same as `fsync`, but issued through the raw libc interface to show the
// low-level kevent path.
#[tokio::test]
async fn ll_fsync() {
    let f = tempfile().unwrap();
    let fd = f.as_raw_fd();
    let mut aiocb: libc::aiocb = unsafe { mem::MaybeUninit::zeroed().assume_init() };
    aiocb.aio_fildes = fd;
    let source = LlSource(Box::pin(aiocb));
    let mut poll_aio = Aio::new_for_aio(source).unwrap();
    let r = unsafe {
        let p = (*poll_aio).0.as_mut().get_unchecked_mut();
        libc::aio_fsync(libc::O_SYNC, p)
    };
    // aio_fsync returns 0 on successful submission.
    assert_eq!(0, r);
    let fut = LlFut(poll_aio);
    fut.await.unwrap();
}
/// A suitably crafted future type can reuse an Aio object
#[tokio::test]
async fn reuse() {
    let f = tempfile().unwrap();
    let fd = f.as_raw_fd();
    let aiocb0 = AioCb::from_fd(fd, 0);
    let source = ReusableFsyncSource::new(aiocb0);
    let mut poll_aio = Aio::new_for_aio(source).unwrap();
    poll_aio.fsync();
    let fut0 = ReusableFsyncFut(&mut poll_aio);
    fut0.await.unwrap();

    // Swap in a fresh AioCb; the first future cleared readiness, so the
    // Aio must not report ready until a new operation is submitted.
    let aiocb1 = AioCb::from_fd(fd, 0);
    poll_aio.reset(aiocb1);
    let mut ctx = Context::from_waker(futures::task::noop_waker_ref());
    assert_pending!(poll_aio.poll_ready(&mut ctx));
    poll_aio.fsync();
    let fut1 = ReusableFsyncFut(&mut poll_aio);
    fut1.await.unwrap();
}
}
mod lio {
use super::*;
/// Adapts mio_aio::LioCb to `AioSource`, forwarding kqueue registration
/// to the inner list-IO control block.
struct WrappedLioCb<'a>(LioCb<'a>);
impl<'a> AioSource for WrappedLioCb<'a> {
    fn register(&mut self, kq: RawFd, token: usize) {
        self.0.register_raw(kq, token)
    }
    fn deregister(&mut self) {
        self.0.deregister_raw()
    }
}
/// A very crude lio_listio-based Future
struct LioFut(Option<Aio<WrappedLioCb<'static>>>);

impl Future for LioFut {
    type Output = std::io::Result<Vec<isize>>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let poll_result = self.0.as_mut().unwrap().poll_ready(cx);
        match poll_result {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
            Poll::Ready(Ok(_ev)) => {
                // At this point, we could clear readiness. But there's no
                // point, since we're about to drop the Aio.
                // Completion consumes the Aio (taken out of the Option)
                // and collects one result per list element.
                let r = self.0.take().unwrap().into_inner().0.into_results(|iter| {
                    iter.map(|lr| lr.result.unwrap()).collect::<Vec<isize>>()
                });
                Poll::Ready(Ok(r))
            }
        }
    }
}
/// Minimal example demonstrating reuse of an Aio object with lio
/// readiness. mio_aio::LioCb actually does something similar under the
/// hood.
struct ReusableLioSource {
    // `Option` so completion can take ownership of the control block.
    liocb: Option<LioCb<'static>>,
    // Registration parameters saved by `register` for re-submission.
    fd: RawFd,
    token: usize,
}
impl ReusableLioSource {
    fn new(liocb: LioCb<'static>) -> Self {
        ReusableLioSource {
            liocb: Some(liocb),
            fd: 0,
            token: 0,
        }
    }
    fn reset(&mut self, liocb: LioCb<'static>) {
        self.liocb = Some(liocb);
    }
    fn submit(&mut self) {
        // Re-register with the saved kqueue fd/token, then submit the list.
        self.liocb
            .as_mut()
            .unwrap()
            .register_raw(self.fd, self.token);
        self.liocb.as_mut().unwrap().submit().unwrap();
    }
}
impl AioSource for ReusableLioSource {
    fn register(&mut self, kq: RawFd, token: usize) {
        // Only record the parameters; registration happens on `submit`.
        self.fd = kq;
        self.token = token;
    }
    fn deregister(&mut self) {
        self.fd = 0;
    }
}
/// Future borrowing a reusable lio source; clears readiness on completion
/// because the Aio outlives the future.
struct ReusableLioFut<'a>(&'a mut Aio<ReusableLioSource>);
impl<'a> Future for ReusableLioFut<'a> {
    type Output = std::io::Result<Vec<isize>>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let poll_result = self.0.poll_ready(cx);
        match poll_result {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
            Poll::Ready(Ok(ev)) => {
                // Since this future uses a reusable Aio, we must clear
                // its readiness here. That makes the future
                // non-idempotent; the caller can't poll it repeatedly after
                // it has already returned Ready. But that's ok; most
                // futures behave this way.
                self.0.clear_ready(ev);
                let r = (*self.0).liocb.take().unwrap().into_results(|iter| {
                    iter.map(|lr| lr.result.unwrap()).collect::<Vec<isize>>()
                });
                Poll::Ready(Ok(r))
            }
        }
    }
}
/// An lio_listio operation with one write element
#[tokio::test]
async fn onewrite() {
    const WBUF: &[u8] = b"abcdef";
    let f = tempfile().unwrap();

    let mut builder = mio_aio::LioCbBuilder::with_capacity(1);
    builder = builder.emplace_slice(
        f.as_raw_fd(),
        0,
        &WBUF[..],
        0,
        mio_aio::LioOpcode::LIO_WRITE,
    );
    let liocb = builder.finish();
    let source = WrappedLioCb(liocb);
    let mut poll_aio = Aio::new_for_lio(source).unwrap();

    // Send the operation to the kernel
    (*poll_aio).0.submit().unwrap();
    let fut = LioFut(Some(poll_aio));
    let v = fut.await.unwrap();
    // One result per list element; the write must cover the full buffer.
    assert_eq!(v.len(), 1);
    assert_eq!(v[0] as usize, WBUF.len());
}
/// A suitably crafted future type can reuse an Aio object
#[tokio::test]
async fn reuse() {
    const WBUF: &[u8] = b"abcdef";
    let f = tempfile().unwrap();

    // First operation: a one-element list write.
    let mut builder0 = mio_aio::LioCbBuilder::with_capacity(1);
    builder0 = builder0.emplace_slice(
        f.as_raw_fd(),
        0,
        &WBUF[..],
        0,
        mio_aio::LioOpcode::LIO_WRITE,
    );
    let liocb0 = builder0.finish();
    let source = ReusableLioSource::new(liocb0);
    let mut poll_aio = Aio::new_for_aio(source).unwrap();
    poll_aio.submit();
    let fut0 = ReusableLioFut(&mut poll_aio);
    let v = fut0.await.unwrap();
    assert_eq!(v.len(), 1);
    assert_eq!(v[0] as usize, WBUF.len());

    // Now reuse the same Aio
    let mut builder1 = mio_aio::LioCbBuilder::with_capacity(1);
    builder1 = builder1.emplace_slice(
        f.as_raw_fd(),
        0,
        &WBUF[..],
        0,
        mio_aio::LioOpcode::LIO_WRITE,
    );
    let liocb1 = builder1.finish();
    poll_aio.reset(liocb1);
    // Readiness was cleared by the first future, so the Aio must be
    // pending until the new list is submitted.
    let mut ctx = Context::from_waker(futures::task::noop_waker_ref());
    assert_pending!(poll_aio.poll_ready(&mut ctx));
    poll_aio.submit();
    let fut1 = ReusableLioFut(&mut poll_aio);
    let v = fut1.await.unwrap();
    assert_eq!(v.len(), 1);
    assert_eq!(v[0] as usize, WBUF.len());
}
}

75
vendor/tokio/tests/io_read.rs vendored Normal file
View file

@ -0,0 +1,75 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support panic recovery
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
use tokio_test::assert_ok;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
mod support {
pub(crate) mod leaked_buffers;
}
use support::leaked_buffers::LeakedBuffers;
// A reader that yields "hello world" on its first (and only expected)
// poll; `read` into an exactly-sized buffer returns it all at once.
#[tokio::test]
async fn read() {
    #[derive(Default)]
    struct Rd {
        // Guards against the reader being polled more than once.
        poll_cnt: usize,
    }

    impl AsyncRead for Rd {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            assert_eq!(0, self.poll_cnt);
            self.poll_cnt += 1;

            buf.put_slice(b"hello world");
            Poll::Ready(Ok(()))
        }
    }

    let mut buf = Box::new([0; 11]);
    let mut rd = Rd::default();

    let n = assert_ok!(rd.read(&mut buf[..]).await);
    assert_eq!(n, 11);
    assert_eq!(buf[..], b"hello world"[..]);
}
/// Reader that violates the `AsyncRead` contract by replacing the
/// caller's `ReadBuf` with one backed by its own (leaked) storage.
struct BadAsyncRead {
    leaked_buffers: LeakedBuffers,
}

impl BadAsyncRead {
    fn new() -> Self {
        Self {
            leaked_buffers: LeakedBuffers::new(),
        }
    }
}

impl AsyncRead for BadAsyncRead {
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Swap the buffer wholesale and claim it is fully filled —
        // exactly the misbehavior `read_buf` must detect.
        *buf = ReadBuf::new(unsafe { self.leaked_buffers.create(buf.capacity()) });
        buf.advance(buf.capacity());
        Poll::Ready(Ok(()))
    }
}
// `read_buf` must detect the swapped buffer from `BadAsyncRead` and panic
// rather than trust the bogus fill count.
#[tokio::test]
#[should_panic]
async fn read_buf_bad_async_read() {
    let mut buf = Vec::with_capacity(10);
    BadAsyncRead::new().read_buf(&mut buf).await.unwrap();
}

36
vendor/tokio/tests/io_read_buf.rs vendored Normal file
View file

@ -0,0 +1,36 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
use tokio_test::assert_ok;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
// `read_buf` appends into an (initially empty) Vec; a single poll of the
// reader supplies the full payload.
#[tokio::test]
async fn read_buf() {
    struct Rd {
        // Number of times poll_read has run; asserted to be exactly one.
        cnt: usize,
    }

    impl AsyncRead for Rd {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            self.cnt += 1;
            buf.put_slice(b"hello world");
            Poll::Ready(Ok(()))
        }
    }

    let mut buf = vec![];
    let mut rd = Rd { cnt: 0 };

    let n = assert_ok!(rd.read_buf(&mut buf).await);
    assert_eq!(1, rd.cnt);
    assert_eq!(n, 11);
    assert_eq!(buf[..], b"hello world"[..]);
}

15
vendor/tokio/tests/io_read_exact.rs vendored Normal file
View file

@ -0,0 +1,15 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::AsyncReadExt;
use tokio_test::assert_ok;
// An 8-byte destination read from an 11-byte source: exactly the first
// eight bytes must be copied.
#[tokio::test]
async fn read_exact() {
    let mut dst = Box::new([0u8; 8]);
    let mut src: &[u8] = b"hello world";

    let n = assert_ok!(src.read_exact(&mut dst[..]).await);
    assert_eq!(n, 8);
    assert_eq!(dst[..], b"hello wo"[..]);
}

107
vendor/tokio/tests/io_read_line.rs vendored Normal file
View file

@ -0,0 +1,107 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::io::ErrorKind;
use tokio::io::{AsyncBufReadExt, BufReader, Error};
use tokio_test::{assert_ok, io::Builder};
use std::io::Cursor;
// Reads successive lines from an in-memory cursor: terminators are kept,
// an empty line yields just "\n", and EOF yields a zero-length read.
#[tokio::test]
async fn read_line() {
    let mut rd = Cursor::new(b"hello\nworld\n\n");
    let mut buf = String::new();

    // (expected byte count, expected line contents); (0, "") is EOF.
    for (want_n, want_line) in [(6, "hello\n"), (6, "world\n"), (1, "\n"), (0, "")] {
        buf.clear();
        let n = assert_ok!(rd.read_line(&mut buf).await);
        assert_eq!(n, want_n);
        assert_eq!(buf, want_line);
    }
}
// Lines split across several underlying reads: `read_line` must stitch
// them together, append to any existing contents, and return the final
// line unterminated at EOF.
#[tokio::test]
async fn read_line_not_all_ready() {
    let mock = Builder::new()
        .read(b"Hello Wor")
        .read(b"ld\nFizzBuz")
        .read(b"z\n1\n2")
        .build();

    let mut read = BufReader::new(mock);

    // Pre-seeded string: read_line appends rather than overwrites.
    let mut line = "We say ".to_string();
    let bytes = read.read_line(&mut line).await.unwrap();
    assert_eq!(bytes, "Hello World\n".len());
    assert_eq!(line.as_str(), "We say Hello World\n");

    line = "I solve ".to_string();
    let bytes = read.read_line(&mut line).await.unwrap();
    assert_eq!(bytes, "FizzBuzz\n".len());
    assert_eq!(line.as_str(), "I solve FizzBuzz\n");

    line.clear();
    let bytes = read.read_line(&mut line).await.unwrap();
    assert_eq!(bytes, 2);
    assert_eq!(line.as_str(), "1\n");

    line.clear();
    let bytes = read.read_line(&mut line).await.unwrap();
    // Last line has no terminator: one byte, no trailing newline.
    assert_eq!(bytes, 1);
    assert_eq!(line.as_str(), "2");
}
// Invalid UTF-8 fails with `InvalidData` and leaves the destination
// string untouched.
#[tokio::test]
async fn read_line_invalid_utf8() {
    let mock = Builder::new().read(b"Hello Wor\xffld.\n").build();

    let mut read = BufReader::new(mock);

    let mut line = "Foo".to_string();
    let err = read.read_line(&mut line).await.expect_err("Should fail");
    assert_eq!(err.kind(), ErrorKind::InvalidData);
    assert_eq!(err.to_string(), "stream did not contain valid UTF-8");
    assert_eq!(line.as_str(), "Foo");
}
// An IO error surfaces as-is; the valid UTF-8 bytes read before the
// error are still appended to the destination.
#[tokio::test]
async fn read_line_fail() {
    let mock = Builder::new()
        .read(b"Hello Wor")
        .read_error(Error::new(ErrorKind::Other, "The world has no end"))
        .build();

    let mut read = BufReader::new(mock);

    let mut line = "Foo".to_string();
    let err = read.read_line(&mut line).await.expect_err("Should fail");
    assert_eq!(err.kind(), ErrorKind::Other);
    assert_eq!(err.to_string(), "The world has no end");
    assert_eq!(line.as_str(), "FooHello Wor");
}
// When an IO error follows invalid UTF-8 bytes, the IO error is reported
// and the destination keeps only its original contents.
#[tokio::test]
async fn read_line_fail_and_utf8_fail() {
    let mock = Builder::new()
        .read(b"Hello Wor")
        .read(b"\xff\xff\xff")
        .read_error(Error::new(ErrorKind::Other, "The world has no end"))
        .build();

    let mut read = BufReader::new(mock);

    let mut line = "Foo".to_string();
    let err = read.read_line(&mut line).await.expect_err("Should fail");
    assert_eq!(err.kind(), ErrorKind::Other);
    assert_eq!(err.to_string(), "The world has no end");
    assert_eq!(line.as_str(), "Foo");
}

78
vendor/tokio/tests/io_read_to_end.rs vendored Normal file
View file

@ -0,0 +1,78 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncReadExt, ReadBuf};
use tokio_test::assert_ok;
// Basic `read_to_end` from an in-memory slice: the whole source ends up
// in the destination Vec and the byte count matches.
#[tokio::test]
async fn read_to_end() {
    let mut src: &[u8] = b"hello world";
    let mut out = Vec::new();

    let n = assert_ok!(src.read_to_end(&mut out).await);
    assert_eq!(n, 11);
    assert_eq!(out[..], b"hello world"[..]);
}
/// Phases of `UninitTest`'s scripted behavior: initialize ahead while
/// filling, then fill from already-initialized bytes, then stop (EOF).
#[derive(Copy, Clone, Debug)]
enum State {
    Initializing,
    JustFilling,
    Done,
}

/// Scripted reader asserting that `read_to_end` preserves the
/// initialized-but-unfilled region of the buffer between calls.
struct UninitTest {
    // Number of initialized-but-unfilled bytes expected on the next call.
    num_init: usize,
    state: State,
}
impl AsyncRead for UninitTest {
    fn poll_read(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        let me = Pin::into_inner(self);
        // `read_to_end` must hand back the same initialized-but-unfilled
        // region it received after the previous call.
        let real_num_init = buf.initialized().len() - buf.filled().len();
        assert_eq!(real_num_init, me.num_init, "{:?}", me.state);

        match me.state {
            State::Initializing => {
                // Initialize two bytes ahead but fill only one, growing the
                // initialized-but-unfilled window by one per call.
                buf.initialize_unfilled_to(me.num_init + 2);
                buf.advance(1);
                me.num_init += 1;

                if me.num_init == 24 {
                    me.state = State::JustFilling;
                }
            }
            State::JustFilling => {
                // Consume the window built above, one byte per call,
                // without initializing anything further.
                buf.advance(1);
                me.num_init -= 1;

                if me.num_init == 15 {
                    // The buffer is resized on next call.
                    me.num_init = 0;
                    me.state = State::Done;
                }
            }
            State::Done => { /* .. do nothing .. */ }
        }

        Poll::Ready(Ok(()))
    }
}
// Drives `read_to_end` against the scripted reader: 24 initializing polls
// each fill one byte, then 9 fills from the pre-initialized window
// (24 down to 15), for 33 bytes total before the EOF phase.
#[tokio::test]
async fn read_to_end_uninit() {
    let mut buf = Vec::with_capacity(64);
    let mut test = UninitTest {
        num_init: 0,
        state: State::Initializing,
    };

    test.read_to_end(&mut buf).await.unwrap();
    assert_eq!(buf.len(), 33);
}

63
vendor/tokio/tests/io_read_to_string.rs vendored Normal file
View file

@ -0,0 +1,63 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::io;
use tokio::io::AsyncReadExt;
use tokio_test::assert_ok;
use tokio_test::io::Builder;
// Basic `read_to_string` from an in-memory slice: the full source lands
// in the destination and the byte count matches.
#[tokio::test]
async fn read_to_string() {
    let mut src: &[u8] = b"hello world";
    let mut out = String::new();

    let n = assert_ok!(src.read_to_string(&mut out).await);
    assert_eq!(n, 11);
    assert_eq!(out[..], "hello world"[..]);
}
// On invalid UTF-8, `read_to_string` must report the error without
// altering the destination string.
#[tokio::test]
async fn to_string_does_not_truncate_on_utf8_error() {
    let data = vec![0xff, 0xff, 0xff];

    let mut s = "abc".to_string();

    match AsyncReadExt::read_to_string(&mut data.as_slice(), &mut s).await {
        Ok(len) => panic!("Should fail: {} bytes.", len),
        Err(err) if err.to_string() == "stream did not contain valid UTF-8" => {}
        Err(err) => panic!("Fail: {}.", err),
    }

    assert_eq!(s, "abc");
}
// On an IO error mid-stream, the destination keeps its original contents:
// the bytes read before the error are not committed.
#[tokio::test]
async fn to_string_does_not_truncate_on_io_error() {
    let mut mock = Builder::new()
        .read(b"def")
        .read_error(io::Error::new(io::ErrorKind::Other, "whoops"))
        .build();

    let mut s = "abc".to_string();

    match AsyncReadExt::read_to_string(&mut mock, &mut s).await {
        Ok(len) => panic!("Should fail: {} bytes.", len),
        Err(err) if err.to_string() == "whoops" => {}
        Err(err) => panic!("Fail: {}.", err),
    }

    assert_eq!(s, "abc");
}
// `read_to_string` must append to the destination, not overwrite it.
#[tokio::test]
async fn to_string_appends() {
    let mut s = "abc".to_string();
    let data = b"def".to_vec();

    let n = AsyncReadExt::read_to_string(&mut data.as_slice(), &mut s)
        .await
        .unwrap();

    assert_eq!(n, 3);
    assert_eq!(s, "abcdef");
}

74
vendor/tokio/tests/io_read_until.rs vendored Normal file
View file

@ -0,0 +1,74 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::io::ErrorKind;
use tokio::io::{AsyncBufReadExt, BufReader, Error};
use tokio_test::{assert_ok, io::Builder};
// Reads successive delimiter-terminated chunks from a slice: the
// delimiter is included, the final chunk may be unterminated, and EOF
// yields a zero-length read.
#[tokio::test]
async fn read_until() {
    let mut rd: &[u8] = b"hello world";
    let mut out = vec![];

    // (expected byte count, expected buffer contents); (0, b"") is EOF.
    let cases: [(usize, &[u8]); 3] = [(6, b"hello "), (5, b"world"), (0, b"")];
    for (want_n, want_buf) in cases {
        out.clear();
        let n = assert_ok!(rd.read_until(b' ', &mut out).await);
        assert_eq!(n, want_n);
        assert_eq!(out, want_buf);
    }
}
// Chunks split across several underlying reads: `read_until` must stitch
// them together, append to existing contents, and pass a stray non-UTF-8
// byte (`\xff`) through untouched.
#[tokio::test]
async fn read_until_not_all_ready() {
    let mock = Builder::new()
        .read(b"Hello Wor")
        .read(b"ld#Fizz\xffBuz")
        .read(b"z#1#2")
        .build();

    let mut read = BufReader::new(mock);

    // Pre-seeded buffer: read_until appends rather than overwrites.
    let mut chunk = b"We say ".to_vec();
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    assert_eq!(bytes, b"Hello World#".len());
    assert_eq!(chunk, b"We say Hello World#");

    chunk = b"I solve ".to_vec();
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    // Was `b"Fizz\xffBuzz\n".len()`: the trailing byte is the `#`
    // delimiter, not a newline (the two lengths happened to coincide).
    assert_eq!(bytes, b"Fizz\xffBuzz#".len());
    assert_eq!(chunk, b"I solve Fizz\xffBuzz#");

    chunk.clear();
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    assert_eq!(bytes, 2);
    assert_eq!(chunk, b"1#");

    chunk.clear();
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    // Last chunk is unterminated at EOF.
    assert_eq!(bytes, 1);
    assert_eq!(chunk, b"2");
}
// An IO error from the underlying stream surfaces unchanged, and the
// bytes received before the error remain appended to the output.
#[tokio::test]
async fn read_until_fail() {
    let mock = Builder::new()
        .read(b"Hello \xffWor")
        .read_error(Error::new(ErrorKind::Other, "The world has no end"))
        .build();

    let mut read = BufReader::new(mock);

    let mut chunk = b"Foo".to_vec();
    let err = read
        .read_until(b'#', &mut chunk)
        .await
        .expect_err("Should fail");
    assert_eq!(err.kind(), ErrorKind::Other);
    assert_eq!(err.to_string(), "The world has no end");
    assert_eq!(chunk, b"FooHello \xffWor");
}

79
vendor/tokio/tests/io_split.rs vendored Normal file
View file

@ -0,0 +1,79 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support panic recovery
use tokio::io::{split, AsyncRead, AsyncWrite, ReadBuf, ReadHalf, WriteHalf};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Stub stream for exercising `split`: every read produces one `b'z'`,
/// every write accepts one byte, and flush/shutdown complete immediately.
struct RW;

impl AsyncRead for RW {
    fn poll_read(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        buf.put_slice(&[b'z']);
        Poll::Ready(Ok(()))
    }
}

impl AsyncWrite for RW {
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        _buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        Poll::Ready(Ok(1))
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }

    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }
}
// Compile-time check: both halves of a split stream are Send + Sync.
#[test]
fn is_send_and_sync() {
    fn require_send_sync<T: Send + Sync>() {}

    require_send_sync::<ReadHalf<RW>>();
    require_send_sync::<WriteHalf<RW>>();
}
// Each `split` call tags both halves with a shared id; `is_pair_of` must
// match halves only from the same call.
#[test]
fn split_stream_id() {
    let (r1, w1) = split(RW);
    let (r2, w2) = split(RW);
    assert!(r1.is_pair_of(&w1));
    assert!(!r1.is_pair_of(&w2));
    assert!(r2.is_pair_of(&w2));
    assert!(!r2.is_pair_of(&w1));
}
// Reuniting halves from the same split succeeds.
#[test]
fn unsplit_ok() {
    let (r, w) = split(RW);
    r.unsplit(w);
}
// Reuniting a read half with a write half from a different split panics.
#[test]
#[should_panic]
fn unsplit_err1() {
    let (r, _) = split(RW);
    let (_, w) = split(RW);
    r.unsplit(w);
}
// Same mismatch as `unsplit_err1`, with the halves taken in the opposite
// order, must also panic.
#[test]
#[should_panic]
fn unsplit_err2() {
    let (_, w) = split(RW);
    let (r, _) = split(RW);
    r.unsplit(w);
}

74
vendor/tokio/tests/io_take.rs vendored Normal file
View file

@ -0,0 +1,74 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support panic recovery
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{self, AsyncRead, AsyncReadExt, ReadBuf};
use tokio_test::assert_ok;
mod support {
pub(crate) mod leaked_buffers;
}
use support::leaked_buffers::LeakedBuffers;
// `take(4)` caps the read at four bytes even though both the source and
// the destination are larger.
#[tokio::test]
async fn take() {
    let source: &[u8] = b"hello world";
    let mut limited = source.take(4);
    let mut dst = [0; 6];

    let n = assert_ok!(limited.read(&mut dst).await);
    assert_eq!(n, 4);
    assert_eq!(&dst, &b"hell\0\0"[..]);
}
// Regression test for #4435: `take` must respect data already placed in
// the `ReadBuf` and append after it rather than clobbering it.
#[tokio::test]
async fn issue_4435() {
    let mut buf = [0; 8];
    let rd: &[u8] = b"hello world";

    let rd = rd.take(4);
    tokio::pin!(rd);

    let mut read_buf = ReadBuf::new(&mut buf);
    // Pre-fill two bytes; the subsequent read must not overwrite them.
    read_buf.put_slice(b"AB");

    futures::future::poll_fn(|cx| rd.as_mut().poll_read(cx, &mut read_buf))
        .await
        .unwrap();
    assert_eq!(&buf, &b"ABhell\0\0"[..]);
}
/// Reader that misbehaves by swapping the caller's `ReadBuf` for one
/// backed by its own (leaked) storage — used to check that `take`
/// detects the substitution instead of trusting the returned buffer.
struct BadReader {
    leaked_buffers: LeakedBuffers,
}

impl BadReader {
    fn new() -> Self {
        Self {
            leaked_buffers: LeakedBuffers::new(),
        }
    }
}

impl AsyncRead for BadReader {
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        read_buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Replace the caller's buffer wholesale — a contract violation.
        let mut buf = ReadBuf::new(unsafe { self.leaked_buffers.create(10) });
        buf.put_slice(&[123; 10]);
        *read_buf = buf;

        Poll::Ready(Ok(()))
    }
}
// `take` over `BadReader` must detect the swapped buffer and panic
// rather than report phantom bytes.
#[tokio::test]
#[should_panic]
async fn bad_reader_fails() {
    let mut buf = Vec::with_capacity(10);

    BadReader::new().take(10).read_buf(&mut buf).await.unwrap();
}

32
vendor/tokio/tests/io_util_empty.rs vendored Normal file
View file

@ -0,0 +1,32 @@
#![cfg(feature = "full")]
use tokio::io::{AsyncBufReadExt, AsyncReadExt};
// `empty()` always completes reads immediately, so this loop never truly
// yields on its own; tokio's cooperative budget must preempt it so the
// biased select can reach `yield_now`, otherwise the test hangs.
#[tokio::test]
async fn empty_read_is_cooperative() {
    tokio::select! {
        biased;

        _ = async {
            loop {
                let mut buf = [0u8; 4096];
                let _ = tokio::io::empty().read(&mut buf).await;
            }
        } => {},
        _ = tokio::task::yield_now() => {}
    }
}
// Same as `empty_read_is_cooperative`, but through the buffered
// `read_line` path.
#[tokio::test]
async fn empty_buf_reads_are_cooperative() {
    tokio::select! {
        biased;

        _ = async {
            loop {
                let mut buf = String::new();
                let _ = tokio::io::empty().read_line(&mut buf).await;
            }
        } => {},
        _ = tokio::task::yield_now() => {}
    }
}

58
vendor/tokio/tests/io_write.rs vendored Normal file
View file

@ -0,0 +1,58 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_test::assert_ok;
use bytes::BytesMut;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
/// A single `write` call returns however many bytes the underlying
/// writer accepted (here: a writer that always takes exactly four).
#[tokio::test]
async fn write() {
    struct Wr {
        data: BytesMut,
        calls: usize,
    }

    impl AsyncWrite for Wr {
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            // This writer is expected to receive exactly one call.
            assert_eq!(self.calls, 0);
            // Accept only the first four bytes of the payload.
            self.data.extend(&buf[0..4]);
            Poll::Ready(Ok(4))
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }

        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }

    let mut wr = Wr {
        data: BytesMut::with_capacity(64),
        calls: 0,
    };

    let written = assert_ok!(wr.write(b"hello world").await);
    assert_eq!(written, 4);
    assert_eq!(wr.data, b"hell"[..]);
}
/// `std::io::Cursor<Vec<u8>>` also implements `AsyncWrite`: a write
/// accepts the whole payload and stores it in the backing vector.
#[tokio::test]
async fn write_cursor() {
    use std::io::Cursor;

    let mut cursor = Cursor::new(Vec::new());
    let written = assert_ok!(cursor.write(b"hello world").await);
    assert_eq!(written, 11);
    assert_eq!(cursor.get_ref().as_slice(), &b"hello world"[..]);
}

51
vendor/tokio/tests/io_write_all.rs vendored Normal file
View file

@ -0,0 +1,51 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_test::assert_ok;
use bytes::BytesMut;
use std::cmp;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
/// `write_all` must keep calling `poll_write` until every byte of the
/// payload is accepted by a writer that takes at most 4 bytes per call.
#[tokio::test]
async fn write_all() {
    struct Wr {
        data: BytesMut,
        calls: usize,
    }

    impl AsyncWrite for Wr {
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            // Accept at most four bytes per call to force short writes.
            let accepted = &buf[..cmp::min(4, buf.len())];
            self.calls += 1;
            self.data.extend(accepted);
            Poll::Ready(Ok(accepted.len()))
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }

        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }

    let mut wr = Wr {
        data: BytesMut::with_capacity(64),
        calls: 0,
    };

    // "hello world" is 11 bytes: 4 + 4 + 3 => three poll_write calls.
    assert_ok!(wr.write_all(b"hello world").await);
    assert_eq!(wr.data, b"hello world"[..]);
    assert_eq!(wr.calls, 3);
}

96
vendor/tokio/tests/io_write_all_buf.rs vendored Normal file
View file

@ -0,0 +1,96 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_test::{assert_err, assert_ok};
use bytes::{Buf, Bytes, BytesMut};
use std::cmp;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
/// `write_all_buf` must drain a chained `Buf` completely, retrying
/// short writes until no bytes remain in the source.
///
/// Fix: removed a leftover `dbg!(buf)` from `poll_write` — it printed
/// the raw byte slice on every call, spamming test output without
/// affecting the assertions.
#[tokio::test]
async fn write_all_buf() {
    struct Wr {
        buf: BytesMut,
        cnt: usize,
    }

    impl AsyncWrite for Wr {
        // Accepts at most 4 bytes per call so `write_all_buf` is forced
        // to issue several short writes.
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            let n = cmp::min(4, buf.len());
            let buf = &buf[0..n];
            self.cnt += 1;
            self.buf.extend(buf);
            Ok(buf.len()).into()
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Ok(()).into()
        }

        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Ok(()).into()
        }
    }

    let mut wr = Wr {
        buf: BytesMut::with_capacity(64),
        cnt: 0,
    };

    let mut buf = Bytes::from_static(b"hello").chain(Bytes::from_static(b"world"));

    assert_ok!(wr.write_all_buf(&mut buf).await);
    assert_eq!(wr.buf, b"helloworld"[..]);
    // expect 4 writes, [hell],[o],[worl],[d]
    assert_eq!(wr.cnt, 4);
    assert!(!buf.has_remaining());
}
/// When the writer errors partway through, `write_all_buf` must return
/// the error and leave the unconsumed bytes inside the `Buf`.
#[tokio::test]
async fn write_buf_err() {
    /// Error out after writing the first 4 bytes
    struct Wr {
        cnt: usize,
    }

    impl AsyncWrite for Wr {
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            _buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            self.cnt += 1;
            if self.cnt == 2 {
                Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, "whoops")))
            } else {
                // Claim four bytes written regardless of the input.
                Poll::Ready(Ok(4))
            }
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }

        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }

    let mut wr = Wr { cnt: 0 };
    let mut payload = Bytes::from_static(b"hello").chain(Bytes::from_static(b"world"));

    assert_err!(wr.write_all_buf(&mut payload).await);
    // "hell" was consumed by the first write; "oworld" must remain.
    assert_eq!(
        payload.copy_to_bytes(payload.remaining()),
        Bytes::from_static(b"oworld")
    );
}

56
vendor/tokio/tests/io_write_buf.rs vendored Normal file
View file

@ -0,0 +1,56 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_test::assert_ok;
use bytes::BytesMut;
use std::cmp;
use std::io::{self, Cursor};
use std::pin::Pin;
use std::task::{Context, Poll};
/// A single `write_buf` call performs exactly one underlying write and
/// advances the source cursor by the number of bytes accepted.
#[tokio::test]
async fn write_all() {
    struct Wr {
        buf: BytesMut,
        cnt: usize,
    }

    impl AsyncWrite for Wr {
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            // `write_buf` must not loop: exactly one call expected.
            assert_eq!(self.cnt, 0);
            let accepted = &buf[..cmp::min(4, buf.len())];
            self.cnt += 1;
            self.buf.extend(accepted);
            Poll::Ready(Ok(accepted.len()))
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }

        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }

    let mut wr = Wr {
        buf: BytesMut::with_capacity(64),
        cnt: 0,
    };
    let mut src = Cursor::new(&b"hello world"[..]);

    assert_ok!(wr.write_buf(&mut src).await);
    assert_eq!(wr.buf, b"hell"[..]);
    assert_eq!(wr.cnt, 1);
    assert_eq!(src.position(), 4);
}

37
vendor/tokio/tests/io_write_int.rs vendored Normal file
View file

@ -0,0 +1,37 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncWrite, AsyncWriteExt};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
/// The `write_*` integer helpers must return an error when the
/// underlying writer forever reports zero bytes written.
#[tokio::test]
async fn write_int_should_err_if_write_count_0() {
    struct Wr {}

    impl AsyncWrite for Wr {
        fn poll_write(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            _buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            // Claim that nothing was written, every time.
            Poll::Ready(Ok(0))
        }

        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }

        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Poll::Ready(Ok(()))
        }
    }

    let mut wr = Wr {};

    // should be ok just to test these 2, other cases actually expanded by same macro.
    assert!(wr.write_i8(0).await.is_err());
    assert!(wr.write_i32(12).await.is_err());
}

20
vendor/tokio/tests/join_handle_panic.rs vendored Normal file
View file

@ -0,0 +1,20 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support panic recovery
// A value whose destructor always panics. Used to verify that a panic
// raised while the runtime drops a completed task's output does not
// propagate to the thread that merely drops the `JoinHandle`.
struct PanicsOnDrop;
impl Drop for PanicsOnDrop {
    fn drop(&mut self) {
        panic!("I told you so");
    }
}
/// Dropping a `JoinHandle` after its task has completed must not
/// re-raise the panic produced while dropping the task's output value.
#[tokio::test]
async fn test_panics_do_not_propagate_when_dropping_join_handle() {
    let join_handle = tokio::spawn(async move { PanicsOnDrop });
    // only drop the JoinHandle when the task has completed
    // (which is difficult to synchronize precisely)
    // NOTE(review): the sleep is best-effort race avoidance, not a
    // guarantee the spawned task has finished.
    tokio::time::sleep(std::time::Duration::from_millis(3)).await;
    drop(join_handle);
}

155
vendor/tokio/tests/macros_join.rs vendored Normal file
View file

@ -0,0 +1,155 @@
#![cfg(feature = "macros")]
#![allow(clippy::blacklisted_name)]
use std::sync::Arc;
#[cfg(tokio_wasm_not_wasi)]
#[cfg(target_pointer_width = "64")]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
use tokio::sync::{oneshot, Semaphore};
use tokio_test::{assert_pending, assert_ready, task};
#[maybe_tokio_test]
async fn sync_one_lit_expr_comma() {
let foo = tokio::join!(async { 1 },);
assert_eq!(foo, (1,));
}
#[maybe_tokio_test]
async fn sync_one_lit_expr_no_comma() {
let foo = tokio::join!(async { 1 });
assert_eq!(foo, (1,));
}
#[maybe_tokio_test]
async fn sync_two_lit_expr_comma() {
let foo = tokio::join!(async { 1 }, async { 2 },);
assert_eq!(foo, (1, 2));
}
#[maybe_tokio_test]
async fn sync_two_lit_expr_no_comma() {
let foo = tokio::join!(async { 1 }, async { 2 });
assert_eq!(foo, (1, 2));
}
/// `join!` only completes once every branch is ready, and completion of
/// either oneshot must wake the joined future.
#[maybe_tokio_test]
async fn two_await() {
    let (tx1, rx1) = oneshot::channel::<&str>();
    let (tx2, rx2) = oneshot::channel::<u32>();
    let mut join = task::spawn(async {
        tokio::join!(async { rx1.await.unwrap() }, async { rx2.await.unwrap() })
    });
    // Neither channel has a value yet.
    assert_pending!(join.poll());
    // Completing only the second branch wakes the join but can't finish it.
    tx2.send(123).unwrap();
    assert!(join.is_woken());
    assert_pending!(join.poll());
    // Completing the first branch finishes the join.
    tx1.send("hello").unwrap();
    assert!(join.is_woken());
    let res = assert_ready!(join.poll());
    assert_eq!(("hello", 123), res);
}
/// Pins the size of the future produced by `join!` to catch accidental
/// size regressions in the macro expansion.
///
/// NOTE(review): the exact byte counts are implementation details of
/// the expansion on 64-bit targets, not a public guarantee.
#[test]
#[cfg(target_pointer_width = "64")]
fn join_size() {
    use futures::future;
    use std::mem;
    // One ready branch.
    let fut = async {
        let ready = future::ready(0i32);
        tokio::join!(ready)
    };
    assert_eq!(mem::size_of_val(&fut), 32);
    // Two ready branches.
    let fut = async {
        let ready1 = future::ready(0i32);
        let ready2 = future::ready(0i32);
        tokio::join!(ready1, ready2)
    };
    assert_eq!(mem::size_of_val(&fut), 40);
}
/// Burns through the cooperative-scheduling budget five times over:
/// each round acquires (and immediately releases) a semaphore permit
/// 128 times. Returns the number of completed rounds.
async fn non_cooperative_task(permits: Arc<Semaphore>) -> usize {
    let mut exceeded_budget = 0;
    while exceeded_budget < 5 {
        // Another task should run after this task uses its whole budget
        let mut acquisitions = 0;
        while acquisitions < 128 {
            let _permit = permits.clone().acquire_owned().await.unwrap();
            acquisitions += 1;
        }
        exceeded_budget += 1;
    }
    exceeded_budget
}
/// Acquires a single permit per round while competing with
/// `non_cooperative_task`; returns how many rounds it completed.
async fn poor_little_task(permits: Arc<Semaphore>) -> usize {
    let mut how_many_times_i_got_to_run = 0;
    while how_many_times_i_got_to_run < 5 {
        let _permit = permits.clone().acquire_owned().await.unwrap();
        how_many_times_i_got_to_run += 1;
    }
    how_many_times_i_got_to_run
}
/// `join!` must yield to sibling branches once one branch exhausts its
/// cooperative budget, so neither future starves the other.
#[tokio::test]
async fn join_does_not_allow_tasks_to_starve() {
    let permits = Arc::new(Semaphore::new(1));

    // non_cooperative_task should yield after its budget is exceeded and then poor_little_task should run.
    let results = tokio::join!(
        non_cooperative_task(Arc::clone(&permits)),
        poor_little_task(permits)
    );

    assert_eq!((5, 5), results);
}
/// `join!` rotates which branch is polled first on each poll of the
/// composite future, so no single branch gets permanent priority.
#[tokio::test]
async fn a_different_future_is_polled_first_every_time_poll_fn_is_polled() {
    let poll_order = Arc::new(std::sync::Mutex::new(vec![]));
    // Each future records its id once per poll and yields, so the
    // composite future is polled four times in total.
    let fut = |x, poll_order: Arc<std::sync::Mutex<Vec<i32>>>| async move {
        for _ in 0..4 {
            {
                let mut guard = poll_order.lock().unwrap();
                guard.push(x);
            }
            tokio::task::yield_now().await;
        }
    };
    tokio::join!(
        fut(1, Arc::clone(&poll_order)),
        fut(2, Arc::clone(&poll_order)),
        fut(3, Arc::clone(&poll_order)),
    );
    // Each time the future created by join! is polled, it should start
    // by polling a different future first.
    assert_eq!(
        vec![1, 2, 3, 2, 3, 1, 3, 1, 2, 1, 2, 3],
        *poll_order.lock().unwrap()
    );
}

21
vendor/tokio/tests/macros_pin.rs vendored Normal file
View file

@ -0,0 +1,21 @@
#![cfg(feature = "macros")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
async fn one() {}
async fn two() {}
#[maybe_tokio_test]
async fn multi_pin() {
tokio::pin! {
let f1 = one();
let f2 = two();
}
(&mut f1).await;
(&mut f2).await;
}

View file

@ -0,0 +1,26 @@
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threading
#[allow(unused_imports)]
use std as tokio;
use ::tokio as tokio1;
// Spawns a task through the *renamed* tokio crate (`tokio1`). The path
// `tokio` is shadowed by `std` above, so this compiles only if the
// macros honor the rename.
async fn compute() -> usize {
    let join = tokio1::spawn(async { 1 });
    join.await.unwrap()
}
// `#[tokio1::main]` with an explicit `crate` argument must expand
// without referencing the (shadowed) `tokio` path.
#[tokio1::main(crate = "tokio1")]
async fn compute_main() -> usize {
    compute().await
}
#[test]
fn crate_rename_main() {
    assert_eq!(1, compute_main());
}
// Same check for the `#[tokio1::test]` attribute.
#[tokio1::test(crate = "tokio1")]
async fn crate_rename_test() {
    assert_eq!(1, compute().await);
}

665
vendor/tokio/tests/macros_select.rs vendored Normal file
View file

@ -0,0 +1,665 @@
#![cfg(feature = "macros")]
#![allow(clippy::blacklisted_name)]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
use tokio::sync::oneshot;
use tokio_test::{assert_ok, assert_pending, assert_ready};
use futures::future::poll_fn;
use std::task::Poll::Ready;
#[maybe_tokio_test]
async fn sync_one_lit_expr_comma() {
let foo = tokio::select! {
foo = async { 1 } => foo,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn nested_one() {
let foo = tokio::select! {
foo = async { 1 } => tokio::select! {
bar = async { foo } => bar,
},
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_lit_expr_no_comma() {
let foo = tokio::select! {
foo = async { 1 } => foo
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_lit_expr_block() {
let foo = tokio::select! {
foo = async { 1 } => { foo }
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_await() {
let foo = tokio::select! {
foo = one() => foo,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_ident() {
let one = one();
let foo = tokio::select! {
foo = one => foo,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_two() {
use std::cell::Cell;
let cnt = Cell::new(0);
let res = tokio::select! {
foo = async {
cnt.set(cnt.get() + 1);
1
} => foo,
bar = async {
cnt.set(cnt.get() + 1);
2
} => bar,
};
assert_eq!(1, cnt.get());
assert!(res == 1 || res == 2);
}
#[maybe_tokio_test]
async fn drop_in_fut() {
let s = "hello".to_string();
let res = tokio::select! {
foo = async {
let v = one().await;
drop(s);
v
} => foo
};
assert_eq!(res, 1);
}
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn one_ready() {
let (tx1, rx1) = oneshot::channel::<i32>();
let (_tx2, rx2) = oneshot::channel::<i32>();
tx1.send(1).unwrap();
let v = tokio::select! {
res = rx1 => {
assert_ok!(res)
},
_ = rx2 => unreachable!(),
};
assert_eq!(1, v);
}
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn select_streams() {
use tokio::sync::mpsc;
let (tx1, mut rx1) = mpsc::unbounded_channel::<i32>();
let (tx2, mut rx2) = mpsc::unbounded_channel::<i32>();
tokio::spawn(async move {
assert_ok!(tx2.send(1));
tokio::task::yield_now().await;
assert_ok!(tx1.send(2));
tokio::task::yield_now().await;
assert_ok!(tx2.send(3));
tokio::task::yield_now().await;
drop((tx1, tx2));
});
let mut rem = true;
let mut msgs = vec![];
while rem {
tokio::select! {
Some(x) = rx1.recv() => {
msgs.push(x);
}
Some(y) = rx2.recv() => {
msgs.push(y);
}
else => {
rem = false;
}
}
}
msgs.sort_unstable();
assert_eq!(&msgs[..], &[1, 2, 3]);
}
#[maybe_tokio_test]
async fn move_uncompleted_futures() {
let (tx1, mut rx1) = oneshot::channel::<i32>();
let (tx2, mut rx2) = oneshot::channel::<i32>();
tx1.send(1).unwrap();
tx2.send(2).unwrap();
let ran;
tokio::select! {
res = &mut rx1 => {
assert_eq!(1, assert_ok!(res));
assert_eq!(2, assert_ok!(rx2.await));
ran = true;
},
res = &mut rx2 => {
assert_eq!(2, assert_ok!(res));
assert_eq!(1, assert_ok!(rx1.await));
ran = true;
},
}
assert!(ran);
}
#[maybe_tokio_test]
async fn nested() {
let res = tokio::select! {
x = async { 1 } => {
tokio::select! {
y = async { 2 } => x + y,
}
}
};
assert_eq!(res, 3);
}
#[maybe_tokio_test]
#[cfg(target_pointer_width = "64")]
async fn struct_size() {
use futures::future;
use std::mem;
let fut = async {
let ready = future::ready(0i32);
tokio::select! {
_ = ready => {},
}
};
assert_eq!(mem::size_of_val(&fut), 40);
let fut = async {
let ready1 = future::ready(0i32);
let ready2 = future::ready(0i32);
tokio::select! {
_ = ready1 => {},
_ = ready2 => {},
}
};
assert_eq!(mem::size_of_val(&fut), 48);
let fut = async {
let ready1 = future::ready(0i32);
let ready2 = future::ready(0i32);
let ready3 = future::ready(0i32);
tokio::select! {
_ = ready1 => {},
_ = ready2 => {},
_ = ready3 => {},
}
};
assert_eq!(mem::size_of_val(&fut), 56);
}
#[maybe_tokio_test]
async fn mutable_borrowing_future_with_same_borrow_in_block() {
let mut value = 234;
tokio::select! {
_ = require_mutable(&mut value) => { },
_ = async_noop() => {
value += 5;
},
}
assert!(value >= 234);
}
#[maybe_tokio_test]
async fn mutable_borrowing_future_with_same_borrow_in_block_and_else() {
let mut value = 234;
tokio::select! {
_ = require_mutable(&mut value) => { },
_ = async_noop() => {
value += 5;
},
else => {
value += 27;
},
}
assert!(value >= 234);
}
#[maybe_tokio_test]
async fn future_panics_after_poll() {
use tokio_test::task;
let (tx, rx) = oneshot::channel();
let mut polled = false;
let f = poll_fn(|_| {
assert!(!polled);
polled = true;
Ready(None::<()>)
});
let mut f = task::spawn(async {
tokio::select! {
Some(_) = f => unreachable!(),
ret = rx => ret.unwrap(),
}
});
assert_pending!(f.poll());
assert_pending!(f.poll());
assert_ok!(tx.send(1));
let res = assert_ready!(f.poll());
assert_eq!(1, res);
}
#[maybe_tokio_test]
async fn disable_with_if() {
use tokio_test::task;
let f = poll_fn(|_| panic!());
let (tx, rx) = oneshot::channel();
let mut f = task::spawn(async {
tokio::select! {
_ = f, if false => unreachable!(),
_ = rx => (),
}
});
assert_pending!(f.poll());
assert_ok!(tx.send(()));
assert!(f.is_woken());
assert_ready!(f.poll());
}
#[maybe_tokio_test]
async fn join_with_select() {
use tokio_test::task;
let (tx1, mut rx1) = oneshot::channel();
let (tx2, mut rx2) = oneshot::channel();
let mut f = task::spawn(async {
let mut a = None;
let mut b = None;
while a.is_none() || b.is_none() {
tokio::select! {
v1 = &mut rx1, if a.is_none() => a = Some(assert_ok!(v1)),
v2 = &mut rx2, if b.is_none() => b = Some(assert_ok!(v2))
}
}
(a.unwrap(), b.unwrap())
});
assert_pending!(f.poll());
assert_ok!(tx1.send(123));
assert!(f.is_woken());
assert_pending!(f.poll());
assert_ok!(tx2.send(456));
assert!(f.is_woken());
let (a, b) = assert_ready!(f.poll());
assert_eq!(a, 123);
assert_eq!(b, 456);
}
#[tokio::test]
#[cfg(feature = "full")]
async fn use_future_in_if_condition() {
use tokio::time::{self, Duration};
tokio::select! {
_ = time::sleep(Duration::from_millis(10)), if false => {
panic!("if condition ignored")
}
_ = async { 1u32 } => {
}
}
}
#[tokio::test]
#[cfg(feature = "full")]
async fn use_future_in_if_condition_biased() {
use tokio::time::{self, Duration};
tokio::select! {
biased;
_ = time::sleep(Duration::from_millis(10)), if false => {
panic!("if condition ignored")
}
_ = async { 1u32 } => {
}
}
}
#[maybe_tokio_test]
async fn many_branches() {
let num = tokio::select! {
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
};
assert_eq!(1, num);
}
#[maybe_tokio_test]
async fn never_branch_no_warnings() {
let t = tokio::select! {
_ = async_never() => 0,
one_async_ready = one() => one_async_ready,
};
assert_eq!(t, 1);
}
// Trivially-ready future returning 1; used across the select! tests.
async fn one() -> usize {
    1
}
// Holds a mutable borrow for its whole lifetime; proves select! arms
// can take `&mut` alongside handler blocks that touch the same value.
async fn require_mutable(_: &mut i32) {}
// Completes immediately with no effect.
async fn async_noop() {}
// Never completes; the `!` return type lets branches using it
// type-check without producing a value.
async fn async_never() -> ! {
    futures::future::pending().await
}
// From https://github.com/tokio-rs/tokio/issues/2857
#[maybe_tokio_test]
async fn mut_on_left_hand_side() {
let v = async move {
let ok = async { 1 };
tokio::pin!(ok);
tokio::select! {
mut a = &mut ok => {
a += 1;
a
}
}
}
.await;
assert_eq!(v, 2);
}
#[maybe_tokio_test]
async fn biased_one_not_ready() {
let (_tx1, rx1) = oneshot::channel::<i32>();
let (tx2, rx2) = oneshot::channel::<i32>();
let (tx3, rx3) = oneshot::channel::<i32>();
tx2.send(2).unwrap();
tx3.send(3).unwrap();
let v = tokio::select! {
biased;
_ = rx1 => unreachable!(),
res = rx2 => {
assert_ok!(res)
},
_ = rx3 => {
panic!("This branch should never be activated because `rx2` should be polled before `rx3` due to `biased;`.")
}
};
assert_eq!(2, v);
}
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn biased_eventually_ready() {
use tokio::task::yield_now;
let one = async {};
let two = async { yield_now().await };
let three = async { yield_now().await };
let mut count = 0u8;
tokio::pin!(one, two, three);
loop {
tokio::select! {
biased;
_ = &mut two, if count < 2 => {
count += 1;
assert_eq!(count, 2);
}
_ = &mut three, if count < 3 => {
count += 1;
assert_eq!(count, 3);
}
_ = &mut one, if count < 1 => {
count += 1;
assert_eq!(count, 1);
}
else => break,
}
}
assert_eq!(count, 3);
}
// https://github.com/tokio-rs/tokio/issues/3830
// https://github.com/rust-lang/rust-clippy/issues/7304
#[warn(clippy::default_numeric_fallback)]
pub async fn default_numeric_fallback() {
tokio::select! {
_ = async {} => (),
else => (),
}
}
// https://github.com/tokio-rs/tokio/issues/4182
#[maybe_tokio_test]
async fn mut_ref_patterns() {
tokio::select! {
Some(mut foo) = async { Some("1".to_string()) } => {
assert_eq!(foo, "1");
foo = "2".to_string();
assert_eq!(foo, "2");
},
};
tokio::select! {
Some(ref foo) = async { Some("1".to_string()) } => {
assert_eq!(*foo, "1");
},
};
tokio::select! {
Some(ref mut foo) = async { Some("1".to_string()) } => {
assert_eq!(*foo, "1");
*foo = "2".to_string();
assert_eq!(*foo, "2");
},
};
}
#[cfg(tokio_unstable)]
mod unstable {
use tokio::runtime::RngSeed;
#[test]
fn deterministic_select_current_thread() {
let seed = b"bytes used to generate seed";
let rt1 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt1_values = rt1.block_on(async { (select_0_to_9().await, select_0_to_9().await) });
let rt2 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt2_values = rt2.block_on(async { (select_0_to_9().await, select_0_to_9().await) });
assert_eq!(rt1_values, rt2_values);
}
#[test]
#[cfg(all(feature = "rt-multi-thread", not(tokio_wasi)))]
fn deterministic_select_multi_thread() {
let seed = b"bytes used to generate seed";
let rt1 = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt1_values = rt1.block_on(async {
let _ = tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) }).await;
});
let rt2 = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt2_values = rt2.block_on(async {
let _ = tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) }).await;
});
assert_eq!(rt1_values, rt2_values);
}
async fn select_0_to_9() -> u32 {
tokio::select!(
x = async { 0 } => x,
x = async { 1 } => x,
x = async { 2 } => x,
x = async { 3 } => x,
x = async { 4 } => x,
x = async { 5 } => x,
x = async { 6 } => x,
x = async { 7 } => x,
x = async { 8 } => x,
x = async { 9 } => x,
)
}
}

88
vendor/tokio/tests/macros_test.rs vendored Normal file
View file

@ -0,0 +1,88 @@
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threading
use tokio::test;
#[test]
async fn test_macro_can_be_used_via_use() {
tokio::spawn(async {}).await.unwrap();
}
#[tokio::test]
async fn test_macro_is_resilient_to_shadowing() {
tokio::spawn(async {}).await.unwrap();
}
// https://github.com/tokio-rs/tokio/issues/3403
#[rustfmt::skip] // this `rustfmt::skip` is necessary because unused_braces does not warn if the block contains newline.
#[tokio::main]
pub async fn unused_braces_main() { println!("hello") }
#[rustfmt::skip] // this `rustfmt::skip` is necessary because unused_braces does not warn if the block contains newline.
#[tokio::test]
async fn unused_braces_test() { assert_eq!(1 + 1, 2) }
// https://github.com/tokio-rs/tokio/pull/3766#issuecomment-835508651
#[std::prelude::v1::test]
fn trait_method() {
trait A {
fn f(self);
}
impl A for () {
#[tokio::main]
async fn f(self) {}
}
().f()
}
// https://github.com/tokio-rs/tokio/issues/4175
#[tokio::main]
pub async fn issue_4175_main_1() -> ! {
panic!();
}
#[tokio::main]
pub async fn issue_4175_main_2() -> std::io::Result<()> {
panic!();
}
#[allow(unreachable_code)]
#[tokio::test]
pub async fn issue_4175_test() -> std::io::Result<()> {
return Ok(());
panic!();
}
// https://github.com/tokio-rs/tokio/issues/4175
#[allow(clippy::let_unit_value)]
pub mod clippy_semicolon_if_nothing_returned {
#![deny(clippy::semicolon_if_nothing_returned)]
#[tokio::main]
pub async fn local() {
let _x = ();
}
#[tokio::main]
pub async fn item() {
fn _f() {}
}
#[tokio::main]
pub async fn semi() {
panic!();
}
#[tokio::main]
pub async fn empty() {
// To trigger clippy::semicolon_if_nothing_returned lint, the block needs to contain newline.
}
}
// https://github.com/tokio-rs/tokio/issues/5243
pub mod issue_5243 {
macro_rules! mac {
(async fn $name:ident() $b:block) => {
#[::tokio::test]
async fn $name() {
$b
}
};
}
mac!(
async fn foo() {}
);
}

185
vendor/tokio/tests/macros_try_join.rs vendored Normal file
View file

@ -0,0 +1,185 @@
#![cfg(feature = "macros")]
#![allow(clippy::blacklisted_name)]
use std::sync::Arc;
use tokio::sync::{oneshot, Semaphore};
use tokio_test::{assert_pending, assert_ready, task};
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
#[maybe_tokio_test]
async fn sync_one_lit_expr_comma() {
let foo = tokio::try_join!(async { ok(1) },);
assert_eq!(foo, Ok((1,)));
}
#[maybe_tokio_test]
async fn sync_one_lit_expr_no_comma() {
let foo = tokio::try_join!(async { ok(1) });
assert_eq!(foo, Ok((1,)));
}
#[maybe_tokio_test]
async fn sync_two_lit_expr_comma() {
let foo = tokio::try_join!(async { ok(1) }, async { ok(2) },);
assert_eq!(foo, Ok((1, 2)));
}
#[maybe_tokio_test]
async fn sync_two_lit_expr_no_comma() {
let foo = tokio::try_join!(async { ok(1) }, async { ok(2) });
assert_eq!(foo, Ok((1, 2)));
}
#[maybe_tokio_test]
async fn two_await() {
let (tx1, rx1) = oneshot::channel::<&str>();
let (tx2, rx2) = oneshot::channel::<u32>();
let mut join =
task::spawn(async { tokio::try_join!(async { rx1.await }, async { rx2.await }) });
assert_pending!(join.poll());
tx2.send(123).unwrap();
assert!(join.is_woken());
assert_pending!(join.poll());
tx1.send("hello").unwrap();
assert!(join.is_woken());
let res: Result<(&str, u32), _> = assert_ready!(join.poll());
assert_eq!(Ok(("hello", 123)), res);
}
#[maybe_tokio_test]
async fn err_abort_early() {
let (tx1, rx1) = oneshot::channel::<&str>();
let (tx2, rx2) = oneshot::channel::<u32>();
let (_tx3, rx3) = oneshot::channel::<u32>();
let mut join = task::spawn(async {
tokio::try_join!(async { rx1.await }, async { rx2.await }, async {
rx3.await
})
});
assert_pending!(join.poll());
tx2.send(123).unwrap();
assert!(join.is_woken());
assert_pending!(join.poll());
drop(tx1);
assert!(join.is_woken());
let res = assert_ready!(join.poll());
assert!(res.is_err());
}
#[test]
#[cfg(target_pointer_width = "64")]
fn join_size() {
use futures::future;
use std::mem;
let fut = async {
let ready = future::ready(ok(0i32));
tokio::try_join!(ready)
};
assert_eq!(mem::size_of_val(&fut), 32);
let fut = async {
let ready1 = future::ready(ok(0i32));
let ready2 = future::ready(ok(0i32));
tokio::try_join!(ready1, ready2)
};
assert_eq!(mem::size_of_val(&fut), 48);
}
fn ok<T>(val: T) -> Result<T, ()> {
Ok(val)
}
async fn non_cooperative_task(permits: Arc<Semaphore>) -> Result<usize, String> {
let mut exceeded_budget = 0;
for _ in 0..5 {
// Another task should run after this task uses its whole budget
for _ in 0..128 {
let _permit = permits.clone().acquire_owned().await.unwrap();
}
exceeded_budget += 1;
}
Ok(exceeded_budget)
}
async fn poor_little_task(permits: Arc<Semaphore>) -> Result<usize, String> {
let mut how_many_times_i_got_to_run = 0;
for _ in 0..5 {
let _permit = permits.clone().acquire_owned().await.unwrap();
how_many_times_i_got_to_run += 1;
}
Ok(how_many_times_i_got_to_run)
}
#[tokio::test]
async fn try_join_does_not_allow_tasks_to_starve() {
let permits = Arc::new(Semaphore::new(10));
// non_cooperative_task should yield after its budget is exceeded and then poor_little_task should run.
let result = tokio::try_join!(
non_cooperative_task(Arc::clone(&permits)),
poor_little_task(permits)
);
let (non_cooperative_result, little_task_result) = result.unwrap();
assert_eq!(5, non_cooperative_result);
assert_eq!(5, little_task_result);
}
#[tokio::test]
async fn a_different_future_is_polled_first_every_time_poll_fn_is_polled() {
let poll_order = Arc::new(std::sync::Mutex::new(vec![]));
let fut = |x, poll_order: Arc<std::sync::Mutex<Vec<i32>>>| async move {
for _ in 0..4 {
{
let mut guard = poll_order.lock().unwrap();
guard.push(x);
}
tokio::task::yield_now().await;
}
};
tokio::join!(
fut(1, Arc::clone(&poll_order)),
fut(2, Arc::clone(&poll_order)),
fut(3, Arc::clone(&poll_order)),
);
// Each time the future created by join! is polled, it should start
// by polling a different future first.
assert_eq!(
vec![1, 2, 3, 2, 3, 1, 3, 1, 2, 1, 2, 3],
*poll_order.lock().unwrap()
);
}

14
vendor/tokio/tests/net_bind_resource.rs vendored Normal file
View file

@ -0,0 +1,14 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support panic recovery or bind
use tokio::net::TcpListener;
use std::convert::TryFrom;
use std::net;
/// Converting a std listener into a tokio `TcpListener` outside of any
/// runtime context must panic instead of silently failing.
#[test]
#[should_panic]
fn no_runtime_panics_binding_net_tcp_listener() {
    let std_listener = net::TcpListener::bind("127.0.0.1:0").expect("failed to bind listener");
    let _ = TcpListener::try_from(std_listener);
}

38
vendor/tokio/tests/net_lookup_host.rs vendored Normal file
View file

@ -0,0 +1,38 @@
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support direct socket operations
use tokio::net;
use tokio_test::assert_ok;
use std::io;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
/// Looking up an already-resolved `SocketAddr` must yield exactly that
/// address back, with no name resolution involved.
#[tokio::test]
async fn lookup_socket_addr() {
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let resolved: Vec<_> = assert_ok!(net::lookup_host(addr).await).collect();
    assert_eq!(vec![addr], resolved);
}
/// A string that parses as a socket address must resolve to itself.
#[tokio::test]
async fn lookup_str_socket_addr() {
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let resolved: Vec<_> = assert_ok!(net::lookup_host("127.0.0.1:8000").await).collect();
    assert_eq!(vec![addr], resolved);
}
/// Resolves `localhost` through the system resolver and checks the
/// first result is one of the two loopback addresses.
#[tokio::test]
async fn resolve_dns() -> io::Result<()> {
    let mut addrs = net::lookup_host("localhost:3000").await?;
    let host = addrs.next().unwrap();

    let expected = if host.is_ipv4() {
        SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3000)
    } else {
        SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 3000)
    };
    assert_eq!(host, expected);

    Ok(())
}

419
vendor/tokio/tests/net_named_pipe.rs vendored Normal file
View file

@ -0,0 +1,419 @@
#![cfg(feature = "full")]
#![cfg(all(windows))]
use std::io;
use std::mem;
use std::os::windows::io::AsRawHandle;
use std::time::Duration;
use tokio::io::AsyncWriteExt;
use tokio::net::windows::named_pipe::{ClientOptions, PipeMode, ServerOptions};
use tokio::time;
use windows_sys::Win32::Foundation::{ERROR_NO_DATA, ERROR_PIPE_BUSY, NO_ERROR, UNICODE_STRING};
/// Dropping the client end of a named pipe breaks the server-side
/// instance: the next server write must fail with `ERROR_NO_DATA`.
#[tokio::test]
async fn test_named_pipe_client_drop() -> io::Result<()> {
    const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-client-drop";
    let mut server = ServerOptions::new().create(PIPE_NAME)?;
    // Exactly one instance of this pipe should exist at this point.
    assert_eq!(num_instances("test-named-pipe-client-drop")?, 1);
    let client = ClientOptions::new().open(PIPE_NAME)?;
    server.connect().await?;
    drop(client);
    // instance will be broken because client is gone
    match server.write_all(b"ping").await {
        Err(e) if e.raw_os_error() == Some(ERROR_NO_DATA as i32) => (),
        x => panic!("{:?}", x),
    }
    Ok(())
}
// End-to-end ping/pong between one server instance and one client, each
// driven on its own spawned task.
#[tokio::test]
async fn test_named_pipe_single_client() -> io::Result<()> {
use tokio::io::{AsyncBufReadExt as _, BufReader};
const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-single-client";
let server = ServerOptions::new().create(PIPE_NAME)?;
let server = tokio::spawn(async move {
// Note: we wait for a client to connect.
server.connect().await?;
let mut server = BufReader::new(server);
// Read the client's line first, then answer it.
let mut buf = String::new();
server.read_line(&mut buf).await?;
server.write_all(b"pong\n").await?;
Ok::<_, io::Error>(buf)
});
let client = tokio::spawn(async move {
let client = ClientOptions::new().open(PIPE_NAME)?;
let mut client = BufReader::new(client);
let mut buf = String::new();
client.write_all(b"ping\n").await?;
client.read_line(&mut buf).await?;
Ok::<_, io::Error>(buf)
});
// `try_join!` + `?` unwraps the JoinErrors; the tasks' own io::Results
// are checked by the assertions below.
let (server, client) = tokio::try_join!(server, client)?;
assert_eq!(server?, "ping\n");
assert_eq!(client?, "pong\n");
Ok(())
}
// N clients ping a revolving set of server instances; each accepted
// connection is served on its own task while the next instance is created.
#[tokio::test]
async fn test_named_pipe_multi_client() -> io::Result<()> {
use tokio::io::{AsyncBufReadExt as _, BufReader};
const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-multi-client";
const N: usize = 10;
// The first server needs to be constructed early so that clients can
// be correctly connected. Otherwise calling .wait will cause the client to
// error.
let mut server = ServerOptions::new().create(PIPE_NAME)?;
let server = tokio::spawn(async move {
for _ in 0..N {
// Wait for client to connect.
server.connect().await?;
let mut inner = BufReader::new(server);
// Construct the next server to be connected before sending the one
// we already have of onto a task. This ensures that the server
// isn't closed (after it's done in the task) before a new one is
// available. Otherwise the client might error with
// `io::ErrorKind::NotFound`.
server = ServerOptions::new().create(PIPE_NAME)?;
let _ = tokio::spawn(async move {
let mut buf = String::new();
inner.read_line(&mut buf).await?;
inner.write_all(b"pong\n").await?;
inner.flush().await?;
Ok::<_, io::Error>(())
});
}
Ok::<_, io::Error>(())
});
let mut clients = Vec::new();
for _ in 0..N {
clients.push(tokio::spawn(async move {
// This showcases a generic connect loop.
//
// We immediately try to create a client, if it's not found or the
// pipe is busy we use the specialized wait function on the client
// builder.
let client = loop {
match ClientOptions::new().open(PIPE_NAME) {
Ok(client) => break client,
Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (),
Err(e) if e.kind() == io::ErrorKind::NotFound => (),
Err(e) => return Err(e),
}
// Wait for a named pipe to become available.
time::sleep(Duration::from_millis(10)).await;
};
let mut client = BufReader::new(client);
let mut buf = String::new();
client.write_all(b"ping\n").await?;
client.flush().await?;
client.read_line(&mut buf).await?;
Ok::<_, io::Error>(buf)
}));
}
// Every client must have received exactly one "pong" line.
for client in clients {
let result = client.await?;
assert_eq!(result?, "pong\n");
}
server.await??;
Ok(())
}
// Same ping/pong exchange as `test_named_pipe_multi_client`, but driven with
// the readiness API (`readable`/`writable`/`ready` + `try_read`/`try_write`)
// instead of the buffered async read/write helpers.
#[tokio::test]
async fn test_named_pipe_multi_client_ready() -> io::Result<()> {
use tokio::io::Interest;
const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-multi-client-ready";
const N: usize = 10;
// The first server needs to be constructed early so that clients can
// be correctly connected. Otherwise calling .wait will cause the client to
// error.
let mut server = ServerOptions::new().create(PIPE_NAME)?;
let server = tokio::spawn(async move {
for _ in 0..N {
// Wait for client to connect.
server.connect().await?;
let inner_server = server;
// Construct the next server to be connected before sending the one
// we already have of onto a task. This ensures that the server
// isn't closed (after it's done in the task) before a new one is
// available. Otherwise the client might error with
// `io::ErrorKind::NotFound`.
server = ServerOptions::new().create(PIPE_NAME)?;
let _ = tokio::spawn(async move {
let server = inner_server;
// Read exactly 5 bytes ("ping\n") via the readiness loop,
// retrying on spurious WouldBlock wakeups.
{
let mut read_buf = [0u8; 5];
let mut read_buf_cursor = 0;
loop {
server.readable().await?;
let buf = &mut read_buf[read_buf_cursor..];
match server.try_read(buf) {
Ok(n) => {
read_buf_cursor += n;
if read_buf_cursor == read_buf.len() {
break;
}
}
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
return Err(e);
}
}
}
};
// Then write the full "pong\n" reply the same way.
{
let write_buf = b"pong\n";
let mut write_buf_cursor = 0;
loop {
server.writable().await?;
let buf = &write_buf[write_buf_cursor..];
match server.try_write(buf) {
Ok(n) => {
write_buf_cursor += n;
if write_buf_cursor == write_buf.len() {
break;
}
}
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
return Err(e);
}
}
}
}
Ok::<_, io::Error>(())
});
}
Ok::<_, io::Error>(())
});
let mut clients = Vec::new();
for _ in 0..N {
clients.push(tokio::spawn(async move {
// This showcases a generic connect loop.
//
// We immediately try to create a client, if it's not found or the
// pipe is busy we use the specialized wait function on the client
// builder.
let client = loop {
match ClientOptions::new().open(PIPE_NAME) {
Ok(client) => break client,
Err(e) if e.raw_os_error() == Some(ERROR_PIPE_BUSY as i32) => (),
Err(e) if e.kind() == io::ErrorKind::NotFound => (),
Err(e) => return Err(e),
}
// Wait for a named pipe to become available.
time::sleep(Duration::from_millis(10)).await;
};
let mut read_buf = [0u8; 5];
let mut read_buf_cursor = 0;
let write_buf = b"ping\n";
let mut write_buf_cursor = 0;
// Interleaved read/write readiness loop: only ask for WRITABLE while
// there are bytes of "ping\n" left to send.
loop {
let mut interest = Interest::READABLE;
if write_buf_cursor < write_buf.len() {
interest |= Interest::WRITABLE;
}
let ready = client.ready(interest).await?;
if ready.is_readable() {
let buf = &mut read_buf[read_buf_cursor..];
match client.try_read(buf) {
Ok(n) => {
read_buf_cursor += n;
if read_buf_cursor == read_buf.len() {
break;
}
}
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
return Err(e);
}
}
}
if ready.is_writable() {
let buf = &write_buf[write_buf_cursor..];
if buf.is_empty() {
continue;
}
match client.try_write(buf) {
Ok(n) => {
write_buf_cursor += n;
}
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
return Err(e);
}
}
}
}
let buf = String::from_utf8_lossy(&read_buf).into_owned();
Ok::<_, io::Error>(buf)
}));
}
// Every client must have read back exactly "pong\n".
for client in clients {
let result = client.await?;
assert_eq!(result?, "pong\n");
}
server.await??;
Ok(())
}
// Creating a message-mode server and then opening + connecting a client
// must succeed.
#[tokio::test]
async fn test_named_pipe_mode_message() -> io::Result<()> {
    const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-mode-message";

    let server = ServerOptions::new()
        .pipe_mode(PipeMode::Message)
        .create(PIPE_NAME)?;

    // The client handle is dropped immediately; only the open must succeed.
    let _ = ClientOptions::new().open(PIPE_NAME)?;
    server.connect().await?;
    Ok(())
}
// This tests `NamedPipeServer::connect` with various access settings.
#[tokio::test]
async fn test_named_pipe_access() -> io::Result<()> {
const PIPE_NAME: &str = r"\\.\pipe\test-named-pipe-access";
// All inbound/outbound combinations except (false, false).
for (inb, outb) in [(true, true), (true, false), (false, true)] {
let (tx, rx) = tokio::sync::oneshot::channel();
let server = tokio::spawn(async move {
let s = ServerOptions::new()
.access_inbound(inb)
.access_outbound(outb)
.create(PIPE_NAME)?;
// connect() must be pending until a client actually opens the pipe.
let mut connect_fut = tokio_test::task::spawn(s.connect());
assert!(connect_fut.poll().is_pending());
tx.send(()).unwrap();
connect_fut.await
});
// Wait for the server to call connect.
rx.await.unwrap();
// Client read/write access mirrors the server's outbound/inbound flags.
let _ = ClientOptions::new().read(outb).write(inb).open(PIPE_NAME)?;
server.await??;
}
Ok(())
}
// Counts how many instances of the named pipe `pipe_name` currently exist,
// by asking the named-pipe filesystem root (`\\.\Pipe\`) for the matching
// directory entry via the raw `NtQueryDirectoryFile` syscall.
fn num_instances(pipe_name: impl AsRef<str>) -> io::Result<u32> {
use ntapi::ntioapi;
// The pipe name as a counted UTF-16 NT string, used as the search filter.
let mut name = pipe_name.as_ref().encode_utf16().collect::<Vec<_>>();
let mut name = UNICODE_STRING {
Length: (name.len() * mem::size_of::<u16>()) as u16,
MaximumLength: (name.len() * mem::size_of::<u16>()) as u16,
Buffer: name.as_mut_ptr(),
};
let root = std::fs::File::open(r"\\.\Pipe\")?;
let mut io_status_block = unsafe { mem::zeroed() };
let mut file_directory_information = [0_u8; 1024];
// SAFETY: raw NT syscall; out-buffer is the fixed 1024-byte array above and
// `name` filters the listing so only the matching entry is returned.
let status = unsafe {
ntioapi::NtQueryDirectoryFile(
root.as_raw_handle(),
std::ptr::null_mut(),
None,
std::ptr::null_mut(),
&mut io_status_block,
&mut file_directory_information as *mut _ as *mut _,
1024,
ntioapi::FileDirectoryInformation,
0,
&mut name as *mut _ as _,
0,
)
};
// NOTE(review): `status` is an NTSTATUS; reporting `last_os_error()` here
// assumes a corresponding Win32 last-error was set — TODO confirm.
if status as u32 != NO_ERROR {
return Err(io::Error::last_os_error());
}
let info = unsafe {
mem::transmute::<_, &ntioapi::FILE_DIRECTORY_INFORMATION>(&file_directory_information)
};
let raw_name = unsafe {
std::slice::from_raw_parts(
info.FileName.as_ptr(),
info.FileNameLength as usize / mem::size_of::<u16>(),
)
};
let name = String::from_utf16(raw_name).unwrap();
// For pipe entries `EndOfFile` apparently carries the instance count — the
// return value relies on that NPFS behavior.
let num_instances = unsafe { *info.EndOfFile.QuadPart() };
assert_eq!(name, pipe_name.as_ref());
Ok(num_instances as u32)
}

188
vendor/tokio/tests/net_panic.rs vendored Normal file
View file

@ -0,0 +1,188 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))]
use std::error::Error;
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime::{Builder, Runtime};
mod support {
pub mod panic;
}
use support::panic::test_panic;
/// `UdpSocket::from_std` on a runtime without an IO driver panics; the
/// reported panic location must point into this test file.
#[test]
fn udp_socket_from_std_panic_caller() -> Result<(), Box<dyn Error>> {
    use std::net::SocketAddr;
    use tokio::net::UdpSocket;

    let bind_addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
    let std_sock = std::net::UdpSocket::bind(bind_addr).unwrap();
    std_sock.set_nonblocking(true).unwrap();

    let panic_file = test_panic(|| {
        let rt = runtime_without_io();
        rt.block_on(async {
            let _sock = UdpSocket::from_std(std_sock);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
/// `TcpListener::from_std` on a runtime without an IO driver panics; the
/// reported panic location must point into this test file.
#[test]
fn tcp_listener_from_std_panic_caller() -> Result<(), Box<dyn Error>> {
    let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
    listener.set_nonblocking(true).unwrap();

    let panic_file = test_panic(|| {
        let rt = runtime_without_io();
        rt.block_on(async {
            let _ = TcpListener::from_std(listener);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
/// `TcpStream::from_std` on a runtime without an IO driver panics; the
/// reported panic location must point into this test file.
#[test]
fn tcp_stream_from_std_panic_caller() -> Result<(), Box<dyn Error>> {
    // A live listener is needed so the std connect below succeeds.
    let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
    let stream = std::net::TcpStream::connect(listener.local_addr().unwrap()).unwrap();
    stream.set_nonblocking(true).unwrap();

    let panic_file = test_panic(|| {
        let rt = runtime_without_io();
        rt.block_on(async {
            let _ = TcpStream::from_std(stream);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
/// `UnixListener::bind` on a runtime without an IO driver panics; the
/// reported panic location must point into this test file.
#[test]
#[cfg(unix)]
fn unix_listener_bind_panic_caller() -> Result<(), Box<dyn Error>> {
    use tokio::net::UnixListener;

    let tmp = tempfile::tempdir().unwrap();
    let path = tmp.path().join("socket");

    let panic_file = test_panic(|| {
        let rt = runtime_without_io();
        rt.block_on(async {
            let _ = UnixListener::bind(&path);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
/// `UnixListener::from_std` on a runtime without an IO driver panics; the
/// reported panic location must point into this test file.
#[test]
#[cfg(unix)]
fn unix_listener_from_std_panic_caller() -> Result<(), Box<dyn Error>> {
    use tokio::net::UnixListener;

    let tmp = tempfile::tempdir().unwrap();
    let path = tmp.path().join("socket");
    let std_listener = std::os::unix::net::UnixListener::bind(&path).unwrap();

    let panic_file = test_panic(|| {
        let rt = runtime_without_io();
        rt.block_on(async {
            let _ = UnixListener::from_std(std_listener);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
/// `UnixStream::from_std` on a runtime without an IO driver panics; the
/// reported panic location must point into this test file.
#[test]
#[cfg(unix)]
fn unix_stream_from_std_panic_caller() -> Result<(), Box<dyn Error>> {
    use tokio::net::UnixStream;

    let tmp = tempfile::tempdir().unwrap();
    let path = tmp.path().join("socket");

    // Keep a listener alive so the connect below succeeds.
    let _std_listener = std::os::unix::net::UnixListener::bind(&path).unwrap();
    let std_stream = std::os::unix::net::UnixStream::connect(&path).unwrap();

    let panic_file = test_panic(|| {
        let rt = runtime_without_io();
        rt.block_on(async {
            let _ = UnixStream::from_std(std_stream);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
/// `UnixDatagram::from_std` on a runtime without an IO driver panics; the
/// reported panic location must point into this test file.
#[test]
#[cfg(unix)]
fn unix_datagram_from_std_panic_caller() -> Result<(), Box<dyn Error>> {
    use std::os::unix::net::UnixDatagram as StdUDS;
    use tokio::net::UnixDatagram;

    let tmp = tempfile::tempdir().unwrap();
    let path = tmp.path().join("socket");

    // Bind the datagram socket to a filesystem path.
    let std_socket = StdUDS::bind(&path).unwrap();
    std_socket.set_nonblocking(true).unwrap();

    let panic_file = test_panic(move || {
        let rt = runtime_without_io();
        rt.block_on(async {
            let _ = UnixDatagram::from_std(std_socket);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
/// `ServerOptions::max_instances(255)` panics; the reported panic location
/// must point into this test file.
#[test]
#[cfg(windows)]
fn server_options_max_instances_panic_caller() -> Result<(), Box<dyn Error>> {
    use tokio::net::windows::named_pipe::ServerOptions;

    let panic_file = test_panic(move || {
        let rt = runtime_without_io();
        rt.block_on(async {
            let mut opts = ServerOptions::new();
            opts.max_instances(255);
        });
    });

    // `#[track_caller]` should attribute the panic to this file.
    assert_eq!(&panic_file.unwrap(), file!());
    Ok(())
}
// Runtime without `enable_io` so it has no IO driver set.
// Every test in this file uses this to provoke the "no reactor" panic path.
fn runtime_without_io() -> Runtime {
Builder::new_current_thread().build().unwrap()
}

41
vendor/tokio/tests/no_rt.rs vendored Normal file
View file

@ -0,0 +1,41 @@
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi does not support panic recovery
use tokio::net::TcpStream;
use tokio::sync::oneshot;
use tokio::time::{timeout, Duration};
use futures::executor::block_on;
use std::net::TcpListener;
// Driving `timeout` with a plain futures executor (no Tokio runtime) must
// panic with the documented "no reactor running" message.
#[test]
#[should_panic(
expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
)]
fn timeout_panics_when_no_tokio_context() {
block_on(timeout_value());
}
/// Driving `TcpStream::connect` outside any Tokio runtime must panic with
/// the documented "no reactor running" message.
#[test]
#[should_panic(
    expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
)]
fn panics_when_no_reactor() {
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = listener.local_addr().unwrap();
    block_on(TcpStream::connect(&addr)).unwrap();
}
/// Helper: awaits a 10ms timeout wrapped around a oneshot that never fires.
async fn timeout_value() {
    let (_tx, rx) = oneshot::channel::<()>();
    let _ = timeout(Duration::from_millis(10), rx).await;
}
// Converting a std listener with `TcpListener::from_std` outside any Tokio
// runtime must panic with the documented "no reactor running" message.
#[test]
#[should_panic(
expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
)]
fn io_panics_when_no_tokio_context() {
let _ = tokio::net::TcpListener::from_std(std::net::TcpListener::bind("127.0.0.1:0").unwrap());
}

13
vendor/tokio/tests/process_arg0.rs vendored Normal file
View file

@ -0,0 +1,13 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", unix))]
use tokio::process::Command;
/// `Command::arg0` overrides argv[0]; `sh -c 'echo $0'` echoes it back.
#[tokio::test]
async fn arg0() {
    let mut cmd = Command::new("sh");
    cmd.arg0("test_string").arg("-c").arg("echo $0");

    let output = cmd.output().await.unwrap();
    assert_eq!(output.stdout, b"test_string\n");
}

View file

@ -0,0 +1,45 @@
#![cfg(feature = "process")]
#![warn(rust_2018_idioms)]
// This test reveals a difference in behavior of kqueue on FreeBSD. When the
// reader disconnects, there does not seem to be an `EVFILT_WRITE` filter that
// is returned.
//
// It is expected that `EVFILT_WRITE` would be returned with either the
// `EV_EOF` or `EV_ERROR` flag set. If either flag is set a write would be
// attempted, but that does not seem to occur.
#![cfg(all(unix, not(target_os = "freebsd")))]
use std::process::Stdio;
use std::time::Duration;
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use tokio::time;
use tokio_test::assert_err;
// Regression test for tokio-rs/tokio#2174: a task blocked writing to the
// child's full stdin pipe must error out (not hang) once the child is killed.
#[tokio::test]
async fn issue_2174() {
let mut child = Command::new("sleep")
.arg("2")
.stdin(Stdio::piped())
.stdout(Stdio::null())
.spawn()
.unwrap();
let mut input = child.stdin.take().unwrap();
// Writes will buffer up to 65_636. This *should* loop at least 8 times
// and then register interest.
let handle = tokio::spawn(async move {
let data = [0u8; 8192];
loop {
input.write_all(&data).await.unwrap();
}
});
// Sleep enough time so that the child process's stdin's buffer fills.
time::sleep(Duration::from_secs(1)).await;
// Kill the child process.
child.kill().await.unwrap();
// The writer task's `unwrap` must now panic, surfacing as a JoinError.
assert_err!(handle.await);
}

38
vendor/tokio/tests/process_issue_42.rs vendored Normal file
View file

@ -0,0 +1,38 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
use futures::future::join_all;
use std::process::Stdio;
use tokio::process::Command;
use tokio::task;
// Regression test for alexcrichton/tokio-process#42: readiness events for
// one exiting child must not starve the wait futures of its siblings.
#[tokio::test]
async fn issue_42() {
// We spawn a many batches of processes which should exit at roughly the
// same time (modulo OS scheduling delays), to make sure that consuming
// a readiness event for one process doesn't inadvertently starve another.
// We then do this many times (in parallel) in an effort to stress test the
// implementation to ensure there are no race conditions.
// See alexcrichton/tokio-process#42 for background
let join_handles = (0..10usize).map(|_| {
task::spawn(async {
let processes = (0..10usize).map(|i| {
let mut child = Command::new("echo")
.arg(format!("I am spawned process #{}", i))
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.kill_on_drop(true)
.spawn()
.unwrap();
async move { child.wait().await }
});
// Await all ten children of this batch concurrently.
join_all(processes).await;
})
});
join_all(join_handles).await;
}

View file

@ -0,0 +1,44 @@
#![cfg(all(unix, feature = "process"))]
#![warn(rust_2018_idioms)]
use std::io::ErrorKind;
use std::process::Stdio;
use std::time::Duration;
use tokio::io::AsyncReadExt;
use tokio::process::Command;
use tokio::time::sleep;
use tokio_test::assert_ok;
/// `kill_on_drop(true)` must kill the direct bash child when its handle is
/// dropped, while the already-forked, disowned grandchild keeps running and
/// its output remains readable from the inherited stdout pipe.
#[tokio::test]
async fn kill_on_drop() {
    let mut cmd = Command::new("bash");
    cmd.args(&[
        "-c",
        "
# Fork another child that won't get killed
sh -c 'sleep 1; echo child ran' &
disown -a
# Await our death
sleep 5
echo hello from beyond the grave
",
    ]);

    // Skip (rather than fail) on systems where bash is not installed;
    // any other spawn failure is a real error.
    let mut child = match cmd.kill_on_drop(true).stdout(Stdio::piped()).spawn() {
        Ok(child) => child,
        Err(e) if e.kind() == ErrorKind::NotFound => {
            println!("bash not available; skipping test");
            return;
        }
        Err(e) => panic!("failed to spawn bash: {}", e),
    };

    // Give the grandchild time to print, then drop (and thereby kill) the
    // bash child before its own 5-second sleep can finish.
    sleep(Duration::from_secs(2)).await;

    let mut out = child.stdout.take().unwrap();
    drop(child);

    // Only the disowned grandchild's line must appear; the killed child
    // never reaches its final echo.
    let mut msg = String::new();
    assert_ok!(out.read_to_string(&mut msg).await);
    assert_eq!("child ran\n", msg);
}

View file

@ -0,0 +1,23 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(windows)]
use tokio::process::Command;
use windows_sys::Win32::System::Threading::GetProcessId;
/// The process id derived from the raw Windows handle must match the id
/// tokio reports for the child.
#[tokio::test]
async fn obtain_raw_handle() {
    let mut cmd = Command::new("cmd");
    cmd.kill_on_drop(true);
    cmd.arg("/c");
    cmd.arg("pause");

    let child = cmd.spawn().unwrap();

    let reported_id = child.id().expect("missing id");
    assert!(reported_id > 0);

    let raw = child.raw_handle().expect("process stopped");
    let id_from_handle = unsafe { GetProcessId(raw as _) };
    assert_eq!(id_from_handle, reported_id);
}

34
vendor/tokio/tests/process_smoke.rs vendored Normal file
View file

@ -0,0 +1,34 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi cannot run system commands
use tokio::process::Command;
use tokio_test::assert_ok;
/// Smoke test: spawn a shell that exits with status 2, check id/wait/kill
/// behavior around process exit.
#[tokio::test]
async fn simple() {
    let mut cmd;
    if cfg!(windows) {
        cmd = Command::new("cmd");
        cmd.arg("/c");
    } else {
        cmd = Command::new("sh");
        cmd.arg("-c");
    }

    let mut child = cmd.arg("exit 2").spawn().unwrap();

    assert!(child.id().expect("missing id") > 0);

    let status = assert_ok!(child.wait().await);
    assert_eq!(status.code(), Some(2));

    // `wait()` is fused just like the stdlib: a second call returns the
    // same exit status.
    let status = assert_ok!(child.wait().await);
    assert_eq!(status.code(), Some(2));

    // After the process has exited the id is no longer available, and
    // `kill` on a reaped child returns an error we deliberately ignore.
    assert_eq!(child.id(), None);
    drop(child.kill());
}

448
vendor/tokio/tests/rt_basic.rs vendored Normal file
View file

@ -0,0 +1,448 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::runtime::Runtime;
use tokio::sync::oneshot;
use tokio::time::{timeout, Duration};
use tokio_test::{assert_err, assert_ok};
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::task::{Context, Poll};
use std::thread;
mod support {
pub(crate) mod mpsc_stream;
}
// Expands its body only when `tokio_unstable` is set, so metrics assertions
// compile away on stable builds.
macro_rules! cfg_metrics {
($($t:tt)*) => {
#[cfg(tokio_unstable)]
{
$( $t )*
}
}
}
/// On a current-thread runtime, a spawned task makes no progress until the
/// runtime is driven via `block_on`.
#[test]
fn spawned_task_does_not_progress_without_block_on() {
    let (tx, mut rx) = oneshot::channel();
    let rt = rt();

    rt.spawn(async move {
        assert_ok!(tx.send("hello"));
    });

    // Nothing drives the runtime here, so the send cannot have happened yet.
    thread::sleep(Duration::from_millis(50));
    assert_err!(rx.try_recv());

    // Entering the runtime finally polls the spawned task to completion.
    let msg = rt.block_on(async { assert_ok!(rx.await) });
    assert_eq!(msg, "hello");
}
// The scheduler must poll a task only when it is actually woken: one poll at
// spawn, two per message (item + pending), one for the final None.
#[test]
fn no_extra_poll() {
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::sync::{
atomic::{AtomicUsize, Ordering::SeqCst},
Arc,
};
use std::task::{Context, Poll};
use tokio_stream::{Stream, StreamExt};
// Wrapper stream that counts every `poll_next` call on the inner stream.
pin_project! {
struct TrackPolls<S> {
npolls: Arc<AtomicUsize>,
#[pin]
s: S,
}
}
impl<S> Stream for TrackPolls<S>
where
S: Stream,
{
type Item = S::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.project();
this.npolls.fetch_add(1, SeqCst);
this.s.poll_next(cx)
}
}
let (tx, rx) = support::mpsc_stream::unbounded_channel_stream::<()>();
let rx = TrackPolls {
npolls: Arc::new(AtomicUsize::new(0)),
s: rx,
};
let npolls = Arc::clone(&rx.npolls);
let rt = rt();
// TODO: could probably avoid this, but why not.
let mut rx = Box::pin(rx);
rt.spawn(async move { while rx.next().await.is_some() {} });
rt.block_on(async {
tokio::task::yield_now().await;
});
// should have been polled exactly once: the initial poll
assert_eq!(npolls.load(SeqCst), 1);
tx.send(()).unwrap();
rt.block_on(async {
tokio::task::yield_now().await;
});
// should have been polled twice more: once to yield Some(), then once to yield Pending
assert_eq!(npolls.load(SeqCst), 1 + 2);
drop(tx);
rt.block_on(async {
tokio::task::yield_now().await;
});
// should have been polled once more: to yield None
assert_eq!(npolls.load(SeqCst), 1 + 2 + 1);
}
// Shutdown test: tasks chained together via oneshot channels (each sender
// is released only when its task is dropped) must not wedge runtime drop.
#[test]
fn acquire_mutex_in_drop() {
use futures::future::pending;
use tokio::task;
let (tx1, rx1) = oneshot::channel();
let (tx2, rx2) = oneshot::channel();
let rt = rt();
// Task A waits on rx2; it is never meant to complete normally.
rt.spawn(async move {
let _ = rx2.await;
unreachable!();
});
// Task B waits on rx1, then would signal task A.
rt.spawn(async move {
let _ = rx1.await;
tx2.send(()).unwrap();
unreachable!();
});
// Spawn a task that will never notify
rt.spawn(async move {
pending::<()>().await;
tx1.send(()).unwrap();
});
// Tick the loop
rt.block_on(async {
task::yield_now().await;
});
// Drop the rt
drop(rt);
}
// When the runtime is dropped, pending tasks must be dropped *inside* a
// runtime context (Handle::try_current succeeds from their Drop impls).
#[test]
fn drop_tasks_in_context() {
static SUCCESS: AtomicBool = AtomicBool::new(false);
// Future that never completes; its Drop records whether a runtime
// context was active at drop time.
struct ContextOnDrop;
impl Future for ContextOnDrop {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Pending
}
}
impl Drop for ContextOnDrop {
fn drop(&mut self) {
if tokio::runtime::Handle::try_current().is_ok() {
SUCCESS.store(true, Ordering::SeqCst);
}
}
}
let rt = rt();
rt.spawn(ContextOnDrop);
drop(rt);
assert!(SUCCESS.load(Ordering::SeqCst));
}
// A task whose Drop wakes another task must not crash runtime teardown that
// is already unwinding from a `block_on` panic; the original "boom" panic
// must still be the one observed.
#[test]
#[cfg_attr(tokio_wasi, ignore = "Wasi does not support panic recovery")]
#[should_panic(expected = "boom")]
fn wake_in_drop_after_panic() {
let (tx, rx) = oneshot::channel::<()>();
// Sends on its oneshot when dropped, waking the task awaiting `rx`.
struct WakeOnDrop(Option<oneshot::Sender<()>>);
impl Drop for WakeOnDrop {
fn drop(&mut self) {
self.0.take().unwrap().send(()).unwrap();
}
}
let rt = rt();
rt.spawn(async move {
let _wake_on_drop = WakeOnDrop(Some(tx));
// wait forever
futures::future::pending::<()>().await;
});
let _join = rt.spawn(async move { rx.await });
rt.block_on(async {
tokio::task::yield_now().await;
panic!("boom");
});
}
// A task spawned from within another task runs to completion; with
// `tokio_unstable`, both spawns must be counted as *local* schedules
// (no remote scheduling happened).
#[test]
fn spawn_two() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
tokio::spawn(async move {
tx.send("ZOMG").unwrap();
});
});
assert_ok!(rx.await)
});
assert_eq!(out, "ZOMG");
cfg_metrics! {
let metrics = rt.metrics();
drop(rt);
assert_eq!(0, metrics.remote_schedule_count());
let mut local = 0;
for i in 0..metrics.num_workers() {
local += metrics.worker_local_schedule_count(i);
}
assert_eq!(2, local);
}
}
// A completion signaled from a foreign OS thread wakes the task; with
// `tokio_unstable`, that wakeup must register as exactly one remote
// schedule and one local schedule.
#[cfg_attr(tokio_wasi, ignore = "WASI: std::thread::spawn not supported")]
#[test]
fn spawn_remote() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
let handle = tokio::spawn(async move {
// Send from outside the runtime after a short delay.
std::thread::spawn(move || {
std::thread::sleep(Duration::from_millis(10));
tx.send("ZOMG").unwrap();
});
rx.await.unwrap()
});
handle.await.unwrap()
});
assert_eq!(out, "ZOMG");
cfg_metrics! {
let metrics = rt.metrics();
drop(rt);
assert_eq!(1, metrics.remote_schedule_count());
let mut local = 0;
for i in 0..metrics.num_workers() {
local += metrics.worker_local_schedule_count(i);
}
assert_eq!(1, local);
}
}
/// `timeout` inside a runtime that was built WITHOUT `enable_time` must
/// panic with the documented "timers are disabled" message.
#[test]
#[cfg_attr(tokio_wasi, ignore = "Wasi does not support panic recovery")]
#[should_panic(
    expected = "A Tokio 1.x context was found, but timers are disabled. Call `enable_time` on the runtime builder to enable timers."
)]
fn timeout_panics_when_no_time_handle() {
    // Deliberately built without enable_time()/enable_all().
    let rt = tokio::runtime::Builder::new_current_thread()
        .build()
        .unwrap();

    rt.block_on(async {
        let (_tx, rx) = oneshot::channel::<()>();
        let _ = timeout(Duration::from_millis(20), rx).await;
    });
}
// Tests for unstable runtime features: `UnhandledPanic::ShutdownRuntime`
// and deterministic RNG seeding. Only compiled with `--cfg tokio_unstable`.
#[cfg(tokio_unstable)]
mod unstable {
use tokio::runtime::{Builder, RngSeed, UnhandledPanic};
// A panicking spawned task must bring the whole runtime down when
// ShutdownRuntime is configured.
#[test]
#[should_panic(
expected = "a spawned task panicked and the runtime is configured to shut down on unhandled panic"
)]
fn shutdown_on_panic() {
let rt = Builder::new_current_thread()
.unhandled_panic(UnhandledPanic::ShutdownRuntime)
.build()
.unwrap();
rt.block_on(async {
tokio::spawn(async {
panic!("boom");
});
futures::future::pending::<()>().await;
})
}
// After a panic-triggered shutdown, spawning still succeeds but the new
// task is never run — its JoinHandle resolves to an error.
#[test]
#[cfg_attr(tokio_wasi, ignore = "Wasi does not support panic recovery")]
fn spawns_do_nothing() {
use std::sync::Arc;
let rt = Builder::new_current_thread()
.unhandled_panic(UnhandledPanic::ShutdownRuntime)
.build()
.unwrap();
let rt1 = Arc::new(rt);
let rt2 = rt1.clone();
// Trigger the shutdown from a separate thread; its join error is the
// propagated "boom" panic.
let _ = std::thread::spawn(move || {
rt2.block_on(async {
tokio::spawn(async {
panic!("boom");
});
futures::future::pending::<()>().await;
})
})
.join();
let task = rt1.spawn(async {});
let res = futures::executor::block_on(task);
assert!(res.is_err());
}
// A panic-triggered shutdown must unwind *every* thread concurrently
// blocked in `block_on`, not just the one that saw the panic.
#[test]
#[cfg_attr(tokio_wasi, ignore = "Wasi does not support panic recovery")]
fn shutdown_all_concurrent_block_on() {
const N: usize = 2;
use std::sync::{mpsc, Arc};
let rt = Builder::new_current_thread()
.unhandled_panic(UnhandledPanic::ShutdownRuntime)
.build()
.unwrap();
let rt = Arc::new(rt);
let mut ths = vec![];
let (tx, rx) = mpsc::channel();
for _ in 0..N {
let rt = rt.clone();
let tx = tx.clone();
ths.push(std::thread::spawn(move || {
rt.block_on(async {
tx.send(()).unwrap();
futures::future::pending::<()>().await;
});
}));
}
// Wait until all N threads are inside block_on before panicking.
for _ in 0..N {
rx.recv().unwrap();
}
rt.spawn(async {
panic!("boom");
});
for th in ths {
assert!(th.join().is_err());
}
}
// Two runtimes built from the same seed must produce identical
// `thread_rng_n` sequences.
#[test]
fn rng_seed() {
let seed = b"bytes used to generate seed";
let rt1 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt1_values = rt1.block_on(async {
let rand_1 = tokio::macros::support::thread_rng_n(100);
let rand_2 = tokio::macros::support::thread_rng_n(100);
(rand_1, rand_2)
});
let rt2 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt2_values = rt2.block_on(async {
let rand_1 = tokio::macros::support::thread_rng_n(100);
let rand_2 = tokio::macros::support::thread_rng_n(100);
(rand_1, rand_2)
});
assert_eq!(rt1_values, rt2_values);
}
// Determinism must also hold across multiple block_on entries into the
// same seeded runtime.
#[test]
fn rng_seed_multi_enter() {
let seed = b"bytes used to generate seed";
fn two_rand_values() -> (u32, u32) {
let rand_1 = tokio::macros::support::thread_rng_n(100);
let rand_2 = tokio::macros::support::thread_rng_n(100);
(rand_1, rand_2)
}
let rt1 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt1_values_1 = rt1.block_on(async { two_rand_values() });
let rt1_values_2 = rt1.block_on(async { two_rand_values() });
let rt2 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt2_values_1 = rt2.block_on(async { two_rand_values() });
let rt2_values_2 = rt2.block_on(async { two_rand_values() });
assert_eq!(rt1_values_1, rt2_values_1);
assert_eq!(rt1_values_2, rt2_values_2);
}
}
// Current-thread runtime with all drivers enabled; the runtime under test
// for everything in this file.
fn rt() -> Runtime {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
}

1279
vendor/tokio/tests/rt_common.rs vendored Normal file

File diff suppressed because it is too large Load diff

535
vendor/tokio/tests/rt_handle_block_on.rs vendored Normal file
View file

@ -0,0 +1,535 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
// All io tests that deal with shutdown is currently ignored because there are known bugs in with
// shutting down the io driver while concurrently registering new resources. See
// https://github.com/tokio-rs/tokio/pull/3569#pullrequestreview-612703467 fo more details.
//
// When this has been fixed we want to re-enable these tests.
use std::time::Duration;
use tokio::runtime::{Handle, Runtime};
use tokio::sync::mpsc;
#[cfg(not(tokio_wasi))]
use tokio::{net, time};
// Stamps out the given tests twice: once against a 4-worker and once
// against a 1-worker multi-thread runtime, each in its own module.
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
macro_rules! multi_threaded_rt_test {
($($t:tt)*) => {
mod threaded_scheduler_4_threads_only {
use super::*;
$($t)*
fn rt() -> Runtime {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(4)
.enable_all()
.build()
.unwrap()
}
}
mod threaded_scheduler_1_thread_only {
use super::*;
$($t)*
fn rt() -> Runtime {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.enable_all()
.build()
.unwrap()
}
}
}
}
// Stamps out the given tests three times: current-thread, 4-worker, and
// 1-worker runtimes, each in its own module with a matching `rt()` helper.
#[cfg(not(tokio_wasi))]
macro_rules! rt_test {
($($t:tt)*) => {
mod current_thread_scheduler {
use super::*;
$($t)*
fn rt() -> Runtime {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
}
}
mod threaded_scheduler_4_threads {
use super::*;
$($t)*
fn rt() -> Runtime {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(4)
.enable_all()
.build()
.unwrap()
}
}
mod threaded_scheduler_1_thread {
use super::*;
$($t)*
fn rt() -> Runtime {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.enable_all()
.build()
.unwrap()
}
}
}
}
// ==== runtime independent futures ======
/// `Handle::block_on` drives a trivial future to completion on every
/// runtime flavor.
#[test]
fn basic() {
    test_with_runtimes(|| {
        let value = Handle::current().block_on(async { 1 });
        assert_eq!(value, 1);
    });
}
/// A bounded mpsc send/recv round-trip works through `Handle::block_on`
/// on every runtime flavor.
#[test]
fn bounded_mpsc_channel() {
    test_with_runtimes(|| {
        let (tx, mut rx) = mpsc::channel(1024);

        Handle::current().block_on(tx.send(42)).unwrap();

        let received = Handle::current().block_on(rx.recv()).unwrap();
        assert_eq!(received, 42);
    });
}
/// An unbounded mpsc send/recv round-trip works through `Handle::block_on`
/// on every runtime flavor (send itself needs no runtime).
#[test]
fn unbounded_mpsc_channel() {
    test_with_runtimes(|| {
        let (tx, mut rx) = mpsc::unbounded_channel();

        let _ = tx.send(42);

        let received = Handle::current().block_on(rx.recv()).unwrap();
        assert_eq!(received, 42);
    })
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support file operations or bind
rt_test! {
use tokio::fs;
// ==== spawn blocking futures ======
#[test]
fn basic_fs() {
let rt = rt();
let _enter = rt.enter();
let contents = Handle::current()
.block_on(fs::read_to_string("Cargo.toml"))
.unwrap();
assert!(contents.contains("https://tokio.rs"));
}
#[test]
fn fs_shutdown_before_started() {
let rt = rt();
let _enter = rt.enter();
rt.shutdown_timeout(Duration::from_secs(1000));
let err: std::io::Error = Handle::current()
.block_on(fs::read_to_string("Cargo.toml"))
.unwrap_err();
assert_eq!(err.kind(), std::io::ErrorKind::Other);
let inner_err = err.get_ref().expect("no inner error");
assert_eq!(inner_err.to_string(), "background task failed");
}
#[test]
fn basic_spawn_blocking() {
use tokio::task::spawn_blocking;
let rt = rt();
let _enter = rt.enter();
let answer = Handle::current()
.block_on(spawn_blocking(|| {
std::thread::sleep(Duration::from_millis(100));
42
}))
.unwrap();
assert_eq!(answer, 42);
}
// A blocking task spawned after shutdown never runs: awaiting its handle
// yields a cancelled `JoinError`.
#[test]
fn spawn_blocking_after_shutdown_fails() {
    use tokio::task::spawn_blocking;
    let rt = rt();
    let _enter = rt.enter();
    rt.shutdown_timeout(Duration::from_secs(1000));
    let join_err = Handle::current()
        .block_on(spawn_blocking(|| {
            std::thread::sleep(Duration::from_millis(100));
            42
        }))
        .unwrap_err();
    assert!(join_err.is_cancelled());
}
// A blocking task that was already spawned before shutdown is allowed to
// finish (shutdown_timeout waits for it), and its result is still
// retrievable afterwards.
#[test]
fn spawn_blocking_started_before_shutdown_continues() {
    use tokio::task::spawn_blocking;
    let rt = rt();
    let _enter = rt.enter();
    let handle = spawn_blocking(|| {
        std::thread::sleep(Duration::from_secs(1));
        42
    });
    rt.shutdown_timeout(Duration::from_secs(1000));
    let answer = Handle::current().block_on(handle).unwrap();
    assert_eq!(answer, 42);
}
// ==== net ======
// Binding a TCP listener through `Handle::block_on` succeeds while the
// runtime is alive.
#[test]
fn tcp_listener_bind() {
    let rt = rt();
    let _enter = rt.enter();
    Handle::current()
        .block_on(net::TcpListener::bind("127.0.0.1:0"))
        .unwrap();
}
// All io tests are ignored for now. See above why that is.
// Binding a TCP listener after shutdown should fail with the
// "context is being shutdown" error rather than hang.
#[ignore]
#[test]
fn tcp_listener_connect_after_shutdown() {
    let rt = rt();
    let _enter = rt.enter();
    rt.shutdown_timeout(Duration::from_secs(1000));
    let err = Handle::current()
        .block_on(net::TcpListener::bind("127.0.0.1:0"))
        .unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Other);
    assert_eq!(
        err.get_ref().unwrap().to_string(),
        "A Tokio 1.x context was found, but it is being shutdown.",
    );
}
// All io tests are ignored for now. See above why that is.
// A bind future created *before* shutdown but driven *after* it also fails
// with the shutdown error.
#[ignore]
#[test]
fn tcp_listener_connect_before_shutdown() {
    let rt = rt();
    let _enter = rt.enter();
    let bind_future = net::TcpListener::bind("127.0.0.1:0");
    rt.shutdown_timeout(Duration::from_secs(1000));
    let err = Handle::current().block_on(bind_future).unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Other);
    assert_eq!(
        err.get_ref().unwrap().to_string(),
        "A Tokio 1.x context was found, but it is being shutdown.",
    );
}
// Binding a UDP socket through `Handle::block_on` succeeds while the
// runtime is alive.
#[test]
fn udp_socket_bind() {
    let rt = rt();
    let _enter = rt.enter();
    Handle::current()
        .block_on(net::UdpSocket::bind("127.0.0.1:0"))
        .unwrap();
}
// All io tests are ignored for now. See above why that is.
// UDP bind after shutdown fails with the shutdown error, mirroring the TCP
// case above.
#[ignore]
#[test]
fn udp_stream_bind_after_shutdown() {
    let rt = rt();
    let _enter = rt.enter();
    rt.shutdown_timeout(Duration::from_secs(1000));
    let err = Handle::current()
        .block_on(net::UdpSocket::bind("127.0.0.1:0"))
        .unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Other);
    assert_eq!(
        err.get_ref().unwrap().to_string(),
        "A Tokio 1.x context was found, but it is being shutdown.",
    );
}
// All io tests are ignored for now. See above why that is.
// A UDP bind future created before shutdown but awaited after it fails
// with the shutdown error.
#[ignore]
#[test]
fn udp_stream_bind_before_shutdown() {
    let rt = rt();
    let _enter = rt.enter();
    let bind_future = net::UdpSocket::bind("127.0.0.1:0");
    rt.shutdown_timeout(Duration::from_secs(1000));
    let err = Handle::current().block_on(bind_future).unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Other);
    assert_eq!(
        err.get_ref().unwrap().to_string(),
        "A Tokio 1.x context was found, but it is being shutdown.",
    );
}
// All io tests are ignored for now. See above why that is.
// `UnixListener::bind` is synchronous; after shutdown it reports the
// shutdown error directly (no `block_on` needed).
#[ignore]
#[cfg(unix)]
#[test]
fn unix_listener_bind_after_shutdown() {
    let rt = rt();
    let _enter = rt.enter();
    let dir = tempfile::tempdir().unwrap();
    let path = dir.path().join("socket");
    rt.shutdown_timeout(Duration::from_secs(1000));
    let err = net::UnixListener::bind(path).unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Other);
    assert_eq!(
        err.get_ref().unwrap().to_string(),
        "A Tokio 1.x context was found, but it is being shutdown.",
    );
}
// All io tests are ignored for now. See above why that is.
// Accepting on a listener whose runtime has been shut down fails fast with
// "reactor gone" instead of blocking forever.
#[ignore]
#[cfg(unix)]
#[test]
fn unix_listener_shutdown_after_bind() {
    let rt = rt();
    let _enter = rt.enter();
    let dir = tempfile::tempdir().unwrap();
    let path = dir.path().join("socket");
    let listener = net::UnixListener::bind(path).unwrap();
    rt.shutdown_timeout(Duration::from_secs(1000));
    // this should not timeout but fail immediately since the runtime has been shutdown
    let err = Handle::current().block_on(listener.accept()).unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Other);
    assert_eq!(err.get_ref().unwrap().to_string(), "reactor gone");
}
// All io tests are ignored for now. See above why that is.
// Same as above, but the accept future is created before shutdown and only
// polled afterwards; it must still fail fast with "reactor gone".
#[ignore]
#[cfg(unix)]
#[test]
fn unix_listener_shutdown_after_accept() {
    let rt = rt();
    let _enter = rt.enter();
    let dir = tempfile::tempdir().unwrap();
    let path = dir.path().join("socket");
    let listener = net::UnixListener::bind(path).unwrap();
    let accept_future = listener.accept();
    rt.shutdown_timeout(Duration::from_secs(1000));
    // this should not timeout but fail immediately since the runtime has been shutdown
    let err = Handle::current().block_on(accept_future).unwrap_err();
    assert_eq!(err.kind(), std::io::ErrorKind::Other);
    assert_eq!(err.get_ref().unwrap().to_string(), "reactor gone");
}
// ==== nesting ======
// Calling `Handle::block_on` from code already running on a runtime worker
// must panic with the "Cannot start a runtime from within a runtime"
// message instead of deadlocking.
#[test]
#[should_panic(
    expected = "Cannot start a runtime from within a runtime. This happens because a function (like `block_on`) attempted to block the current thread while the thread is being used to drive asynchronous tasks."
)]
fn nesting() {
    fn some_non_async_function() -> i32 {
        Handle::current().block_on(time::sleep(Duration::from_millis(10)));
        1
    }
    let rt = rt();
    rt.block_on(async { some_non_async_function() });
}
// Tasks spawned through a `Handle` are cancelled when the runtime is
// dropped — both tasks spawned before the drop and tasks spawned after it.
#[test]
fn spawn_after_runtime_dropped() {
    use futures::future::FutureExt;
    let rt = rt();
    let handle = rt.block_on(async move {
        Handle::current()
    });
    let jh1 = handle.spawn(futures::future::pending::<()>());
    drop(rt);
    let jh2 = handle.spawn(futures::future::pending::<()>());
    // `now_or_never` works because cancelled join handles resolve immediately.
    let err1 = jh1.now_or_never().unwrap().unwrap_err();
    let err2 = jh2.now_or_never().unwrap().unwrap_err();
    assert!(err1.is_cancelled());
    assert!(err2.is_cancelled());
}
}
#[cfg(not(tokio_wasi))]
multi_threaded_rt_test! {
// On a live multi-threaded runtime, accepting on a listener with no peers
// should time out (proving the I/O driver is running) rather than error.
#[cfg(unix)]
#[test]
fn unix_listener_bind() {
    let rt = rt();
    let _enter = rt.enter();
    let dir = tempfile::tempdir().unwrap();
    let path = dir.path().join("socket");
    let listener = net::UnixListener::bind(path).unwrap();
    // this should timeout and not fail immediately since the runtime has not been shutdown
    let _: tokio::time::error::Elapsed = Handle::current()
        .block_on(tokio::time::timeout(
            Duration::from_millis(10),
            listener.accept(),
        ))
        .unwrap_err();
}
// ==== timers ======
// `Handle::block_on` doesn't work with timer futures on a current thread runtime as there is no
// one to drive the timers so they will just hang forever. Therefore they are not tested.
#[test]
fn sleep() {
    let rt = rt();
    let _enter = rt.enter();
    // On a multi-threaded runtime, worker threads drive the timer, so this completes.
    Handle::current().block_on(time::sleep(Duration::from_millis(100)));
}
// A sleep future created before shutdown panics with the shutdown message
// when driven after the runtime is gone.
#[test]
#[should_panic(expected = "A Tokio 1.x context was found, but it is being shutdown.")]
fn sleep_before_shutdown_panics() {
    let rt = rt();
    let _enter = rt.enter();
    let f = time::sleep(Duration::from_millis(100));
    rt.shutdown_timeout(Duration::from_secs(1000));
    Handle::current().block_on(f);
}
// Creating and driving a sleep future entirely after shutdown also panics
// with the shutdown message.
#[test]
#[should_panic(expected = "A Tokio 1.x context was found, but it is being shutdown.")]
fn sleep_after_shutdown_panics() {
    let rt = rt();
    let _enter = rt.enter();
    rt.shutdown_timeout(Duration::from_secs(1000));
    Handle::current().block_on(time::sleep(Duration::from_millis(100)));
}
}
// ==== utils ======
/// Build a multi-threaded runtime with `n` worker threads and all drivers
/// (I/O, time) enabled.
#[cfg(not(tokio_wasi))]
fn new_multi_thread(n: usize) -> Runtime {
    let mut builder = tokio::runtime::Builder::new_multi_thread();
    builder.worker_threads(n).enable_all();
    builder.build().unwrap()
}
/// Build a current-thread (single-threaded) runtime with all drivers enabled.
fn new_current_thread() -> Runtime {
    let mut builder = tokio::runtime::Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}
/// Utility to test things on both kinds of runtimes both before and after shutting it down.
///
/// The closure `f` is invoked twice per runtime flavor: once inside a live
/// entered context, and once after `shutdown_timeout` has torn the runtime
/// down (so `f` can assert post-shutdown behavior).
fn test_with_runtimes<F>(f: F)
where
    F: Fn(),
{
    // Current-thread flavor.
    {
        let rt = new_current_thread();
        let _enter = rt.enter();
        f();
        rt.shutdown_timeout(Duration::from_secs(1000));
        f();
    }
    // Multi-threaded flavors are unavailable on WASI.
    #[cfg(not(tokio_wasi))]
    {
        let rt = new_multi_thread(1);
        let _enter = rt.enter();
        f();
        rt.shutdown_timeout(Duration::from_secs(1000));
        f();
    }
    #[cfg(not(tokio_wasi))]
    {
        let rt = new_multi_thread(4);
        let _enter = rt.enter();
        f();
        rt.shutdown_timeout(Duration::from_secs(1000));
        f();
    }
}

468
vendor/tokio/tests/rt_metrics.rs vendored Normal file
View file

@ -0,0 +1,468 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", tokio_unstable, not(tokio_wasi)))]
use std::sync::{Arc, Mutex};
use tokio::runtime::Runtime;
use tokio::time::{self, Duration};
// `num_workers` reports 1 for the current-thread flavor and the configured
// worker count for the threaded flavor.
#[test]
fn num_workers() {
    let rt = current_thread();
    assert_eq!(1, rt.metrics().num_workers());
    let rt = threaded();
    assert_eq!(2, rt.metrics().num_workers());
}
// Blocking threads are spawned lazily: none exist until the first
// `spawn_blocking` call.
#[test]
fn num_blocking_threads() {
    let rt = current_thread();
    assert_eq!(0, rt.metrics().num_blocking_threads());
    let _ = rt.block_on(rt.spawn_blocking(move || {}));
    assert_eq!(1, rt.metrics().num_blocking_threads());
}
// After a blocking task completes, its thread returns to the idle pool.
// The short sleep gives the thread time to park before we read the metric.
#[test]
fn num_idle_blocking_threads() {
    let rt = current_thread();
    assert_eq!(0, rt.metrics().num_idle_blocking_threads());
    let _ = rt.block_on(rt.spawn_blocking(move || {}));
    rt.block_on(async {
        time::sleep(Duration::from_millis(5)).await;
    });
    assert_eq!(1, rt.metrics().num_idle_blocking_threads());
}
// With only one blocking thread allowed, a second `spawn_blocking` task
// must sit in the blocking queue while the first holds the mutex.
#[test]
fn blocking_queue_depth() {
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .max_blocking_threads(1)
        .build()
        .unwrap();
    assert_eq!(0, rt.metrics().blocking_queue_depth());
    // Both blocking tasks park on this mutex until `guard` is dropped.
    let ready = Arc::new(Mutex::new(()));
    let guard = ready.lock().unwrap();
    let ready_cloned = ready.clone();
    let wait_until_ready = move || {
        let _unused = ready_cloned.lock().unwrap();
    };
    let h1 = rt.spawn_blocking(wait_until_ready.clone());
    let h2 = rt.spawn_blocking(wait_until_ready);
    // At least one of the two tasks is still queued (only one thread exists).
    assert!(rt.metrics().blocking_queue_depth() > 0);
    drop(guard);
    let _ = rt.block_on(h1);
    let _ = rt.block_on(h2);
    assert_eq!(0, rt.metrics().blocking_queue_depth());
}
// Spawning from a thread outside the runtime counts as a "remote" schedule
// for both runtime flavors.
#[test]
fn remote_schedule_count() {
    use std::thread;
    let rt = current_thread();
    let handle = rt.handle().clone();
    let task = thread::spawn(move || {
        handle.spawn(async {
            // DO nothing
        })
    })
    .join()
    .unwrap();
    rt.block_on(task).unwrap();
    assert_eq!(1, rt.metrics().remote_schedule_count());
    let rt = threaded();
    let handle = rt.handle().clone();
    let task = thread::spawn(move || {
        handle.spawn(async {
            // DO nothing
        })
    })
    .join()
    .unwrap();
    rt.block_on(task).unwrap();
    assert_eq!(1, rt.metrics().remote_schedule_count());
}
// Sleeping forces each worker to park at least once; metrics remain
// readable after the runtime is dropped.
#[test]
fn worker_park_count() {
    let rt = current_thread();
    let metrics = rt.metrics();
    rt.block_on(async {
        time::sleep(Duration::from_millis(1)).await;
    });
    drop(rt);
    assert!(1 <= metrics.worker_park_count(0));
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async {
        time::sleep(Duration::from_millis(1)).await;
    });
    drop(rt);
    assert!(1 <= metrics.worker_park_count(0));
    assert!(1 <= metrics.worker_park_count(1));
}
// There isn't really a great way to generate no-op parks as they happen as
// false-positive events under concurrency.
// NOTE(review): the lower bounds (2 vs 1) look empirical — confirm against
// scheduler behavior before tightening them.
#[test]
fn worker_noop_count() {
    let rt = current_thread();
    let metrics = rt.metrics();
    rt.block_on(async {
        time::sleep(Duration::from_millis(1)).await;
    });
    drop(rt);
    assert!(2 <= metrics.worker_noop_count(0));
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async {
        time::sleep(Duration::from_millis(1)).await;
    });
    drop(rt);
    assert!(1 <= metrics.worker_noop_count(0));
    assert!(1 <= metrics.worker_noop_count(1));
}
// This metric only applies to the multi-threaded runtime.
//
// We use a blocking channel to backup one worker thread.
#[test]
fn worker_steal_count() {
    use std::sync::mpsc::channel;
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async {
        let (tx, rx) = channel();
        // Move to the runtime.
        tokio::spawn(async move {
            // Spawn the task that sends to the channel
            tokio::spawn(async move {
                tx.send(()).unwrap();
            });
            // Spawn a task that bumps the previous task out of the "next
            // scheduled" slot.
            tokio::spawn(async {});
            // Blocking receive on the channel.
            // This stalls the current worker, forcing the other worker to
            // steal the queued sender task — producing exactly one steal.
            rx.recv().unwrap();
        })
        .await
        .unwrap();
    });
    drop(rt);
    let n: u64 = (0..metrics.num_workers())
        .map(|i| metrics.worker_steal_count(i))
        .sum();
    assert_eq!(1, n);
}
// Each spawned-and-awaited empty task is polled exactly once.
#[test]
fn worker_poll_count() {
    const N: u64 = 5;
    let rt = current_thread();
    let metrics = rt.metrics();
    rt.block_on(async {
        for _ in 0..N {
            tokio::spawn(async {}).await.unwrap();
        }
    });
    drop(rt);
    assert_eq!(N, metrics.worker_poll_count(0));
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async {
        for _ in 0..N {
            tokio::spawn(async {}).await.unwrap();
        }
    });
    drop(rt);
    // Account for the `block_on` task
    // NOTE(review): the comment above suggests an extra task, yet the
    // assertion expects exactly N — confirm whether the block_on task is
    // excluded from worker poll counts.
    let n = (0..metrics.num_workers())
        .map(|i| metrics.worker_poll_count(i))
        .sum();
    assert_eq!(N, n);
}
// Running tasks that yield forces every worker to accumulate nonzero busy
// time.
#[test]
fn worker_total_busy_duration() {
    const N: usize = 5;
    let zero = Duration::from_millis(0);
    let rt = current_thread();
    let metrics = rt.metrics();
    rt.block_on(async {
        for _ in 0..N {
            tokio::spawn(async {
                tokio::task::yield_now().await;
            })
            .await
            .unwrap();
        }
    });
    drop(rt);
    assert!(zero < metrics.worker_total_busy_duration(0));
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async {
        for _ in 0..N {
            tokio::spawn(async {
                tokio::task::yield_now().await;
            })
            .await
            .unwrap();
        }
    });
    drop(rt);
    for i in 0..metrics.num_workers() {
        assert!(zero < metrics.worker_total_busy_duration(i));
    }
}
// Spawning from *inside* the runtime counts as a local schedule; spawning
// via `block_on`'s calling thread on the threaded flavor counts as remote.
#[test]
fn worker_local_schedule_count() {
    let rt = current_thread();
    let metrics = rt.metrics();
    rt.block_on(async {
        tokio::spawn(async {}).await.unwrap();
    });
    drop(rt);
    assert_eq!(1, metrics.worker_local_schedule_count(0));
    assert_eq!(0, metrics.remote_schedule_count());
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async {
        // Move to the runtime
        tokio::spawn(async {
            tokio::spawn(async {}).await.unwrap();
        })
        .await
        .unwrap();
    });
    drop(rt);
    let n: u64 = (0..metrics.num_workers())
        .map(|i| metrics.worker_local_schedule_count(i))
        .sum();
    assert_eq!(2, n);
    assert_eq!(1, metrics.remote_schedule_count());
}
// Only applies to the threaded worker
// Spawning more tasks than the local run queue can hold (while the other
// worker is blocked and cannot steal) forces an overflow to the injection
// queue, which this metric records.
#[test]
fn worker_overflow_count() {
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async {
        // Move to the runtime
        tokio::spawn(async {
            let (tx1, rx1) = std::sync::mpsc::channel();
            let (tx2, rx2) = std::sync::mpsc::channel();
            // First, we need to block the other worker until all tasks have
            // been spawned.
            tokio::spawn(async move {
                tx1.send(()).unwrap();
                rx2.recv().unwrap();
            });
            // Bump the next-run spawn
            tokio::spawn(async {});
            rx1.recv().unwrap();
            // Spawn many tasks
            for _ in 0..300 {
                tokio::spawn(async {});
            }
            tx2.send(()).unwrap();
        })
        .await
        .unwrap();
    });
    drop(rt);
    let n: u64 = (0..metrics.num_workers())
        .map(|i| metrics.worker_overflow_count(i))
        .sum();
    assert_eq!(1, n);
}
// Tasks spawned from outside the runtime land in the injection queue and
// stay there until a worker picks them up.
#[test]
fn injection_queue_depth() {
    use std::thread;
    let rt = current_thread();
    let handle = rt.handle().clone();
    let metrics = rt.metrics();
    thread::spawn(move || {
        handle.spawn(async {});
    })
    .join()
    .unwrap();
    // Nothing is driving the current-thread runtime, so the task is still queued.
    assert_eq!(1, metrics.injection_queue_depth());
    let rt = threaded();
    let handle = rt.handle().clone();
    let metrics = rt.metrics();
    // First we need to block the runtime workers
    let (tx1, rx1) = std::sync::mpsc::channel();
    let (tx2, rx2) = std::sync::mpsc::channel();
    rt.spawn(async move { rx1.recv().unwrap() });
    rt.spawn(async move { rx2.recv().unwrap() });
    thread::spawn(move || {
        handle.spawn(async {});
    })
    .join()
    .unwrap();
    // Depth is racy here (workers may have already dequeued some tasks),
    // so only a range is asserted.
    let n = metrics.injection_queue_depth();
    assert!(1 <= n, "{}", n);
    assert!(3 >= n, "{}", n);
    tx1.send(()).unwrap();
    tx2.send(()).unwrap();
}
// Tasks spawned from inside a worker accumulate in that worker's local
// queue; with the other worker blocked, the combined depth equals the
// number of spawned-but-unpolled tasks.
#[test]
fn worker_local_queue_depth() {
    const N: usize = 100;
    let rt = current_thread();
    let metrics = rt.metrics();
    rt.block_on(async {
        for _ in 0..N {
            tokio::spawn(async {});
        }
        assert_eq!(N, metrics.worker_local_queue_depth(0));
    });
    let rt = threaded();
    let metrics = rt.metrics();
    rt.block_on(async move {
        // Move to the runtime
        tokio::spawn(async move {
            let (tx1, rx1) = std::sync::mpsc::channel();
            let (tx2, rx2) = std::sync::mpsc::channel();
            // First, we need to block the other worker until all tasks have
            // been spawned.
            tokio::spawn(async move {
                tx1.send(()).unwrap();
                rx2.recv().unwrap();
            });
            // Bump the next-run spawn
            tokio::spawn(async {});
            rx1.recv().unwrap();
            // Spawn some tasks
            for _ in 0..100 {
                tokio::spawn(async {});
            }
            let n: usize = (0..metrics.num_workers())
                .map(|i| metrics.worker_local_queue_depth(i))
                .sum();
            assert_eq!(n, N);
            tx2.send(()).unwrap();
        })
        .await
        .unwrap();
    });
}
// Registering a TCP stream bumps the fd-registered count; dropping it
// bumps the deregistered count without touching the registered count.
#[cfg(any(target_os = "linux", target_os = "macos"))]
#[test]
fn io_driver_fd_count() {
    let rt = current_thread();
    let metrics = rt.metrics();
    assert_eq!(metrics.io_driver_fd_registered_count(), 0);
    // NOTE(review): relies on outbound network access to google.com:80.
    let stream = tokio::net::TcpStream::connect("google.com:80");
    let stream = rt.block_on(async move { stream.await.unwrap() });
    assert_eq!(metrics.io_driver_fd_registered_count(), 1);
    assert_eq!(metrics.io_driver_fd_deregistered_count(), 0);
    drop(stream);
    assert_eq!(metrics.io_driver_fd_deregistered_count(), 1);
    assert_eq!(metrics.io_driver_fd_registered_count(), 1);
}
// Completing a TCP connect produces exactly one I/O readiness event.
#[cfg(any(target_os = "linux", target_os = "macos"))]
#[test]
fn io_driver_ready_count() {
    let rt = current_thread();
    let metrics = rt.metrics();
    // NOTE(review): relies on outbound network access to google.com:80.
    let stream = tokio::net::TcpStream::connect("google.com:80");
    let _stream = rt.block_on(async move { stream.await.unwrap() });
    assert_eq!(metrics.io_driver_ready_count(), 1);
}
/// Current-thread runtime with all drivers enabled, used throughout these
/// metrics tests.
fn current_thread() -> Runtime {
    let mut builder = tokio::runtime::Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}
/// Two-worker multi-threaded runtime with all drivers enabled.
fn threaded() -> Runtime {
    let mut builder = tokio::runtime::Builder::new_multi_thread();
    builder.worker_threads(2).enable_all();
    builder.build().unwrap()
}

77
vendor/tokio/tests/rt_panic.rs vendored Normal file
View file

@ -0,0 +1,77 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(not(tokio_wasi))] // Wasi doesn't support panic recovery
use futures::future;
use std::error::Error;
use tokio::runtime::{Builder, Handle, Runtime};
mod support {
pub mod panic;
}
use support::panic::test_panic;
// `Handle::current()` outside any runtime panics, and the panic location
// is attributed to the caller (this file), not tokio internals.
#[test]
fn current_handle_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let _ = Handle::current();
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
// `JoinError::into_panic` on a cancelled (not panicked) task panics, with
// the location attributed to the caller.
#[test]
fn into_panic_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(move || {
        let rt = current_thread();
        rt.block_on(async {
            let handle = tokio::spawn(future::pending::<()>());
            handle.abort();
            let err = handle.await.unwrap_err();
            // The task was aborted, not panicked, so `into_panic` is invalid.
            assert!(!&err.is_panic());
            let _ = err.into_panic();
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
// `Builder::worker_threads(0)` panics, attributed to the caller.
#[test]
fn builder_worker_threads_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let _ = Builder::new_multi_thread().worker_threads(0).build();
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
// `Builder::max_blocking_threads(0)` panics, attributed to the caller.
#[test]
fn builder_max_blocking_threads_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let _ = Builder::new_multi_thread().max_blocking_threads(0).build();
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
/// Current-thread runtime with all drivers enabled.
fn current_thread() -> Runtime {
    let mut builder = tokio::runtime::Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}

590
vendor/tokio/tests/rt_threaded.rs vendored Normal file
View file

@ -0,0 +1,590 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))]
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime;
use tokio::sync::oneshot;
use tokio_test::{assert_err, assert_ok};
use futures::future::poll_fn;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{mpsc, Arc, Mutex};
use std::task::{Context, Poll, Waker};
// Wraps a statement list so it only runs when the unstable metrics API is
// compiled in (`--cfg tokio_unstable`).
macro_rules! cfg_metrics {
    ($($t:tt)*) => {
        #[cfg(tokio_unstable)]
        {
            $( $t )*
        }
    }
}
#[test]
fn single_thread() {
    // No panic when starting a runtime w/ a single thread
    let _ = runtime::Builder::new_multi_thread()
        .enable_all()
        .worker_threads(1)
        .build();
}
// Stress test: spawn many tiny tasks and wait (via a std channel) until
// all of them have run, repeating to shake out races.
#[test]
fn many_oneshot_futures() {
    // used for notifying the main thread
    const NUM: usize = 1_000;
    for _ in 0..5 {
        let (tx, rx) = mpsc::channel();
        let rt = rt();
        let cnt = Arc::new(AtomicUsize::new(0));
        for _ in 0..NUM {
            let cnt = cnt.clone();
            let tx = tx.clone();
            rt.spawn(async move {
                let num = cnt.fetch_add(1, Relaxed) + 1;
                // The last task to run signals the main thread.
                if num == NUM {
                    tx.send(()).unwrap();
                }
            });
        }
        rx.recv().unwrap();
        // Wait for the pool to shutdown
        drop(rt);
    }
}
// A task spawned from `block_on` is a remote schedule; a task spawned from
// within a worker is a local schedule. Verified via metrics when unstable.
#[test]
fn spawn_two() {
    let rt = rt();
    let out = rt.block_on(async {
        let (tx, rx) = oneshot::channel();
        tokio::spawn(async move {
            tokio::spawn(async move {
                tx.send("ZOMG").unwrap();
            });
        });
        assert_ok!(rx.await)
    });
    assert_eq!(out, "ZOMG");
    cfg_metrics! {
        let metrics = rt.metrics();
        drop(rt);
        assert_eq!(1, metrics.remote_schedule_count());
        let mut local = 0;
        for i in 0..metrics.num_workers() {
            local += metrics.worker_local_schedule_count(i);
        }
        assert_eq!(1, local);
    }
}
// Stress test: TRACKS independent chains of CHAIN forwarding tasks, each
// message cycled through its chain CYCLES times before being reported.
#[test]
fn many_multishot_futures() {
    const CHAIN: usize = 200;
    const CYCLES: usize = 5;
    const TRACKS: usize = 50;
    for _ in 0..50 {
        let rt = rt();
        let mut start_txs = Vec::with_capacity(TRACKS);
        let mut final_rxs = Vec::with_capacity(TRACKS);
        for _ in 0..TRACKS {
            let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10);
            for _ in 0..CHAIN {
                let (next_tx, next_rx) = tokio::sync::mpsc::channel(10);
                // Forward all the messages
                rt.spawn(async move {
                    while let Some(v) = chain_rx.recv().await {
                        next_tx.send(v).await.unwrap();
                    }
                });
                chain_rx = next_rx;
            }
            // This final task cycles if needed
            let (final_tx, final_rx) = tokio::sync::mpsc::channel(10);
            let cycle_tx = start_tx.clone();
            let mut rem = CYCLES;
            rt.spawn(async move {
                for _ in 0..CYCLES {
                    let msg = chain_rx.recv().await.unwrap();
                    rem -= 1;
                    if rem == 0 {
                        // Last lap: deliver to the main task.
                        final_tx.send(msg).await.unwrap();
                    } else {
                        // Send the message back around the chain.
                        cycle_tx.send(msg).await.unwrap();
                    }
                }
            });
            start_txs.push(start_tx);
            final_rxs.push(final_rx);
        }
        {
            rt.block_on(async move {
                for start_tx in start_txs {
                    start_tx.send("ping").await.unwrap();
                }
                for mut final_rx in final_rxs {
                    final_rx.recv().await.unwrap();
                }
            });
        }
    }
}
// Tasks spawned both inside `block_on` and via the runtime handle complete;
// after the runtime is dropped the channel's senders are gone.
#[test]
fn spawn_shutdown() {
    let rt = rt();
    let (tx, rx) = mpsc::channel();
    rt.block_on(async {
        tokio::spawn(client_server(tx.clone()));
    });
    // Use spawner
    rt.spawn(client_server(tx));
    assert_ok!(rx.recv());
    assert_ok!(rx.recv());
    drop(rt);
    // Both senders dropped with the runtime; the channel is disconnected.
    assert_err!(rx.try_recv());
}
/// Spin up a loopback TCP server, have a client read "hello" from it, then
/// notify `tx` that the round trip completed.
async fn client_server(tx: mpsc::Sender<()>) {
    let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    // Get the assigned address
    let addr = assert_ok!(server.local_addr());
    // Spawn the server
    tokio::spawn(async move {
        // Accept a socket
        let (mut socket, _) = server.accept().await.unwrap();
        // Write some data
        socket.write_all(b"hello").await.unwrap();
    });
    let mut client = TcpStream::connect(&addr).await.unwrap();
    let mut buf = vec![];
    client.read_to_end(&mut buf).await.unwrap();
    assert_eq!(buf, b"hello");
    tx.send(()).unwrap();
}
// Dropping the runtime must drop pending (never-completing) futures, and
// every started worker thread must also be stopped.
#[test]
fn drop_threadpool_drops_futures() {
    for _ in 0..1_000 {
        let num_inc = Arc::new(AtomicUsize::new(0));
        let num_dec = Arc::new(AtomicUsize::new(0));
        let num_drop = Arc::new(AtomicUsize::new(0));
        // A future that never completes but counts its own drop.
        struct Never(Arc<AtomicUsize>);
        impl Future for Never {
            type Output = ();
            fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
                Poll::Pending
            }
        }
        impl Drop for Never {
            fn drop(&mut self) {
                self.0.fetch_add(1, Relaxed);
            }
        }
        let a = num_inc.clone();
        let b = num_dec.clone();
        let rt = runtime::Builder::new_multi_thread()
            .enable_all()
            .on_thread_start(move || {
                a.fetch_add(1, Relaxed);
            })
            .on_thread_stop(move || {
                b.fetch_add(1, Relaxed);
            })
            .build()
            .unwrap();
        rt.spawn(Never(num_drop.clone()));
        // Wait for the pool to shutdown
        drop(rt);
        // Assert that only a single thread was spawned.
        let a = num_inc.load(Relaxed);
        assert!(a >= 1);
        // Assert that all threads shutdown
        let b = num_dec.load(Relaxed);
        assert_eq!(a, b);
        // Assert that the future was dropped
        let c = num_drop.load(Relaxed);
        assert_eq!(c, 1);
    }
}
// The `on_thread_start` / `on_thread_stop` hooks fire at least once across
// a runtime's lifecycle.
#[test]
fn start_stop_callbacks_called() {
    use std::sync::atomic::{AtomicUsize, Ordering};
    let after_start = Arc::new(AtomicUsize::new(0));
    let before_stop = Arc::new(AtomicUsize::new(0));
    let after_inner = after_start.clone();
    let before_inner = before_stop.clone();
    let rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .on_thread_start(move || {
            // Use the captured Arc directly; cloning it on every hook
            // invocation (as before) was a redundant refcount bump.
            after_inner.fetch_add(1, Ordering::Relaxed);
        })
        .on_thread_stop(move || {
            before_inner.fetch_add(1, Ordering::Relaxed);
        })
        .build()
        .unwrap();
    let (tx, rx) = oneshot::channel();
    rt.spawn(async move {
        assert_ok!(tx.send(()));
    });
    assert_ok!(rt.block_on(rx));
    // Dropping the runtime joins worker threads, firing the stop hooks.
    drop(rt);
    assert!(after_start.load(Ordering::Relaxed) > 0);
    assert!(before_stop.load(Ordering::Relaxed) > 0);
}
// `block_in_place` on four tasks (all four workers) must hand the workers
// off so the subsequent 1_000 async tasks still make progress.
#[test]
fn blocking() {
    // used for notifying the main thread
    const NUM: usize = 1_000;
    for _ in 0..10 {
        let (tx, rx) = mpsc::channel();
        let rt = rt();
        let cnt = Arc::new(AtomicUsize::new(0));
        // there are four workers in the pool
        // so, if we run 4 blocking tasks, we know that handoff must have happened
        let block = Arc::new(std::sync::Barrier::new(5));
        for _ in 0..4 {
            let block = block.clone();
            rt.spawn(async move {
                tokio::task::block_in_place(move || {
                    block.wait();
                    block.wait();
                })
            });
        }
        // First barrier: all four workers are inside block_in_place.
        block.wait();
        for _ in 0..NUM {
            let cnt = cnt.clone();
            let tx = tx.clone();
            rt.spawn(async move {
                let num = cnt.fetch_add(1, Relaxed) + 1;
                if num == NUM {
                    tx.send(()).unwrap();
                }
            });
        }
        rx.recv().unwrap();
        // Wait for the pool to shutdown
        block.wait();
    }
}
// Two independent runtimes can communicate through a oneshot channel.
#[test]
fn multi_threadpool() {
    use tokio::sync::oneshot;
    let rt1 = rt();
    let rt2 = rt();
    let (tx, rx) = oneshot::channel();
    let (done_tx, done_rx) = mpsc::channel();
    rt2.spawn(async move {
        rx.await.unwrap();
        done_tx.send(()).unwrap();
    });
    rt1.spawn(async move {
        tx.send(()).unwrap();
    });
    done_rx.recv().unwrap();
}
// When `block_in_place` returns, it attempts to reclaim the yielded runtime
// worker. In this case, the remainder of the task is on the runtime worker and
// must take part in the cooperative task budgeting system.
//
// The test ensures that, when this happens, attempting to consume from a
// channel yields occasionally even if there are values ready to receive.
#[test]
fn coop_and_block_in_place() {
    let rt = tokio::runtime::Builder::new_multi_thread()
        // Setting max threads to 1 prevents another thread from claiming the
        // runtime worker yielded as part of `block_in_place` and guarantees the
        // same thread will reclaim the worker at the end of the
        // `block_in_place` call.
        .max_blocking_threads(1)
        .build()
        .unwrap();
    rt.block_on(async move {
        let (tx, mut rx) = tokio::sync::mpsc::channel(1024);
        // Fill the channel
        for _ in 0..1024 {
            tx.send(()).await.unwrap();
        }
        drop(tx);
        tokio::spawn(async move {
            // Block in place without doing anything
            tokio::task::block_in_place(|| {});
            // Receive all the values, this should trigger a `Pending` as the
            // coop limit will be reached.
            poll_fn(|cx| {
                while let Poll::Ready(v) = {
                    tokio::pin! {
                        let fut = rx.recv();
                    }
                    Pin::new(&mut fut).poll(cx)
                } {
                    // `None` would mean the channel drained without the coop
                    // budget ever forcing a Pending — i.e. no yield happened.
                    if v.is_none() {
                        panic!("did not yield");
                    }
                }
                Poll::Ready(())
            })
            .await
        })
        .await
        .unwrap();
    });
}
// A task may enter a fresh runtime inside `block_in_place`, then yield and
// complete normally once the worker is reclaimed.
#[test]
fn yield_after_block_in_place() {
    let rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(1)
        .build()
        .unwrap();
    rt.block_on(async {
        tokio::spawn(async move {
            // Block in place then enter a new runtime
            tokio::task::block_in_place(|| {
                let rt = tokio::runtime::Builder::new_current_thread()
                    .build()
                    .unwrap();
                rt.block_on(async {});
            });
            // Yield, then complete
            tokio::task::yield_now().await;
        })
        .await
        .unwrap()
    });
}
// Testing this does not panic
#[test]
fn max_blocking_threads() {
    let _rt = tokio::runtime::Builder::new_multi_thread()
        .max_blocking_threads(1)
        .build()
        .unwrap();
}
// Zero blocking threads is an invalid configuration and must panic.
#[test]
#[should_panic]
fn max_blocking_threads_set_to_zero() {
    let _rt = tokio::runtime::Builder::new_multi_thread()
        .max_blocking_threads(0)
        .build()
        .unwrap();
}
// A task stuck in `block_in_place` on a std channel must not deadlock
// shutdown: dropping the sender (after 2s) unblocks it.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn hang_on_shutdown() {
    let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>();
    tokio::spawn(async move {
        tokio::task::block_in_place(|| sync_rx.recv().ok());
    });
    tokio::spawn(async {
        tokio::time::sleep(std::time::Duration::from_secs(2)).await;
        drop(sync_tx);
    });
    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
/// Demonstrates tokio-rs/tokio#3869
///
/// Two futures share a waker slot. Dropping one future during runtime
/// shutdown wakes the other, which must not crash the shutdown path.
#[test]
fn wake_during_shutdown() {
    struct Shared {
        waker: Option<Waker>,
    }
    struct MyFuture {
        shared: Arc<Mutex<Shared>>,
        // `true` for the future that stores its waker; `false` for the one
        // that triggers the wake from its destructor.
        put_waker: bool,
    }
    impl MyFuture {
        fn new() -> (Self, Self) {
            let shared = Arc::new(Mutex::new(Shared { waker: None }));
            let f1 = MyFuture {
                shared: shared.clone(),
                put_waker: true,
            };
            let f2 = MyFuture {
                shared,
                put_waker: false,
            };
            (f1, f2)
        }
    }
    impl Future for MyFuture {
        type Output = ();
        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
            let me = Pin::into_inner(self);
            let mut lock = me.shared.lock().unwrap();
            if me.put_waker {
                lock.waker = Some(cx.waker().clone());
            }
            Poll::Pending
        }
    }
    impl Drop for MyFuture {
        fn drop(&mut self) {
            let mut lock = self.shared.lock().unwrap();
            if !self.put_waker {
                // Wake the sibling future while the runtime is shutting down.
                lock.waker.take().unwrap().wake();
            }
            drop(lock);
        }
    }
    let rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(1)
        .enable_all()
        .build()
        .unwrap();
    let (f1, f2) = MyFuture::new();
    rt.spawn(f1);
    rt.spawn(f2);
    rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await });
}
// `block_in_place` panics on the (default current-thread) `#[tokio::test]` runtime.
#[should_panic]
#[tokio::test]
async fn test_block_in_place1() {
    tokio::task::block_in_place(|| {});
}
// `block_in_place` is allowed on a multi-threaded test runtime.
#[tokio::test(flavor = "multi_thread")]
async fn test_block_in_place2() {
    tokio::task::block_in_place(|| {});
}
// Same checks via `#[tokio::main]`: panics on current_thread flavor.
#[should_panic]
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_block_in_place3() {
    tokio::task::block_in_place(|| {});
}
// ... and succeeds on the default (multi-threaded) `#[tokio::main]` flavor.
#[tokio::main]
#[test]
async fn test_block_in_place4() {
    tokio::task::block_in_place(|| {});
}
/// Default multi-threaded runtime used by the tests in this file.
fn rt() -> runtime::Runtime {
    let rt = runtime::Runtime::new();
    rt.unwrap()
}
#[cfg(tokio_unstable)]
mod unstable {
    use super::*;
    // With the LIFO slot disabled, a task spawned from a worker lands in the
    // run queue, so another worker can execute it even while the spawner
    // blocks its own thread.
    #[test]
    fn test_disable_lifo_slot() {
        let rt = runtime::Builder::new_multi_thread()
            .disable_lifo_slot()
            .worker_threads(2)
            .build()
            .unwrap();
        rt.block_on(async {
            tokio::spawn(async {
                // Spawn another task and block the thread until completion. If the LIFO slot
                // is used then the test doesn't complete.
                futures::executor::block_on(tokio::spawn(async {})).unwrap();
            })
            .await
            .unwrap();
        })
    }
}

30
vendor/tokio/tests/signal_ctrl_c.rs vendored Normal file
View file

@ -0,0 +1,30 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal;
use tokio::sync::oneshot;
use tokio_test::assert_ok;
// `signal::ctrl_c()` resolves when SIGINT is delivered.
#[tokio::test]
async fn ctrl_c() {
    let ctrl_c = signal::ctrl_c();
    let (fire, wait) = oneshot::channel();
    // NB: simulate a signal coming in by exercising our signal handler
    // to avoid complications with sending SIGINT to the test process
    tokio::spawn(async {
        wait.await.expect("wait failed");
        send_signal(libc::SIGINT);
    });
    let _ = fire.send(());
    assert_ok!(ctrl_c.await);
}

22
vendor/tokio/tests/signal_drop_recv.rs vendored Normal file
View file

@ -0,0 +1,22 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal::unix::{signal, SignalKind};
// Dropping a signal listener and creating a new one for the same kind must
// not break delivery: the second listener still receives the signal.
#[tokio::test]
async fn drop_then_get_a_signal() {
    let kind = SignalKind::user_defined1();
    let sig = signal(kind).expect("failed to create first signal");
    drop(sig);
    send_signal(libc::SIGUSR1);
    let mut sig = signal(kind).expect("failed to create second signal");
    let _ = sig.recv().await;
}

44
vendor/tokio/tests/signal_drop_rt.rs vendored Normal file
View file

@ -0,0 +1,44 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::runtime::Runtime;
use tokio::signal::unix::{signal, SignalKind};
// Dropping a runtime (and its signal listener) must not starve listeners
// registered on another runtime for the same signal kind.
#[test]
fn dropping_loops_does_not_cause_starvation() {
    let kind = SignalKind::user_defined1();
    let first_rt = rt();
    let mut first_signal =
        first_rt.block_on(async { signal(kind).expect("failed to register first signal") });
    let second_rt = rt();
    let mut second_signal =
        second_rt.block_on(async { signal(kind).expect("failed to register second signal") });
    send_signal(libc::SIGUSR1);
    first_rt
        .block_on(first_signal.recv())
        .expect("failed to await first signal");
    drop(first_rt);
    drop(first_signal);
    send_signal(libc::SIGUSR1);
    second_rt.block_on(second_signal.recv());
}
/// Build a single-threaded runtime with all drivers enabled.
fn rt() -> Runtime {
    let build_result = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build();
    build_result.unwrap()
}

View file

@ -0,0 +1,26 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal::unix::{signal, SignalKind};
#[tokio::test]
async fn dropping_signal_does_not_deregister_any_other_instances() {
    let kind = SignalKind::user_defined1();
    // Signals should not starve based on ordering
    // Register extra listeners both before and after the one we keep,
    // then drop both duplicates.
    let first_duplicate_signal = signal(kind).expect("failed to register first duplicate signal");
    let mut sig = signal(kind).expect("failed to register signal");
    let second_duplicate_signal = signal(kind).expect("failed to register second duplicate signal");
    drop(first_duplicate_signal);
    drop(second_duplicate_signal);
    // The surviving listener must still receive the signal.
    send_signal(libc::SIGUSR1);
    let _ = sig.recv().await;
}

54
vendor/tokio/tests/signal_multi_rt.rs vendored Normal file
View file

@ -0,0 +1,54 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::runtime::Runtime;
use tokio::signal::unix::{signal, SignalKind};
use std::sync::mpsc::channel;
use std::thread;
/// Spin up several current-thread runtimes on separate threads, each
/// listening for SIGHUP, and check that one signal wakes all of them.
/// Repeated in a loop to shake out race conditions.
#[test]
fn multi_loop() {
    // An "ordinary" (non-future) channel
    let (ready_tx, ready_rx) = channel();
    // Run multiple times, to make sure there are no race conditions
    for _ in 0..10 {
        // Run multiple event loops, each one in its own thread
        let handles: Vec<_> = (0..4)
            .map(|_| {
                let ready_tx = ready_tx.clone();
                thread::spawn(move || {
                    let runtime = rt();
                    let _ = runtime.block_on(async {
                        let mut hup = signal(SignalKind::hangup()).unwrap();
                        ready_tx.send(()).unwrap();
                        hup.recv().await
                    });
                })
            })
            .collect();
        // Wait for them to declare they're ready
        for _ in 0..handles.len() {
            ready_rx.recv().unwrap();
        }
        // Send a signal
        send_signal(libc::SIGHUP);
        // Make sure the threads terminated correctly
        for handle in handles {
            handle.join().unwrap();
        }
    }
}
/// Construct a fresh single-threaded runtime with all drivers enabled.
fn rt() -> Runtime {
    use tokio::runtime::Builder;
    Builder::new_current_thread().enable_all().build().unwrap()
}

12
vendor/tokio/tests/signal_no_rt.rs vendored Normal file
View file

@ -0,0 +1,12 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
use tokio::signal::unix::{signal, SignalKind};
#[cfg_attr(tokio_wasi, ignore = "Wasi does not support panic recovery")]
#[test]
#[should_panic]
fn no_runtime_panics_creating_signals() {
    // Creating a signal listener outside of any runtime must panic
    // rather than silently fail.
    let _ = signal(SignalKind::hangup());
}

View file

@ -0,0 +1,23 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal::unix::{signal, SignalKind};
#[tokio::test]
async fn notify_both() {
    // Two listeners registered for the same signal kind must each
    // observe a single delivery of that signal.
    let kind = SignalKind::user_defined2();
    let mut signal1 = signal(kind).expect("failed to create signal1");
    let mut signal2 = signal(kind).expect("failed to create signal2");
    send_signal(libc::SIGUSR2);
    signal1.recv().await;
    signal2.recv().await;
}

29
vendor/tokio/tests/signal_panic.rs vendored Normal file
View file

@ -0,0 +1,29 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
use std::error::Error;
use tokio::runtime::Builder;
use tokio::signal::unix::{signal, SignalKind};
mod support {
pub mod panic;
}
use support::panic::test_panic;
#[test]
fn signal_panic_caller() -> Result<(), Box<dyn Error>> {
    // A panic raised while registering an invalid SignalKind should be
    // reported at the caller's location (this file), not inside tokio.
    let panic_location_file = test_panic(|| {
        let rt = Builder::new_current_thread().build().unwrap();
        rt.block_on(async {
            // -1 is never a valid raw signal number.
            let kind = SignalKind::from_raw(-1);
            let _ = signal(kind);
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}

22
vendor/tokio/tests/signal_twice.rs vendored Normal file
View file

@ -0,0 +1,22 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal::unix::{signal, SignalKind};
/// A single listener must be able to observe the same signal kind
/// delivered multiple times in sequence.
#[tokio::test]
async fn twice() {
    let kind = SignalKind::user_defined1();
    let mut sig = signal(kind).expect("failed to get signal");
    let mut remaining = 2;
    while remaining > 0 {
        send_signal(libc::SIGUSR1);
        assert!(sig.recv().await.is_some());
        remaining -= 1;
    }
}

23
vendor/tokio/tests/signal_usr1.rs vendored Normal file
View file

@ -0,0 +1,23 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal::unix::{signal, SignalKind};
use tokio_test::assert_ok;
#[tokio::test]
async fn signal_usr1() {
    // Smoke test: register for SIGUSR1, raise it, and wait for delivery.
    let mut signal = assert_ok!(
        signal(SignalKind::user_defined1()),
        "failed to create signal"
    );
    send_signal(libc::SIGUSR1);
    signal.recv().await;
}

45
vendor/tokio/tests/support/io_vec.rs vendored Normal file
View file

@ -0,0 +1,45 @@
use std::io::IoSlice;
use std::ops::Deref;
use std::slice;
// A mutable view over a list of `IoSlice`s that can be advanced past
// fully-consumed buffers, mirroring how vectored writes consume input.
pub struct IoBufs<'a, 'b>(&'b mut [IoSlice<'a>]);
impl<'a, 'b> IoBufs<'a, 'b> {
    // Wrap a mutable slice of IoSlices.
    pub fn new(slices: &'b mut [IoSlice<'a>]) -> Self {
        IoBufs(slices)
    }
    // True when no buffers (and therefore no bytes) remain.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    // Consume `n` bytes from the front of the list: drop every slice that
    // becomes fully consumed and trim the first partially-consumed one.
    // Panics if `n` exceeds the total remaining length.
    pub fn advance(mut self, n: usize) -> IoBufs<'a, 'b> {
        let mut to_remove = 0;
        let mut remaining_len = n;
        for slice in self.0.iter() {
            if remaining_len < slice.len() {
                break;
            } else {
                remaining_len -= slice.len();
                to_remove += 1;
            }
        }
        self.0 = self.0.split_at_mut(to_remove).1;
        if let Some(slice) = self.0.first_mut() {
            let tail = &slice[remaining_len..];
            // Safety: recasts slice to the original lifetime
            let tail = unsafe { slice::from_raw_parts(tail.as_ptr(), tail.len()) };
            *slice = IoSlice::new(tail);
        } else if remaining_len != 0 {
            panic!("advance past the end of the slice vector");
        }
        self
    }
}
// Deref to the underlying IoSlice list so callers can pass this view
// directly to vectored I/O APIs.
impl<'a, 'b> Deref for IoBufs<'a, 'b> {
    type Target = [IoSlice<'a>];
    fn deref(&self) -> &[IoSlice<'a>] {
        self.0
    }
}

View file

@ -0,0 +1,26 @@
/// Can create buffers of arbitrary lifetime.
/// Frees created buffers when dropped.
///
/// This struct is of course unsafe and the fact that
/// it must outlive the created slices has to be ensured by
/// the programmer.
///
/// Used at certain test scenarios as a safer version of
/// Vec::leak, to satisfy the address sanitizer.
pub struct LeakedBuffers {
    // Owned backing storage; freed when this struct is dropped.
    leaked_vecs: Vec<Box<[u8]>>,
}
impl LeakedBuffers {
    pub fn new() -> Self {
        Self {
            leaked_vecs: vec![],
        }
    }
    /// Allocate a zeroed buffer of `size` bytes and hand out a mutable
    /// slice with a caller-chosen lifetime `'a`.
    ///
    /// # Safety
    /// The returned slice aliases memory owned by `self`; the caller
    /// must ensure `self` outlives every slice created this way.
    pub unsafe fn create<'a>(&mut self, size: usize) -> &'a mut [u8] {
        let mut new_mem = vec![0u8; size].into_boxed_slice();
        let slice = std::slice::from_raw_parts_mut(new_mem.as_mut_ptr(), new_mem.len());
        self.leaked_vecs.push(new_mem);
        slice
    }
}

View file

@ -0,0 +1,42 @@
#![allow(dead_code)]
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::mpsc::{self, Receiver, Sender, UnboundedReceiver, UnboundedSender};
use tokio_stream::Stream;
// Adapter exposing an UnboundedReceiver as a `Stream`.
struct UnboundedStream<T> {
    recv: UnboundedReceiver<T>,
}
impl<T> Stream for UnboundedStream<T> {
    type Item = T;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
        Pin::into_inner(self).recv.poll_recv(cx)
    }
}
// Create an unbounded mpsc channel whose receiving half is a `Stream`.
pub fn unbounded_channel_stream<T: Unpin>() -> (UnboundedSender<T>, impl Stream<Item = T>) {
    let (tx, rx) = mpsc::unbounded_channel();
    let stream = UnboundedStream { recv: rx };
    (tx, stream)
}
// Adapter exposing a bounded Receiver as a `Stream`.
struct BoundedStream<T> {
    recv: Receiver<T>,
}
impl<T> Stream for BoundedStream<T> {
    type Item = T;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<T>> {
        Pin::into_inner(self).recv.poll_recv(cx)
    }
}
// Create a bounded mpsc channel of capacity `size` whose receiving half
// is a `Stream`.
pub fn channel_stream<T: Unpin>(size: usize) -> (Sender<T>, impl Stream<Item = T>) {
    let (tx, rx) = mpsc::channel(size);
    let stream = BoundedStream { recv: rx };
    (tx, stream)
}

34
vendor/tokio/tests/support/panic.rs vendored Normal file
View file

@ -0,0 +1,34 @@
use parking_lot::{const_mutex, Mutex};
use std::panic;
use std::sync::Arc;
// Run `func`, catching any panic, and return the source-file path that
// the panic location reported (None if `func` did not panic).
//
// A global mutex serializes callers because the panic hook being
// swapped here is process-wide state.
pub fn test_panic<Func: FnOnce() + panic::UnwindSafe>(func: Func) -> Option<String> {
    static PANIC_MUTEX: Mutex<()> = const_mutex(());
    {
        let _guard = PANIC_MUTEX.lock();
        let panic_file: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
        let prev_hook = panic::take_hook();
        {
            let panic_file = panic_file.clone();
            // Record only the file of the panic location; callers assert
            // against it to check panics are attributed to their file.
            panic::set_hook(Box::new(move |panic_info| {
                let panic_location = panic_info.location().unwrap();
                panic_file
                    .lock()
                    .clone_from(&Some(panic_location.file().to_string()));
            }));
        }
        let result = panic::catch_unwind(func);
        // Return to the previously set panic hook (maybe default) so that we get nice error
        // messages in the tests.
        panic::set_hook(prev_hook);
        if result.is_err() {
            panic_file.lock().clone()
        } else {
            None
        }
    }
}

7
vendor/tokio/tests/support/signal.rs vendored Normal file
View file

@ -0,0 +1,7 @@
pub fn send_signal(signal: libc::c_int) {
use libc::{getpid, kill};
unsafe {
assert_eq!(kill(getpid(), signal), 0);
}
}

99
vendor/tokio/tests/sync_barrier.rs vendored Normal file
View file

@ -0,0 +1,99 @@
#![allow(clippy::unnecessary_operation)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
use tokio::sync::Barrier;
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready};
// Compile-time probe: constructing this requires `T: Send`.
struct IsSend<T: Send>(T);
#[test]
fn barrier_future_is_send() {
    // The future returned by `Barrier::wait()` must be Send.
    let b = Barrier::new(0);
    IsSend(b.wait());
}
/// A zero-sized barrier must never block: every waiter completes
/// immediately and each is reported as the leader.
#[test]
fn zero_does_not_block() {
    let b = Barrier::new(0);
    for _ in 0..2 {
        let mut waiter = spawn(b.wait());
        let result = assert_ready!(waiter.poll());
        assert!(result.is_leader());
    }
}
/// A barrier of size one releases on every wait, each waiter being
/// its own leader.
#[test]
fn single() {
    let b = Barrier::new(1);
    for _ in 0..3 {
        let mut waiter = spawn(b.wait());
        let result = assert_ready!(waiter.poll());
        assert!(result.is_leader());
    }
}
#[test]
fn tango() {
    // With a two-party barrier the first waiter parks until the second
    // arrives; exactly one of the two is then designated leader.
    let b = Barrier::new(2);
    let mut w1 = spawn(b.wait());
    assert_pending!(w1.poll());
    let mut w2 = spawn(b.wait());
    let wr2 = assert_ready!(w2.poll());
    let wr1 = assert_ready!(w1.poll());
    assert!(wr1.is_leader() || wr2.is_leader());
    assert!(!(wr1.is_leader() && wr2.is_leader()));
}
#[test]
fn lots() {
    // 100-party barrier released 10 times in a row: 99 waiters park, the
    // 100th releases everyone, and exactly one leader is chosen per round.
    let b = Barrier::new(100);
    for _ in 0..10 {
        let mut wait = Vec::new();
        for _ in 0..99 {
            let mut w = spawn(b.wait());
            assert_pending!(w.poll());
            wait.push(w);
        }
        // Re-polling parked waiters must still be pending.
        for w in &mut wait {
            assert_pending!(w.poll());
        }
        // pass the barrier
        let mut w = spawn(b.wait());
        let mut found_leader = assert_ready!(w.poll()).is_leader();
        for mut w in wait {
            let wr = assert_ready!(w.poll());
            if wr.is_leader() {
                // At most one leader per barrier generation.
                assert!(!found_leader);
                found_leader = true;
            }
        }
        assert!(found_leader);
    }
}

528
vendor/tokio/tests/sync_broadcast.rs vendored Normal file
View file

@ -0,0 +1,528 @@
#![allow(clippy::cognitive_complexity)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
use tokio::sync::broadcast;
use tokio_test::task;
use tokio_test::{
assert_err, assert_ok, assert_pending, assert_ready, assert_ready_err, assert_ready_ok,
};
use std::sync::Arc;
// Receive a value via try_recv, panicking if the channel is empty,
// lagged, or closed.
macro_rules! assert_recv {
    ($e:expr) => {
        match $e.try_recv() {
            Ok(value) => value,
            Err(e) => panic!("expected recv; got = {:?}", e),
        }
    };
}
// Assert try_recv reports an empty (but still open) channel.
macro_rules! assert_empty {
    ($e:expr) => {
        match $e.try_recv() {
            Ok(value) => panic!("expected empty; got = {:?}", value),
            Err(broadcast::error::TryRecvError::Empty) => {}
            Err(e) => panic!("expected empty; got = {:?}", e),
        }
    };
}
// Assert the receiver lagged by exactly `$n` missed messages.
macro_rules! assert_lagged {
    ($e:expr, $n:expr) => {
        match assert_err!($e) {
            broadcast::error::TryRecvError::Lagged(n) => {
                assert_eq!(n, $n);
            }
            _ => panic!("did not lag"),
        }
    };
}
// Assert the channel reports Closed (all senders dropped and drained).
macro_rules! assert_closed {
    ($e:expr) => {
        match assert_err!($e) {
            broadcast::error::TryRecvError::Closed => {}
            _ => panic!("is not closed"),
        }
    };
}
// Compile-time check that broadcast handles are Send + Sync.
trait AssertSend: Send + Sync {}
impl AssertSend for broadcast::Sender<i32> {}
impl AssertSend for broadcast::Receiver<i32> {}
/// A value sent on a bounded broadcast channel is observable via
/// try_recv exactly once per receiver.
#[test]
fn send_try_recv_bounded() {
    let (sender, mut receiver) = broadcast::channel(16);
    assert_empty!(receiver);
    let receiver_count = assert_ok!(sender.send("hello"));
    assert_eq!(receiver_count, 1);
    let received = assert_recv!(receiver);
    assert_eq!(received, "hello");
    assert_empty!(receiver);
}
#[test]
fn send_two_recv() {
let (tx, mut rx1) = broadcast::channel(16);
let mut rx2 = tx.subscribe();
assert_empty!(rx1);
assert_empty!(rx2);
let n = assert_ok!(tx.send("hello"));
assert_eq!(n, 2);
let val = assert_recv!(rx1);
assert_eq!(val, "hello");
let val = assert_recv!(rx2);
assert_eq!(val, "hello");
assert_empty!(rx1);
assert_empty!(rx2);
}
#[test]
fn send_recv_bounded() {
let (tx, mut rx) = broadcast::channel(16);
let mut recv = task::spawn(rx.recv());
assert_pending!(recv.poll());
assert_ok!(tx.send("hello"));
assert!(recv.is_woken());
let val = assert_ready_ok!(recv.poll());
assert_eq!(val, "hello");
}
#[test]
fn send_two_recv_bounded() {
let (tx, mut rx1) = broadcast::channel(16);
let mut rx2 = tx.subscribe();
let mut recv1 = task::spawn(rx1.recv());
let mut recv2 = task::spawn(rx2.recv());
assert_pending!(recv1.poll());
assert_pending!(recv2.poll());
assert_ok!(tx.send("hello"));
assert!(recv1.is_woken());
assert!(recv2.is_woken());
let val1 = assert_ready_ok!(recv1.poll());
let val2 = assert_ready_ok!(recv2.poll());
assert_eq!(val1, "hello");
assert_eq!(val2, "hello");
drop((recv1, recv2));
let mut recv1 = task::spawn(rx1.recv());
let mut recv2 = task::spawn(rx2.recv());
assert_pending!(recv1.poll());
assert_ok!(tx.send("world"));
assert!(recv1.is_woken());
assert!(!recv2.is_woken());
let val1 = assert_ready_ok!(recv1.poll());
let val2 = assert_ready_ok!(recv2.poll());
assert_eq!(val1, "world");
assert_eq!(val2, "world");
}
#[test]
fn change_tasks() {
let (tx, mut rx) = broadcast::channel(1);
let mut recv = Box::pin(rx.recv());
let mut task1 = task::spawn(&mut recv);
assert_pending!(task1.poll());
let mut task2 = task::spawn(&mut recv);
assert_pending!(task2.poll());
tx.send("hello").unwrap();
assert!(task2.is_woken());
}
#[test]
fn send_slow_rx() {
let (tx, mut rx1) = broadcast::channel(16);
let mut rx2 = tx.subscribe();
{
let mut recv2 = task::spawn(rx2.recv());
{
let mut recv1 = task::spawn(rx1.recv());
assert_pending!(recv1.poll());
assert_pending!(recv2.poll());
assert_ok!(tx.send("one"));
assert!(recv1.is_woken());
assert!(recv2.is_woken());
assert_ok!(tx.send("two"));
let val = assert_ready_ok!(recv1.poll());
assert_eq!(val, "one");
}
let val = assert_ready_ok!(task::spawn(rx1.recv()).poll());
assert_eq!(val, "two");
let mut recv1 = task::spawn(rx1.recv());
assert_pending!(recv1.poll());
assert_ok!(tx.send("three"));
assert!(recv1.is_woken());
let val = assert_ready_ok!(recv1.poll());
assert_eq!(val, "three");
let val = assert_ready_ok!(recv2.poll());
assert_eq!(val, "one");
}
let val = assert_recv!(rx2);
assert_eq!(val, "two");
let val = assert_recv!(rx2);
assert_eq!(val, "three");
}
#[test]
fn drop_rx_while_values_remain() {
let (tx, mut rx1) = broadcast::channel(16);
let mut rx2 = tx.subscribe();
assert_ok!(tx.send("one"));
assert_ok!(tx.send("two"));
assert_recv!(rx1);
assert_recv!(rx2);
drop(rx2);
drop(rx1);
}
/// A receiver that falls behind a bounded channel's capacity observes a
/// `Lagged(n)` error reporting how many messages it missed, and then
/// resumes from the oldest retained message.
// Fix: removed a leftover `dbg!` around `rx2.try_recv()` that printed
// to stderr on every test run.
#[test]
fn lagging_rx() {
    let (tx, mut rx1) = broadcast::channel(2);
    let mut rx2 = tx.subscribe();
    assert_ok!(tx.send("one"));
    assert_ok!(tx.send("two"));
    assert_eq!("one", assert_recv!(rx1));
    assert_ok!(tx.send("three"));
    // Lagged too far: "one" was overwritten before rx2 read it.
    assert_lagged!(rx2.try_recv(), 1);
    // Calling again gets the next value
    assert_eq!("two", assert_recv!(rx2));
    assert_eq!("two", assert_recv!(rx1));
    assert_eq!("three", assert_recv!(rx1));
    assert_ok!(tx.send("four"));
    assert_ok!(tx.send("five"));
    assert_lagged!(rx2.try_recv(), 1);
    assert_ok!(tx.send("six"));
    assert_lagged!(rx2.try_recv(), 1);
}
#[test]
fn send_no_rx() {
let (tx, _) = broadcast::channel(16);
assert_err!(tx.send("hello"));
let mut rx = tx.subscribe();
assert_ok!(tx.send("world"));
let val = assert_recv!(rx);
assert_eq!("world", val);
}
#[test]
#[should_panic]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
fn zero_capacity() {
broadcast::channel::<()>(0);
}
/// Requesting a capacity above the supported maximum (more than
/// `usize::MAX >> 1`) must panic.
// Fix: dropped `use std::usize;` — the `std::usize` module is
// deprecated; `usize::MAX` is an associated constant and needs no import.
#[test]
#[should_panic]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
fn capacity_too_big() {
    broadcast::channel::<()>(1 + (usize::MAX >> 1));
}
#[test]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
fn panic_in_clone() {
use std::panic::{self, AssertUnwindSafe};
#[derive(Eq, PartialEq, Debug)]
struct MyVal(usize);
impl Clone for MyVal {
fn clone(&self) -> MyVal {
assert_ne!(0, self.0);
MyVal(self.0)
}
}
let (tx, mut rx) = broadcast::channel(16);
assert_ok!(tx.send(MyVal(0)));
assert_ok!(tx.send(MyVal(1)));
let res = panic::catch_unwind(AssertUnwindSafe(|| {
let _ = rx.try_recv();
}));
assert_err!(res);
let val = assert_recv!(rx);
assert_eq!(val, MyVal(1));
}
#[test]
fn dropping_tx_notifies_rx() {
    // Receivers are only woken with a Closed error once the *last*
    // sender is dropped; dropping one of two senders must not wake them.
    let (tx, mut rx1) = broadcast::channel::<()>(16);
    let mut rx2 = tx.subscribe();
    let tx2 = tx.clone();
    let mut recv1 = task::spawn(rx1.recv());
    let mut recv2 = task::spawn(rx2.recv());
    assert_pending!(recv1.poll());
    assert_pending!(recv2.poll());
    drop(tx);
    assert_pending!(recv1.poll());
    assert_pending!(recv2.poll());
    drop(tx2);
    assert!(recv1.is_woken());
    assert!(recv2.is_woken());
    let err = assert_ready_err!(recv1.poll());
    assert!(is_closed(err));
    let err = assert_ready_err!(recv2.poll());
    assert!(is_closed(err));
}
#[test]
fn unconsumed_messages_are_dropped() {
let (tx, rx) = broadcast::channel(16);
let msg = Arc::new(());
assert_ok!(tx.send(msg.clone()));
assert_eq!(2, Arc::strong_count(&msg));
drop(rx);
assert_eq!(1, Arc::strong_count(&msg));
}
#[test]
fn single_capacity_recvs() {
let (tx, mut rx) = broadcast::channel(1);
assert_ok!(tx.send(1));
assert_eq!(assert_recv!(rx), 1);
assert_empty!(rx);
}
#[test]
fn single_capacity_recvs_after_drop_1() {
let (tx, mut rx) = broadcast::channel(1);
assert_ok!(tx.send(1));
drop(tx);
assert_eq!(assert_recv!(rx), 1);
assert_closed!(rx.try_recv());
}
#[test]
fn single_capacity_recvs_after_drop_2() {
let (tx, mut rx) = broadcast::channel(1);
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
drop(tx);
assert_lagged!(rx.try_recv(), 1);
assert_eq!(assert_recv!(rx), 2);
assert_closed!(rx.try_recv());
}
#[test]
fn dropping_sender_does_not_overwrite() {
let (tx, mut rx) = broadcast::channel(2);
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
drop(tx);
assert_eq!(assert_recv!(rx), 1);
assert_eq!(assert_recv!(rx), 2);
assert_closed!(rx.try_recv());
}
#[test]
fn lagging_receiver_recovers_after_wrap_closed_1() {
let (tx, mut rx) = broadcast::channel(2);
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
assert_ok!(tx.send(3));
drop(tx);
assert_lagged!(rx.try_recv(), 1);
assert_eq!(assert_recv!(rx), 2);
assert_eq!(assert_recv!(rx), 3);
assert_closed!(rx.try_recv());
}
#[test]
fn lagging_receiver_recovers_after_wrap_closed_2() {
let (tx, mut rx) = broadcast::channel(2);
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
assert_ok!(tx.send(3));
assert_ok!(tx.send(4));
drop(tx);
assert_lagged!(rx.try_recv(), 2);
assert_eq!(assert_recv!(rx), 3);
assert_eq!(assert_recv!(rx), 4);
assert_closed!(rx.try_recv());
}
#[test]
fn lagging_receiver_recovers_after_wrap_open() {
let (tx, mut rx) = broadcast::channel(2);
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
assert_ok!(tx.send(3));
assert_lagged!(rx.try_recv(), 1);
assert_eq!(assert_recv!(rx), 2);
assert_eq!(assert_recv!(rx), 3);
assert_empty!(rx);
}
#[test]
fn receiver_len_with_lagged() {
let (tx, mut rx) = broadcast::channel(3);
tx.send(10).unwrap();
tx.send(20).unwrap();
tx.send(30).unwrap();
tx.send(40).unwrap();
assert_eq!(rx.len(), 4);
assert_eq!(assert_recv!(rx), 10);
tx.send(50).unwrap();
tx.send(60).unwrap();
assert_eq!(rx.len(), 5);
assert_lagged!(rx.try_recv(), 1);
}
// True if the recv error indicates the channel was closed.
fn is_closed(err: broadcast::error::RecvError) -> bool {
    matches!(err, broadcast::error::RecvError::Closed)
}
#[test]
fn resubscribe_points_to_tail() {
let (tx, mut rx) = broadcast::channel(3);
tx.send(1).unwrap();
let mut rx_resub = rx.resubscribe();
// verify we're one behind at the start
assert_empty!(rx_resub);
assert_eq!(assert_recv!(rx), 1);
// verify we do not affect rx
tx.send(2).unwrap();
assert_eq!(assert_recv!(rx_resub), 2);
tx.send(3).unwrap();
assert_eq!(assert_recv!(rx), 2);
assert_eq!(assert_recv!(rx), 3);
assert_empty!(rx);
assert_eq!(assert_recv!(rx_resub), 3);
assert_empty!(rx_resub);
}
#[test]
fn resubscribe_lagged() {
let (tx, mut rx) = broadcast::channel(1);
tx.send(1).unwrap();
tx.send(2).unwrap();
let mut rx_resub = rx.resubscribe();
assert_lagged!(rx.try_recv(), 1);
assert_empty!(rx_resub);
assert_eq!(assert_recv!(rx), 2);
assert_empty!(rx);
assert_empty!(rx_resub);
}
/// Resubscribing on a receiver whose senders are all gone yields a
/// receiver that immediately reports Closed.
#[test]
fn resubscribe_to_closed_channel() {
    let (sender, receiver) = tokio::sync::broadcast::channel::<u32>(2);
    drop(sender);
    let mut resubscribed = receiver.resubscribe();
    assert_closed!(resubscribed.try_recv());
}

30
vendor/tokio/tests/sync_errors.rs vendored Normal file
View file

@ -0,0 +1,30 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
// Compile-time probe: `T` must be a thread-safe std error type.
fn is_error<T: std::error::Error + Send + Sync>() {}
#[test]
fn mpsc_error_bound() {
    // mpsc error types must implement Error + Send + Sync.
    use tokio::sync::mpsc::error;
    is_error::<error::SendError<()>>();
    is_error::<error::TrySendError<()>>();
}
#[test]
fn oneshot_error_bound() {
    // oneshot error types must implement Error + Send + Sync.
    use tokio::sync::oneshot::error;
    is_error::<error::RecvError>();
    is_error::<error::TryRecvError>();
}
#[test]
fn watch_error_bound() {
    // watch error types must implement Error + Send + Sync.
    use tokio::sync::watch::error;
    is_error::<error::SendError<()>>();
}

679
vendor/tokio/tests/sync_mpsc.rs vendored Normal file
View file

@ -0,0 +1,679 @@
#![allow(clippy::redundant_clone)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
use std::fmt;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::{TryRecvError, TrySendError};
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
use tokio_test::*;
#[cfg(not(tokio_wasm))]
mod support {
pub(crate) mod mpsc_stream;
}
// Compile-time check that mpsc handles can cross threads.
trait AssertSend: Send {}
impl AssertSend for mpsc::Sender<i32> {}
impl AssertSend for mpsc::Receiver<i32> {}
#[maybe_tokio_test]
async fn send_recv_with_buffer() {
    let (tx, mut rx) = mpsc::channel::<i32>(16);
    // Send through an explicitly reserved permit
    let permit = tx.reserve().await.unwrap();
    permit.send(1);
    // Send without reserving first
    tx.try_send(2).unwrap();
    drop(tx);
    // Values arrive in send order; after the sender is dropped the
    // channel drains and then yields None.
    let val = rx.recv().await;
    assert_eq!(val, Some(1));
    let val = rx.recv().await;
    assert_eq!(val, Some(2));
    let val = rx.recv().await;
    assert!(val.is_none());
}
#[tokio::test]
#[cfg(feature = "full")]
async fn reserve_disarm() {
let (tx, mut rx) = mpsc::channel::<i32>(2);
let tx1 = tx.clone();
let tx2 = tx.clone();
let tx3 = tx.clone();
let tx4 = tx;
// We should be able to `poll_ready` two handles without problem
let permit1 = assert_ok!(tx1.reserve().await);
let permit2 = assert_ok!(tx2.reserve().await);
// But a third should not be ready
let mut r3 = tokio_test::task::spawn(tx3.reserve());
assert_pending!(r3.poll());
let mut r4 = tokio_test::task::spawn(tx4.reserve());
assert_pending!(r4.poll());
// Using one of the reserved slots should allow a new handle to become ready
permit1.send(1);
// We also need to receive for the slot to be free
assert!(!r3.is_woken());
rx.recv().await.unwrap();
// Now there's a free slot!
assert!(r3.is_woken());
assert!(!r4.is_woken());
// Dropping a permit should also open up a slot
drop(permit2);
assert!(r4.is_woken());
let mut r1 = tokio_test::task::spawn(tx1.reserve());
assert_pending!(r1.poll());
}
#[tokio::test]
#[cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads
async fn send_recv_stream_with_buffer() {
use tokio_stream::StreamExt;
let (tx, rx) = support::mpsc_stream::channel_stream::<i32>(16);
let mut rx = Box::pin(rx);
tokio::spawn(async move {
assert_ok!(tx.send(1).await);
assert_ok!(tx.send(2).await);
});
assert_eq!(Some(1), rx.next().await);
assert_eq!(Some(2), rx.next().await);
assert_eq!(None, rx.next().await);
}
#[tokio::test]
#[cfg(feature = "full")]
async fn async_send_recv_with_buffer() {
let (tx, mut rx) = mpsc::channel(16);
tokio::spawn(async move {
assert_ok!(tx.send(1).await);
assert_ok!(tx.send(2).await);
});
assert_eq!(Some(1), rx.recv().await);
assert_eq!(Some(2), rx.recv().await);
assert_eq!(None, rx.recv().await);
}
#[tokio::test]
#[cfg(feature = "full")]
async fn start_send_past_cap() {
use std::future::Future;
let mut t1 = tokio_test::task::spawn(());
let (tx1, mut rx) = mpsc::channel(1);
let tx2 = tx1.clone();
assert_ok!(tx1.try_send(()));
let mut r1 = Box::pin(tx1.reserve());
t1.enter(|cx, _| assert_pending!(r1.as_mut().poll(cx)));
{
let mut r2 = tokio_test::task::spawn(tx2.reserve());
assert_pending!(r2.poll());
drop(r1);
assert!(rx.recv().await.is_some());
assert!(r2.is_woken());
assert!(!t1.is_woken());
}
drop(tx1);
drop(tx2);
assert!(rx.recv().await.is_none());
}
#[test]
#[should_panic]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
fn buffer_gteq_one() {
    // Bounded channels require a capacity of at least one.
    mpsc::channel::<i32>(0);
}
#[maybe_tokio_test]
async fn send_recv_unbounded() {
let (tx, mut rx) = mpsc::unbounded_channel::<i32>();
// Using `try_send`
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
assert_eq!(rx.recv().await, Some(1));
assert_eq!(rx.recv().await, Some(2));
drop(tx);
assert!(rx.recv().await.is_none());
}
#[tokio::test]
#[cfg(feature = "full")]
async fn async_send_recv_unbounded() {
let (tx, mut rx) = mpsc::unbounded_channel();
tokio::spawn(async move {
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
});
assert_eq!(Some(1), rx.recv().await);
assert_eq!(Some(2), rx.recv().await);
assert_eq!(None, rx.recv().await);
}
#[tokio::test]
#[cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads
async fn send_recv_stream_unbounded() {
use tokio_stream::StreamExt;
let (tx, rx) = support::mpsc_stream::unbounded_channel_stream::<i32>();
let mut rx = Box::pin(rx);
tokio::spawn(async move {
assert_ok!(tx.send(1));
assert_ok!(tx.send(2));
});
assert_eq!(Some(1), rx.next().await);
assert_eq!(Some(2), rx.next().await);
assert_eq!(None, rx.next().await);
}
#[maybe_tokio_test]
async fn no_t_bounds_buffer() {
struct NoImpls;
let (tx, mut rx) = mpsc::channel(100);
// sender should be Debug even though T isn't Debug
is_debug(&tx);
// same with Receiver
is_debug(&rx);
// and sender should be Clone even though T isn't Clone
assert!(tx.clone().try_send(NoImpls).is_ok());
assert!(rx.recv().await.is_some());
}
#[maybe_tokio_test]
async fn no_t_bounds_unbounded() {
struct NoImpls;
let (tx, mut rx) = mpsc::unbounded_channel();
// sender should be Debug even though T isn't Debug
is_debug(&tx);
// same with Receiver
is_debug(&rx);
// and sender should be Clone even though T isn't Clone
assert!(tx.clone().send(NoImpls).is_ok());
assert!(rx.recv().await.is_some());
}
#[tokio::test]
#[cfg(feature = "full")]
async fn send_recv_buffer_limited() {
let (tx, mut rx) = mpsc::channel::<i32>(1);
// Reserve capacity
let p1 = assert_ok!(tx.reserve().await);
// Send first message
p1.send(1);
// Not ready
let mut p2 = tokio_test::task::spawn(tx.reserve());
assert_pending!(p2.poll());
// Take the value
assert!(rx.recv().await.is_some());
// Notified
assert!(p2.is_woken());
// Trying to send fails
assert_err!(tx.try_send(1337));
// Send second
let permit = assert_ready_ok!(p2.poll());
permit.send(2);
assert!(rx.recv().await.is_some());
}
#[maybe_tokio_test]
async fn recv_close_gets_none_idle() {
let (tx, mut rx) = mpsc::channel::<i32>(10);
rx.close();
assert!(rx.recv().await.is_none());
assert_err!(tx.send(1).await);
}
#[tokio::test]
#[cfg(feature = "full")]
async fn recv_close_gets_none_reserved() {
let (tx1, mut rx) = mpsc::channel::<i32>(1);
let tx2 = tx1.clone();
let permit1 = assert_ok!(tx1.reserve().await);
let mut permit2 = tokio_test::task::spawn(tx2.reserve());
assert_pending!(permit2.poll());
rx.close();
assert!(permit2.is_woken());
assert_ready_err!(permit2.poll());
{
let mut recv = tokio_test::task::spawn(rx.recv());
assert_pending!(recv.poll());
permit1.send(123);
assert!(recv.is_woken());
let v = assert_ready!(recv.poll());
assert_eq!(v, Some(123));
}
assert!(rx.recv().await.is_none());
}
#[maybe_tokio_test]
async fn tx_close_gets_none() {
let (_, mut rx) = mpsc::channel::<i32>(10);
assert!(rx.recv().await.is_none());
}
#[maybe_tokio_test]
async fn try_send_fail() {
let (tx, mut rx) = mpsc::channel(1);
tx.try_send("hello").unwrap();
// This should fail
match assert_err!(tx.try_send("fail")) {
TrySendError::Full(..) => {}
_ => panic!(),
}
assert_eq!(rx.recv().await, Some("hello"));
assert_ok!(tx.try_send("goodbye"));
drop(tx);
assert_eq!(rx.recv().await, Some("goodbye"));
assert!(rx.recv().await.is_none());
}
#[maybe_tokio_test]
async fn try_send_fail_with_try_recv() {
let (tx, mut rx) = mpsc::channel(1);
tx.try_send("hello").unwrap();
// This should fail
match assert_err!(tx.try_send("fail")) {
TrySendError::Full(..) => {}
_ => panic!(),
}
assert_eq!(rx.try_recv(), Ok("hello"));
assert_ok!(tx.try_send("goodbye"));
drop(tx);
assert_eq!(rx.try_recv(), Ok("goodbye"));
assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected));
}
#[maybe_tokio_test]
async fn try_reserve_fails() {
let (tx, mut rx) = mpsc::channel(1);
let permit = tx.try_reserve().unwrap();
// This should fail
match assert_err!(tx.try_reserve()) {
TrySendError::Full(()) => {}
_ => panic!(),
}
permit.send("foo");
assert_eq!(rx.recv().await, Some("foo"));
// Dropping permit releases the slot.
let permit = tx.try_reserve().unwrap();
drop(permit);
let _permit = tx.try_reserve().unwrap();
}
#[tokio::test]
#[cfg(feature = "full")]
async fn drop_permit_releases_permit() {
// poll_ready reserves capacity, ensure that the capacity is released if tx
// is dropped w/o sending a value.
let (tx1, _rx) = mpsc::channel::<i32>(1);
let tx2 = tx1.clone();
let permit = assert_ok!(tx1.reserve().await);
let mut reserve2 = tokio_test::task::spawn(tx2.reserve());
assert_pending!(reserve2.poll());
drop(permit);
assert!(reserve2.is_woken());
assert_ready_ok!(reserve2.poll());
}
#[maybe_tokio_test]
async fn dropping_rx_closes_channel() {
let (tx, rx) = mpsc::channel(100);
let msg = Arc::new(());
assert_ok!(tx.try_send(msg.clone()));
drop(rx);
assert_err!(tx.reserve().await);
assert_eq!(1, Arc::strong_count(&msg));
}
#[test]
fn dropping_rx_closes_channel_for_try() {
    // After the receiver is dropped, every try_* sender operation must
    // report Closed, and the buffered message must be freed.
    let (tx, rx) = mpsc::channel(100);
    let msg = Arc::new(());
    tx.try_send(msg.clone()).unwrap();
    drop(rx);
    assert!(matches!(
        tx.try_send(msg.clone()),
        Err(TrySendError::Closed(_))
    ));
    assert!(matches!(tx.try_reserve(), Err(TrySendError::Closed(_))));
    assert!(matches!(
        tx.try_reserve_owned(),
        Err(TrySendError::Closed(_))
    ));
    // Only our local Arc remains: the buffered copy was dropped.
    assert_eq!(1, Arc::strong_count(&msg));
}
#[test]
fn unconsumed_messages_are_dropped() {
let msg = Arc::new(());
let (tx, rx) = mpsc::channel(100);
tx.try_send(msg.clone()).unwrap();
assert_eq!(2, Arc::strong_count(&msg));
drop((tx, rx));
assert_eq!(1, Arc::strong_count(&msg));
}
#[test]
#[cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads
fn blocking_recv() {
    // blocking_recv() lets a plain (non-async) thread receive from a
    // channel that async code on a runtime sends into.
    let (tx, mut rx) = mpsc::channel::<u8>(1);
    let sync_code = std::thread::spawn(move || {
        assert_eq!(Some(10), rx.blocking_recv());
    });
    tokio::runtime::Runtime::new()
        .unwrap()
        .block_on(async move {
            let _ = tx.send(10).await;
        });
    sync_code.join().unwrap()
}
#[tokio::test]
#[should_panic]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
async fn blocking_recv_async() {
let (_tx, mut rx) = mpsc::channel::<()>(1);
let _ = rx.blocking_recv();
}
#[test]
#[cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads
fn blocking_send() {
let (tx, mut rx) = mpsc::channel::<u8>(1);
let sync_code = std::thread::spawn(move || {
tx.blocking_send(10).unwrap();
});
tokio::runtime::Runtime::new()
.unwrap()
.block_on(async move {
assert_eq!(Some(10), rx.recv().await);
});
sync_code.join().unwrap()
}
#[tokio::test]
#[should_panic]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
async fn blocking_send_async() {
let (tx, _rx) = mpsc::channel::<()>(1);
let _ = tx.blocking_send(());
}
#[tokio::test]
#[cfg(feature = "full")]
async fn ready_close_cancel_bounded() {
    // After `close()`, a pending `recv` stays pending while a reserved
    // permit is still outstanding, and resolves to `None` once the permit
    // is dropped without being used.
    let (tx, mut rx) = mpsc::channel::<()>(100);
    let _tx2 = tx.clone();
    let permit = assert_ok!(tx.reserve().await);
    rx.close();
    let mut recv = tokio_test::task::spawn(rx.recv());
    assert_pending!(recv.poll());
    // Releasing the unused permit is what lets the receiver terminate.
    drop(permit);
    assert!(recv.is_woken());
    let val = assert_ready!(recv.poll());
    assert!(val.is_none());
}
#[tokio::test]
#[cfg(feature = "full")]
async fn permit_available_not_acquired_close() {
    // A `reserve` waiter whose slot frees up after `close()` must still be
    // woken; once it abandons the permit, the channel terminates cleanly.
    let (tx1, mut rx) = mpsc::channel::<()>(1);
    let tx2 = tx1.clone();
    let permit1 = assert_ok!(tx1.reserve().await);
    let mut permit2 = tokio_test::task::spawn(tx2.reserve());
    assert_pending!(permit2.poll());
    rx.close();
    drop(permit1);
    assert!(permit2.is_woken());
    // The woken reservation is dropped without sending anything.
    drop(permit2);
    assert!(rx.recv().await.is_none());
}
#[test]
fn try_recv_bounded() {
    let (tx, mut rx) = mpsc::channel(5);
    // Fill the channel to capacity; the sixth `try_send` must be rejected.
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    assert!(tx.try_send("hello").is_err());
    // Drain everything; the channel then reports `Empty`.
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
    // Interleave sends and receives to exercise wrap-around of the
    // internal buffer: 4 in, 1 out, 2 in hits capacity again.
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    assert_eq!(Ok("hello"), rx.try_recv());
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    assert!(tx.try_send("hello").is_err());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
    // Messages still buffered when `tx` is dropped remain receivable;
    // only afterwards does the channel report `Disconnected`.
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    tx.try_send("hello").unwrap();
    drop(tx);
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Ok("hello"), rx.try_recv());
    assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv());
}
#[test]
fn try_recv_unbounded() {
    // For every message count in 0..100: all sent values come back in
    // order through `try_recv`, then `Empty` while the sender lives,
    // then `Disconnected` once it is dropped.
    for count in 0..100 {
        let (tx, mut rx) = mpsc::unbounded_channel();
        (0..count).for_each(|value| tx.send(value).unwrap());
        for expected in 0..count {
            assert_eq!(Ok(expected), rx.try_recv());
        }
        assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
        drop(tx);
        assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv());
    }
}
#[test]
fn try_recv_close_while_empty_bounded() {
    // With nothing buffered, `try_recv` reports `Empty` while a sender is
    // alive and `Disconnected` once the last sender is dropped.
    let (sender, mut receiver) = mpsc::channel::<()>(5);
    assert_eq!(Err(TryRecvError::Empty), receiver.try_recv());
    drop(sender);
    assert_eq!(Err(TryRecvError::Disconnected), receiver.try_recv());
}
#[test]
fn try_recv_close_while_empty_unbounded() {
    // Unbounded variant: `Empty` while a sender is alive, `Disconnected`
    // after the last sender is dropped.
    let (sender, mut receiver) = mpsc::unbounded_channel::<()>();
    assert_eq!(Err(TryRecvError::Empty), receiver.try_recv());
    drop(sender);
    assert_eq!(Err(TryRecvError::Disconnected), receiver.try_recv());
}
#[tokio::test(start_paused = true)]
#[cfg(feature = "full")]
async fn recv_timeout() {
    use tokio::sync::mpsc::error::SendTimeoutError::{Closed, Timeout};
    use tokio::time::Duration;
    // Capacity-5 channel: five `send_timeout`s succeed, the sixth hits the
    // timeout (the paused clock auto-advances), and once the receiver is
    // dropped further sends fail with `Closed` instead of `Timeout`.
    let (tx, rx) = mpsc::channel(5);
    assert_eq!(tx.send_timeout(10, Duration::from_secs(1)).await, Ok(()));
    assert_eq!(tx.send_timeout(20, Duration::from_secs(1)).await, Ok(()));
    assert_eq!(tx.send_timeout(30, Duration::from_secs(1)).await, Ok(()));
    assert_eq!(tx.send_timeout(40, Duration::from_secs(1)).await, Ok(()));
    assert_eq!(tx.send_timeout(50, Duration::from_secs(1)).await, Ok(()));
    assert_eq!(
        tx.send_timeout(60, Duration::from_secs(1)).await,
        Err(Timeout(60))
    );
    drop(rx);
    assert_eq!(
        tx.send_timeout(70, Duration::from_secs(1)).await,
        Err(Closed(70))
    );
}
#[test]
#[should_panic = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
fn recv_timeout_panic() {
    use futures::future::FutureExt;
    use tokio::time::Duration;
    // `send_timeout` needs a timer; polling it outside any runtime
    // (via `now_or_never`) must panic with the reactor error above.
    let (tx, _rx) = mpsc::channel(5);
    tx.send_timeout(10, Duration::from_secs(1)).now_or_never();
}
// Tests that channel `capacity` changes and `max_capacity` stays the same
#[tokio::test]
async fn test_tx_capacity() {
    let (tx, _rx) = mpsc::channel::<()>(10);
    // both capacities are same before
    assert_eq!(tx.capacity(), 10);
    assert_eq!(tx.max_capacity(), 10);
    let _permit = tx.reserve().await.unwrap();
    // after reserve, only capacity should drop by one
    assert_eq!(tx.capacity(), 9);
    assert_eq!(tx.max_capacity(), 10);
    tx.send(()).await.unwrap();
    // after send, capacity should drop by one again
    // (`_permit` is still alive here, so its slot also stays unavailable)
    assert_eq!(tx.capacity(), 8);
    assert_eq!(tx.max_capacity(), 10);
}
fn is_debug<T: fmt::Debug>(_: &T) {}

513
vendor/tokio/tests/sync_mpsc_weak.rs vendored Normal file
View file

@ -0,0 +1,513 @@
#![allow(clippy::redundant_clone)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{Acquire, Release};
use tokio::sync::mpsc::{self, channel, unbounded_channel};
use tokio::sync::oneshot;
// `WeakSender` must upgrade while any strong sender is alive and must fail
// to upgrade once every strong sender has been dropped.
#[tokio::test]
async fn weak_sender() {
    let (tx, mut rx) = channel(11);
    let tx_weak = tokio::spawn(async move {
        let tx_weak = tx.clone().downgrade();
        // Queue ten regular messages while the strong sender exists.
        for i in 0..10 {
            if tx.send(i).await.is_err() {
                return None;
            }
        }
        // `tx` is still alive here, so the upgrade must succeed.
        let tx2 = tx_weak
            .upgrade()
            .expect("expected to be able to upgrade tx_weak");
        let _ = tx2.send(20).await;
        let tx_weak = tx2.downgrade();
        Some(tx_weak)
    })
    .await
    .unwrap();
    // Drain: values 0..10, then the extra `20` at index 10, then `None`
    // at index 11 once every strong sender is gone.
    for i in 0..12 {
        let recvd = rx.recv().await;
        match recvd {
            Some(msg) => {
                if i == 10 {
                    assert_eq!(msg, 20);
                }
            }
            None => {
                assert_eq!(i, 11);
                break;
            }
        }
    }
    // All strong senders dropped inside the task: upgrading must now fail.
    let tx_weak = tx_weak.unwrap();
    let upgraded = tx_weak.upgrade();
    assert!(upgraded.is_none());
}
// Actor-pattern exercise: the actor holds only a `WeakSender` to its own
// mailbox so it does not keep its own channel alive, yet can still upgrade
// the weak handle to send itself a message while an external handle exists.
#[tokio::test]
async fn actor_weak_sender() {
    pub struct MyActor {
        receiver: mpsc::Receiver<ActorMessage>,
        // Weak handle to our own mailbox; upgraded on demand.
        sender: mpsc::WeakSender<ActorMessage>,
        next_id: u32,
        pub received_self_msg: bool,
    }
    enum ActorMessage {
        GetUniqueId { respond_to: oneshot::Sender<u32> },
        SelfMessage {},
    }
    impl MyActor {
        fn new(
            receiver: mpsc::Receiver<ActorMessage>,
            sender: mpsc::WeakSender<ActorMessage>,
        ) -> Self {
            MyActor {
                receiver,
                sender,
                next_id: 0,
                received_self_msg: false,
            }
        }
        fn handle_message(&mut self, msg: ActorMessage) {
            match msg {
                ActorMessage::GetUniqueId { respond_to } => {
                    self.next_id += 1;
                    // The `let _ =` ignores any errors when sending.
                    //
                    // This can happen if the `select!` macro is used
                    // to cancel waiting for the response.
                    let _ = respond_to.send(self.next_id);
                }
                ActorMessage::SelfMessage { .. } => {
                    self.received_self_msg = true;
                }
            }
        }
        // Upgrade the weak handle and post a message into our own mailbox.
        async fn send_message_to_self(&mut self) {
            let msg = ActorMessage::SelfMessage {};
            let sender = self.sender.clone();
            // cannot move self.sender here
            if let Some(sender) = sender.upgrade() {
                let _ = sender.send(msg).await;
                self.sender = sender.downgrade();
            }
        }
        async fn run(&mut self) {
            let mut i = 0;
            while let Some(msg) = self.receiver.recv().await {
                self.handle_message(msg);
                if i == 0 {
                    self.send_message_to_self().await;
                }
                i += 1
            }
            // The loop ends only when every strong sender is gone; by then
            // the self-message must have been delivered.
            assert!(self.received_self_msg);
        }
    }
    #[derive(Clone)]
    pub struct MyActorHandle {
        sender: mpsc::Sender<ActorMessage>,
    }
    impl MyActorHandle {
        pub fn new() -> (Self, MyActor) {
            let (sender, receiver) = mpsc::channel(8);
            let actor = MyActor::new(receiver, sender.clone().downgrade());
            (Self { sender }, actor)
        }
        pub async fn get_unique_id(&self) -> u32 {
            let (send, recv) = oneshot::channel();
            let msg = ActorMessage::GetUniqueId { respond_to: send };
            // Ignore send errors. If this send fails, so does the
            // recv.await below. There's no reason to check the
            // failure twice.
            let _ = self.sender.send(msg).await;
            recv.await.expect("Actor task has been killed")
        }
    }
    let (handle, mut actor) = MyActorHandle::new();
    let actor_handle = tokio::spawn(async move { actor.run().await });
    let _ = tokio::spawn(async move {
        let _ = handle.get_unique_id().await;
        drop(handle);
    })
    .await;
    let _ = actor_handle.await;
}
// Counts how many `Msg` values have been dropped so tests can verify that
// buffered messages are released when the receiver goes away.
static NUM_DROPPED: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug)]
struct Msg;
impl Drop for Msg {
    fn drop(&mut self) {
        NUM_DROPPED.fetch_add(1, Release);
    }
}
// Tests that no pending messages are put onto the channel after `Rx` was
// dropped.
//
// Note: After the introduction of `WeakSender`, which internally
// used `Arc` and doesn't call a drop of the channel after the last strong
// `Sender` was dropped while more than one `WeakSender` remains, we want to
// ensure that no messages are kept in the channel, which were sent after
// the receiver was dropped.
#[tokio::test]
async fn test_msgs_dropped_on_rx_drop() {
    let (tx, mut rx) = mpsc::channel(3);
    tx.send(Msg {}).await.unwrap();
    tx.send(Msg {}).await.unwrap();
    // This msg will be pending and should be dropped when `rx` is dropped
    let sent_fut = tx.send(Msg {});
    let _ = rx.recv().await.unwrap();
    let _ = rx.recv().await.unwrap();
    sent_fut.await.unwrap();
    drop(rx);
    // Two received + one still buffered when `rx` dropped = three drops.
    assert_eq!(NUM_DROPPED.load(Acquire), 3);
    // This msg will not be put onto `Tx` list anymore, since `Rx` is closed.
    assert!(tx.send(Msg {}).await.is_err());
    assert_eq!(NUM_DROPPED.load(Acquire), 4);
}
// Tests that a `WeakSender` is upgradeable when other `Sender`s exist.
#[test]
fn downgrade_upgrade_sender_success() {
    let (strong, _rx) = mpsc::channel::<i32>(1);
    let weak = strong.downgrade();
    // `strong` is still alive, so the upgrade must succeed.
    assert!(weak.upgrade().is_some());
}
// Tests that a `WeakSender` fails to upgrade when no other `Sender` exists.
#[test]
fn downgrade_upgrade_sender_failure() {
    let (strong, _rx) = mpsc::channel::<i32>(1);
    let weak = strong.downgrade();
    drop(strong);
    // No strong sender left: upgrading must fail.
    assert!(weak.upgrade().is_none());
}
// Tests that a `WeakSender` cannot be upgraded after a `Sender` was dropped,
// which existed at the time of the `downgrade` call.
#[test]
fn downgrade_drop_upgrade() {
    let (strong, _rx) = mpsc::channel::<i32>(1);
    // Downgrade from a clone that is dropped immediately.
    let weak = strong.clone().downgrade();
    drop(strong);
    assert!(weak.upgrade().is_none());
}
// Tests that we can upgrade a weak sender with an outstanding permit
// but no other strong senders.
#[tokio::test]
async fn downgrade_get_permit_upgrade_no_senders() {
    let (tx, _rx) = mpsc::channel::<i32>(1);
    let weak_tx = tx.downgrade();
    // `reserve_owned` consumes `tx`; the live owned permit alone must keep
    // the channel open for the upgrade below.
    let _permit = tx.reserve_owned().await.unwrap();
    assert!(weak_tx.upgrade().is_some());
}
// Tests that you can downgrade and upgrade a sender with an outstanding permit
// but no other senders left.
#[tokio::test]
async fn downgrade_upgrade_get_permit_no_senders() {
    let (tx, _rx) = mpsc::channel::<i32>(1);
    let tx2 = tx.clone();
    // `reserve_owned` consumes `tx`, leaving `tx2` as the only strong sender.
    let _permit = tx.reserve_owned().await.unwrap();
    let weak_tx = tx2.downgrade();
    drop(tx2);
    // Both strong senders are gone, but the live permit keeps the channel
    // open, so upgrading must still succeed.
    assert!(weak_tx.upgrade().is_some());
}
// Tests that `downgrade` does not change the `tx_count` of the channel.
#[test]
fn test_tx_count_weak_sender() {
    let (strong, _rx) = mpsc::channel::<i32>(1);
    let weak_a = strong.downgrade();
    let weak_b = strong.downgrade();
    drop(strong);
    // Neither weak handle kept the sender count up, so both fail to upgrade.
    assert!(weak_a.upgrade().is_none());
    assert!(weak_b.upgrade().is_none());
}
// Unbounded counterpart of `weak_sender`: the weak handle upgrades while a
// strong sender is alive and fails to upgrade after all are dropped.
#[tokio::test]
async fn weak_unbounded_sender() {
    let (tx, mut rx) = unbounded_channel();
    let tx_weak = tokio::spawn(async move {
        let tx_weak = tx.clone().downgrade();
        // Queue ten regular messages while the strong sender exists.
        for i in 0..10 {
            if tx.send(i).is_err() {
                return None;
            }
        }
        // `tx` is still alive here, so the upgrade must succeed.
        let tx2 = tx_weak
            .upgrade()
            .expect("expected to be able to upgrade tx_weak");
        let _ = tx2.send(20);
        let tx_weak = tx2.downgrade();
        Some(tx_weak)
    })
    .await
    .unwrap();
    // Drain: values 0..10, then the extra `20` at index 10, then `None`
    // at index 11 once every strong sender is gone.
    for i in 0..12 {
        let recvd = rx.recv().await;
        match recvd {
            Some(msg) => {
                if i == 10 {
                    assert_eq!(msg, 20);
                }
            }
            None => {
                assert_eq!(i, 11);
                break;
            }
        }
    }
    // All strong senders dropped inside the task: upgrading must now fail.
    let tx_weak = tx_weak.unwrap();
    let upgraded = tx_weak.upgrade();
    assert!(upgraded.is_none());
}
// Unbounded counterpart of `actor_weak_sender`: the actor keeps only a
// `WeakUnboundedSender` to its own mailbox and upgrades it on demand to
// message itself while an external handle is alive.
#[tokio::test]
async fn actor_weak_unbounded_sender() {
    pub struct MyActor {
        receiver: mpsc::UnboundedReceiver<ActorMessage>,
        // Weak handle to our own mailbox; upgraded on demand.
        sender: mpsc::WeakUnboundedSender<ActorMessage>,
        next_id: u32,
        pub received_self_msg: bool,
    }
    enum ActorMessage {
        GetUniqueId { respond_to: oneshot::Sender<u32> },
        SelfMessage {},
    }
    impl MyActor {
        fn new(
            receiver: mpsc::UnboundedReceiver<ActorMessage>,
            sender: mpsc::WeakUnboundedSender<ActorMessage>,
        ) -> Self {
            MyActor {
                receiver,
                sender,
                next_id: 0,
                received_self_msg: false,
            }
        }
        fn handle_message(&mut self, msg: ActorMessage) {
            match msg {
                ActorMessage::GetUniqueId { respond_to } => {
                    self.next_id += 1;
                    // The `let _ =` ignores any errors when sending.
                    //
                    // This can happen if the `select!` macro is used
                    // to cancel waiting for the response.
                    let _ = respond_to.send(self.next_id);
                }
                ActorMessage::SelfMessage { .. } => {
                    self.received_self_msg = true;
                }
            }
        }
        // Upgrade the weak handle and post a message into our own mailbox.
        async fn send_message_to_self(&mut self) {
            let msg = ActorMessage::SelfMessage {};
            let sender = self.sender.clone();
            // cannot move self.sender here
            if let Some(sender) = sender.upgrade() {
                let _ = sender.send(msg);
                self.sender = sender.downgrade();
            }
        }
        async fn run(&mut self) {
            let mut i = 0;
            while let Some(msg) = self.receiver.recv().await {
                self.handle_message(msg);
                if i == 0 {
                    self.send_message_to_self().await;
                }
                i += 1
            }
            // The loop ends only when every strong sender is gone; by then
            // the self-message must have been delivered.
            assert!(self.received_self_msg);
        }
    }
    #[derive(Clone)]
    pub struct MyActorHandle {
        sender: mpsc::UnboundedSender<ActorMessage>,
    }
    impl MyActorHandle {
        pub fn new() -> (Self, MyActor) {
            let (sender, receiver) = mpsc::unbounded_channel();
            let actor = MyActor::new(receiver, sender.clone().downgrade());
            (Self { sender }, actor)
        }
        pub async fn get_unique_id(&self) -> u32 {
            let (send, recv) = oneshot::channel();
            let msg = ActorMessage::GetUniqueId { respond_to: send };
            // Ignore send errors. If this send fails, so does the
            // recv.await below. There's no reason to check the
            // failure twice.
            let _ = self.sender.send(msg);
            recv.await.expect("Actor task has been killed")
        }
    }
    let (handle, mut actor) = MyActorHandle::new();
    let actor_handle = tokio::spawn(async move { actor.run().await });
    let _ = tokio::spawn(async move {
        let _ = handle.get_unique_id().await;
        drop(handle);
    })
    .await;
    let _ = actor_handle.await;
}
// Counts how many `MsgUnbounded` values have been dropped so tests can
// verify that buffered messages are released when the receiver goes away.
static NUM_DROPPED_UNBOUNDED: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug)]
struct MsgUnbounded;
impl Drop for MsgUnbounded {
    fn drop(&mut self) {
        NUM_DROPPED_UNBOUNDED.fetch_add(1, Release);
    }
}
// Tests that no pending messages are put onto the channel after `Rx` was
// dropped.
//
// Note: After the introduction of `UnboundedWeakSender`, which internally
// used `Arc` and doesn't call a drop of the channel after the last strong
// `UnboundedSender` was dropped while more than one `UnboundedWeakSender`
// remains, we want to ensure that no messages are kept in the channel, which
// were sent after the receiver was dropped.
#[tokio::test]
async fn test_msgs_dropped_on_unbounded_rx_drop() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    tx.send(MsgUnbounded {}).unwrap();
    tx.send(MsgUnbounded {}).unwrap();
    // This msg will be pending and should be dropped when `rx` is dropped
    let sent = tx.send(MsgUnbounded {});
    let _ = rx.recv().await.unwrap();
    let _ = rx.recv().await.unwrap();
    sent.unwrap();
    drop(rx);
    // Two received + one still buffered when `rx` dropped = three drops.
    assert_eq!(NUM_DROPPED_UNBOUNDED.load(Acquire), 3);
    // This msg will not be put onto `Tx` list anymore, since `Rx` is closed.
    assert!(tx.send(MsgUnbounded {}).is_err());
    assert_eq!(NUM_DROPPED_UNBOUNDED.load(Acquire), 4);
}
// Tests that an `WeakUnboundedSender` is upgradeable when other
// `UnboundedSender`s exist.
#[test]
fn downgrade_upgrade_unbounded_sender_success() {
    let (strong, _rx) = mpsc::unbounded_channel::<i32>();
    let weak = strong.downgrade();
    // `strong` is still alive, so the upgrade must succeed.
    assert!(weak.upgrade().is_some());
}
// Tests that a `WeakUnboundedSender` fails to upgrade when no other
// `UnboundedSender` exists.
#[test]
fn downgrade_upgrade_unbounded_sender_failure() {
    let (strong, _rx) = mpsc::unbounded_channel::<i32>();
    let weak = strong.downgrade();
    drop(strong);
    // No strong sender left: upgrading must fail.
    assert!(weak.upgrade().is_none());
}
// Tests that an `WeakUnboundedSender` cannot be upgraded after an
// `UnboundedSender` was dropped, which existed at the time of the `downgrade` call.
#[test]
fn downgrade_drop_upgrade_unbounded() {
    let (strong, _rx) = mpsc::unbounded_channel::<i32>();
    // Downgrade from a clone that is dropped immediately.
    let weak = strong.clone().downgrade();
    drop(strong);
    assert!(weak.upgrade().is_none());
}
// Tests that `downgrade` does not change the `tx_count` of the channel.
#[test]
fn test_tx_count_weak_unbounded_sender() {
    let (strong, _rx) = mpsc::unbounded_channel::<i32>();
    let weak_a = strong.downgrade();
    let weak_b = strong.downgrade();
    drop(strong);
    // Neither weak handle kept the sender count up, so both fail to upgrade.
    assert!(weak_a.upgrade().is_none());
    assert!(weak_b.upgrade().is_none());
}

178
vendor/tokio/tests/sync_mutex.rs vendored Normal file
View file

@ -0,0 +1,178 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
use tokio::sync::Mutex;
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready};
use std::sync::Arc;
#[test]
fn straight_execution() {
    // Sequential lock/unlock: each guard observes the value written under
    // the previous one, and the lock is immediately ready every time.
    let l = Mutex::new(100);
    {
        let mut t = spawn(l.lock());
        let mut g = assert_ready!(t.poll());
        assert_eq!(&*g, &100);
        *g = 99;
    }
    {
        let mut t = spawn(l.lock());
        let mut g = assert_ready!(t.poll());
        assert_eq!(&*g, &99);
        *g = 98;
    }
    {
        let mut t = spawn(l.lock());
        let g = assert_ready!(t.poll());
        assert_eq!(&*g, &98);
    }
}
#[test]
fn readiness() {
    // Two tasks contending for one mutex: the second stays pending until
    // the first guard drops, and is then woken.
    let l1 = Arc::new(Mutex::new(100));
    let l2 = Arc::clone(&l1);
    let mut t1 = spawn(l1.lock());
    let mut t2 = spawn(l2.lock());
    let g = assert_ready!(t1.poll());
    // We can't now acquire the lease since it's already held in g
    assert_pending!(t2.poll());
    // But once g unlocks, we can acquire it
    drop(g);
    assert!(t2.is_woken());
    let _t2 = assert_ready!(t2.poll());
}
/*
#[test]
#[ignore]
fn lock() {
let mut lock = Mutex::new(false);
let mut lock2 = lock.clone();
std::thread::spawn(move || {
let l = lock2.lock();
pin_mut!(l);
let mut task = MockTask::new();
let mut g = assert_ready!(task.poll(&mut l));
std::thread::sleep(std::time::Duration::from_millis(500));
*g = true;
drop(g);
});
std::thread::sleep(std::time::Duration::from_millis(50));
let mut task = MockTask::new();
let l = lock.lock();
pin_mut!(l);
assert_pending!(task.poll(&mut l));
std::thread::sleep(std::time::Duration::from_millis(500));
assert!(task.is_woken());
let result = assert_ready!(task.poll(&mut l));
assert!(*result);
}
*/
/// Ensure a mutex is unlocked if a future holding the lock
/// is aborted prematurely.
#[tokio::test]
#[cfg(feature = "full")]
async fn aborted_future_1() {
    use std::time::Duration;
    use tokio::time::{interval, timeout};
    let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
    {
        let m2 = m1.clone();
        // Try to lock mutex in a future that is aborted prematurely
        timeout(Duration::from_millis(1u64), async move {
            // The 1000ms interval ticks keep this future alive well past
            // the 1ms timeout, so it is dropped while holding the guard.
            let iv = interval(Duration::from_millis(1000));
            tokio::pin!(iv);
            let _g = m2.lock().await;
            iv.as_mut().tick().await;
            iv.as_mut().tick().await;
        })
        .await
        .unwrap_err();
    }
    // This should succeed as there is no lock left for the mutex.
    timeout(Duration::from_millis(1u64), async move {
        let _g = m1.lock().await;
    })
    .await
    .expect("Mutex is locked");
}
/// This test is similar to `aborted_future_1` but this time the
/// aborted future is waiting for the lock.
#[tokio::test]
#[cfg(feature = "full")]
async fn aborted_future_2() {
    use std::time::Duration;
    use tokio::time::timeout;
    let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
    {
        // Lock mutex
        let _lock = m1.lock().await;
        {
            let m2 = m1.clone();
            // Try to lock mutex in a future that is aborted prematurely
            timeout(Duration::from_millis(1u64), async move {
                // Blocks on the held lock until the timeout aborts it;
                // the aborted waiter must not poison the wait queue.
                let _g = m2.lock().await;
            })
            .await
            .unwrap_err();
        }
    }
    // This should succeed as there is no lock left for the mutex.
    timeout(Duration::from_millis(1u64), async move {
        let _g = m1.lock().await;
    })
    .await
    .expect("Mutex is locked");
}
#[test]
fn try_lock() {
    // `try_lock` fails while a guard is alive and succeeds again once the
    // guard is dropped.
    let mutex: Mutex<usize> = Mutex::new(0);
    {
        let held = mutex.try_lock();
        assert!(held.is_ok());
        // Second attempt while `held` is alive must fail.
        assert!(mutex.try_lock().is_err());
    }
    // Guard released above; locking works again.
    assert!(mutex.try_lock().is_ok());
}
// The guard's `Debug` output must match the inner value's `Debug` output.
#[maybe_tokio_test]
async fn debug_format() {
    let s = "debug";
    let m = Mutex::new(s.to_string());
    assert_eq!(format!("{:?}", s), format!("{:?}", m.lock().await));
}
// `Mutex`'s own `Debug` impl shows the data when unlocked and the
// `<locked>` placeholder while a guard is held.
#[maybe_tokio_test]
async fn mutex_debug() {
    let s = "data";
    let m = Mutex::new(s.to_string());
    assert_eq!(format!("{:?}", m), r#"Mutex { data: "data" }"#);
    let _guard = m.lock().await;
    assert_eq!(format!("{:?}", m), r#"Mutex { data: <locked> }"#)
}

136
vendor/tokio/tests/sync_mutex_owned.rs vendored Normal file
View file

@ -0,0 +1,136 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
use tokio::sync::Mutex;
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready};
use std::sync::Arc;
#[test]
fn straight_execution() {
    // Sequential `lock_owned`: each owned guard observes the value written
    // under the previous one; the final call consumes the last `Arc`.
    let l = Arc::new(Mutex::new(100));
    {
        let mut t = spawn(l.clone().lock_owned());
        let mut g = assert_ready!(t.poll());
        assert_eq!(&*g, &100);
        *g = 99;
    }
    {
        let mut t = spawn(l.clone().lock_owned());
        let mut g = assert_ready!(t.poll());
        assert_eq!(&*g, &99);
        *g = 98;
    }
    {
        let mut t = spawn(l.lock_owned());
        let g = assert_ready!(t.poll());
        assert_eq!(&*g, &98);
    }
}
#[test]
fn readiness() {
    // Two tasks contending via `lock_owned`: the second stays pending until
    // the first owned guard drops, and is then woken.
    let l = Arc::new(Mutex::new(100));
    let mut t1 = spawn(l.clone().lock_owned());
    let mut t2 = spawn(l.lock_owned());
    let g = assert_ready!(t1.poll());
    // We can't now acquire the lease since it's already held in g
    assert_pending!(t2.poll());
    // But once g unlocks, we can acquire it
    drop(g);
    assert!(t2.is_woken());
    assert_ready!(t2.poll());
}
/// Ensure a mutex is unlocked if a future holding the lock
/// is aborted prematurely.
#[tokio::test]
#[cfg(feature = "full")]
async fn aborted_future_1() {
    use std::time::Duration;
    use tokio::time::{interval, timeout};
    let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
    {
        let m2 = m1.clone();
        // Try to lock mutex in a future that is aborted prematurely
        timeout(Duration::from_millis(1u64), async move {
            // The 1000ms interval ticks keep this future alive well past
            // the 1ms timeout, so it is dropped mid-flight.
            let iv = interval(Duration::from_millis(1000));
            tokio::pin!(iv);
            // Owned guard is a temporary: dropped at the end of this statement.
            m2.lock_owned().await;
            iv.as_mut().tick().await;
            iv.as_mut().tick().await;
        })
        .await
        .unwrap_err();
    }
    // This should succeed as there is no lock left for the mutex.
    timeout(Duration::from_millis(1u64), async move {
        m1.lock_owned().await;
    })
    .await
    .expect("Mutex is locked");
}
/// This test is similar to `aborted_future_1` but this time the
/// aborted future is waiting for the lock.
#[tokio::test]
#[cfg(feature = "full")]
async fn aborted_future_2() {
    use std::time::Duration;
    use tokio::time::timeout;
    let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
    {
        // Lock mutex
        let _lock = m1.clone().lock_owned().await;
        {
            let m2 = m1.clone();
            // Try to lock mutex in a future that is aborted prematurely
            timeout(Duration::from_millis(1u64), async move {
                // Blocks on the held lock until the timeout aborts it; the
                // aborted waiter must not poison the wait queue.
                m2.lock_owned().await;
            })
            .await
            .unwrap_err();
        }
    }
    // This should succeed as there is no lock left for the mutex.
    timeout(Duration::from_millis(1u64), async move {
        m1.lock_owned().await;
    })
    .await
    .expect("Mutex is locked");
}
#[test]
fn try_lock_owned() {
    // `try_lock_owned` fails while an owned guard is alive and succeeds
    // again once that guard is dropped.
    let mutex: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
    {
        let held = mutex.clone().try_lock_owned();
        assert!(held.is_ok());
        // Second attempt while `held` is alive must fail.
        assert!(mutex.clone().try_lock_owned().is_err());
    }
    // Guard released above; locking works again (consumes the last Arc).
    assert!(mutex.try_lock_owned().is_ok());
}
// The owned guard's `Debug` output must match the inner value's `Debug`.
#[maybe_tokio_test]
async fn debug_format() {
    let s = "debug";
    let m = Arc::new(Mutex::new(s.to_string()));
    assert_eq!(format!("{:?}", s), format!("{:?}", m.lock_owned().await));
}

227
vendor/tokio/tests/sync_notify.rs vendored Normal file
View file

@ -0,0 +1,227 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
use tokio::sync::Notify;
use tokio_test::task::spawn;
use tokio_test::*;
// Static assertion: `Notify` must be `Send + Sync` (usable across threads).
trait AssertSend: Send + Sync {}
impl AssertSend for Notify {}
// A `notify_one` issued before the future is first polled stores a permit
// that completes `notified()` on its very first poll.
#[test]
fn notify_notified_one() {
    let notify = Notify::new();
    let mut notified = spawn(async { notify.notified().await });
    notify.notify_one();
    assert_ready!(notified.poll());
}
// A waiter registered by polling is woken by a subsequent `notify_one`.
#[test]
fn notified_one_notify() {
    let notify = Notify::new();
    let mut notified = spawn(async { notify.notified().await });
    assert_pending!(notified.poll());
    notify.notify_one();
    assert!(notified.is_woken());
    assert_ready!(notified.poll());
}
// With two registered waiters, a single `notify_one` wakes and completes
// only the first; the second stays pending.
#[test]
fn notified_multi_notify() {
    let notify = Notify::new();
    let mut notified1 = spawn(async { notify.notified().await });
    let mut notified2 = spawn(async { notify.notified().await });
    assert_pending!(notified1.poll());
    assert_pending!(notified2.poll());
    notify.notify_one();
    assert!(notified1.is_woken());
    assert!(!notified2.is_woken());
    assert_ready!(notified1.poll());
    assert_pending!(notified2.poll());
}
// A pre-stored permit satisfies the first future; a second `notify_one`
// is needed to wake and complete the next waiter.
#[test]
fn notify_notified_multi() {
    let notify = Notify::new();
    notify.notify_one();
    let mut notified1 = spawn(async { notify.notified().await });
    let mut notified2 = spawn(async { notify.notified().await });
    assert_ready!(notified1.poll());
    assert_pending!(notified2.poll());
    notify.notify_one();
    assert!(notified2.is_woken());
    assert_ready!(notified2.poll());
}
// Dropping the front waiter removes it from the queue; a later
// `notify_one` then reaches the remaining waiter.
#[test]
fn notified_drop_notified_notify() {
    let notify = Notify::new();
    let mut notified1 = spawn(async { notify.notified().await });
    let mut notified2 = spawn(async { notify.notified().await });
    assert_pending!(notified1.poll());
    drop(notified1);
    assert_pending!(notified2.poll());
    notify.notify_one();
    assert!(notified2.is_woken());
    assert_ready!(notified2.poll());
}
// Dropping a waiter that was notified but never consumed its wakeup must
// hand the notification on to the next queued waiter.
#[test]
fn notified_multi_notify_drop_one() {
    let notify = Notify::new();
    let mut notified1 = spawn(async { notify.notified().await });
    let mut notified2 = spawn(async { notify.notified().await });
    assert_pending!(notified1.poll());
    assert_pending!(notified2.poll());
    notify.notify_one();
    assert!(notified1.is_woken());
    assert!(!notified2.is_woken());
    // `notified1` never re-polled: its unconsumed wakeup moves on.
    drop(notified1);
    assert!(notified2.is_woken());
    assert_ready!(notified2.poll());
}
// Regression test: `notify_waiters` must not deadlock when a waker dropped
// during notification itself re-enters `notify_waiters`.
#[test]
fn notify_in_drop_after_wake() {
    use futures::task::ArcWake;
    use std::future::Future;
    use std::sync::Arc;
    let notify = Arc::new(Notify::new());
    // A waker whose destructor calls back into the same `Notify`.
    struct NotifyOnDrop(Arc<Notify>);
    impl ArcWake for NotifyOnDrop {
        fn wake_by_ref(_arc_self: &Arc<Self>) {}
    }
    impl Drop for NotifyOnDrop {
        fn drop(&mut self) {
            self.0.notify_waiters();
        }
    }
    let mut fut = Box::pin(async {
        notify.notified().await;
    });
    {
        let waker = futures::task::waker(Arc::new(NotifyOnDrop(notify.clone())));
        let mut cx = std::task::Context::from_waker(&waker);
        assert!(fut.as_mut().poll(&mut cx).is_pending());
    }
    // Now, notifying **should not** deadlock
    notify.notify_waiters();
}
// A `notify_one` permit issued after `notify_waiters` must survive the
// drop of the already-woken waiter and satisfy a brand-new waiter.
#[test]
fn notify_one_after_dropped_all() {
    let notify = Notify::new();
    let mut notified1 = spawn(async { notify.notified().await });
    assert_pending!(notified1.poll());
    notify.notify_waiters();
    notify.notify_one();
    drop(notified1);
    let mut notified2 = spawn(async { notify.notified().await });
    assert_ready!(notified2.poll());
}
// `notify_one` before the future is ever polled or enabled stores a permit
// that makes the first poll ready.
#[test]
fn test_notify_one_not_enabled() {
    let notify = Notify::new();
    let mut future = spawn(notify.notified());
    notify.notify_one();
    assert_ready!(future.poll());
}
// `enable` returns whether the future has been notified: false before the
// `notify_one`, true after the notification has been delivered.
#[test]
fn test_notify_one_after_enable() {
    let notify = Notify::new();
    let mut future = spawn(notify.notified());
    future.enter(|_, fut| assert!(!fut.enable()));
    notify.notify_one();
    assert_ready!(future.poll());
    future.enter(|_, fut| assert!(fut.enable()));
}
// Enabling first and then polling leaves the future pending (enable does
// not consume or fabricate a notification).
#[test]
fn test_poll_after_enable() {
    let notify = Notify::new();
    let mut future = spawn(notify.notified());
    future.enter(|_, fut| assert!(!fut.enable()));
    assert_pending!(future.poll());
}
// `enable` after an initial pending poll still reports not-notified.
#[test]
fn test_enable_after_poll() {
    let notify = Notify::new();
    let mut future = spawn(notify.notified());
    assert_pending!(future.poll());
    future.enter(|_, fut| assert!(!fut.enable()));
}
// `enable` on the first future consumes the stored permit, so a second
// future enabled afterwards is not considered notified.
#[test]
fn test_enable_consumes_permit() {
    let notify = Notify::new();
    // Add a permit.
    notify.notify_one();
    let mut future1 = spawn(notify.notified());
    future1.enter(|_, fut| assert!(fut.enable()));
    let mut future2 = spawn(notify.notified());
    future2.enter(|_, fut| assert!(!fut.enable()));
}
// Re-polling with a different waker must replace the registered waker so
// the most recently supplied task is the one woken.
#[test]
fn test_waker_update() {
    use futures::task::noop_waker;
    use std::future::Future;
    use std::task::Context;
    let notify = Notify::new();
    let mut future = spawn(notify.notified());
    let noop = noop_waker();
    // First poll registers the no-op waker...
    future.enter(|_, fut| assert_pending!(fut.poll(&mut Context::from_waker(&noop))));
    // ...the second poll must swap in the mock task's real waker.
    assert_pending!(future.poll());
    notify.notify_one();
    assert!(future.is_woken());
}

274
vendor/tokio/tests/sync_once_cell.rs vendored Normal file
View file

@ -0,0 +1,274 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::mem;
use std::ops::Drop;
use std::sync::atomic::{AtomicU32, Ordering};
use std::time::Duration;
use tokio::runtime;
use tokio::sync::{OnceCell, SetError};
use tokio::time;
// Initializer that completes immediately with 5.
async fn func1() -> u32 {
    5
}
// Initializer that yields to the timer for 1ms before completing with 10.
async fn func2() -> u32 {
    time::sleep(Duration::from_millis(1)).await;
    10
}
// Fallible initializer that always fails.
async fn func_err() -> Result<u32, ()> {
    Err(())
}
// Fallible initializer that always succeeds with 10.
async fn func_ok() -> Result<u32, ()> {
    Ok(10)
}
// Initializer that yields to the timer once, then panics.
async fn func_panic() -> u32 {
    time::sleep(Duration::from_millis(1)).await;
    panic!();
}
// Initializer used by `set_while_initializing`: sleeps on the paused
// clock so a concurrent `set` can race against the in-flight init.
async fn sleep_and_set() -> u32 {
    // Simulate sleep by pausing time and waiting for another thread to
    // resume clock when calling `set`, then finding the cell being initialized
    // by this call
    time::sleep(Duration::from_millis(2)).await;
    5
}
// Advances the paused clock by 1ms, then attempts a direct `set` — used to
// race a `set` against an in-flight `get_or_init`.
async fn advance_time_and_set(cell: &'static OnceCell<u32>, v: u32) -> Result<(), SetError<u32>> {
    time::advance(Duration::from_millis(1)).await;
    cell.set(v)
}
#[test]
fn get_or_init() {
    let rt = runtime::Builder::new_current_thread()
        .enable_time()
        .start_paused(true)
        .build()
        .unwrap();
    static ONCE: OnceCell<u32> = OnceCell::const_new();
    rt.block_on(async {
        // Two tasks race to initialize; both must observe the value of the
        // initializer that completed first (func1's 5, since func2 sleeps).
        let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await });
        let handle2 = rt.spawn(async { ONCE.get_or_init(func2).await });
        time::advance(Duration::from_millis(1)).await;
        time::resume();
        let result1 = handle1.await.unwrap();
        let result2 = handle2.await.unwrap();
        assert_eq!(*result1, 5);
        assert_eq!(*result2, 5);
    });
}
#[test]
fn get_or_init_panic() {
    let rt = runtime::Builder::new_current_thread()
        .enable_time()
        .build()
        .unwrap();
    static ONCE: OnceCell<u32> = OnceCell::const_new();
    rt.block_on(async {
        time::pause();
        // NOTE(review): relies on `func1` winning initialization before
        // `func_panic`'s sleep elapses, so the panicking initializer is
        // never run to completion and both tasks observe 5.
        let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await });
        let handle2 = rt.spawn(async { ONCE.get_or_init(func_panic).await });
        time::advance(Duration::from_millis(1)).await;
        let result1 = handle1.await.unwrap();
        let result2 = handle2.await.unwrap();
        assert_eq!(*result1, 5);
        assert_eq!(*result2, 5);
    });
}
// A value stored via `set` on one task is visible through `get` elsewhere.
#[test]
fn set_and_get() {
    let rt = runtime::Builder::new_current_thread()
        .enable_time()
        .build()
        .unwrap();
    static ONCE: OnceCell<u32> = OnceCell::const_new();
    rt.block_on(async {
        let _ = rt.spawn(async { ONCE.set(5) }).await;
        let value = ONCE.get().unwrap();
        assert_eq!(*value, 5);
    });
}
#[test]
fn get_uninit() {
    // `get` on a never-initialized cell yields `None`.
    static ONCE: OnceCell<u32> = OnceCell::const_new();
    assert!(ONCE.get().is_none());
}
#[test]
fn set_twice() {
    // Only the first `set` wins; the second reports `AlreadyInit`.
    static ONCE: OnceCell<u32> = OnceCell::const_new();
    assert_eq!(ONCE.set(5), Ok(()));
    let second_err = ONCE.set(6).err().unwrap();
    assert!(second_err.is_already_init_err());
}
#[test]
fn set_while_initializing() {
    let rt = runtime::Builder::new_current_thread()
        .enable_time()
        .build()
        .unwrap();
    static ONCE: OnceCell<u32> = OnceCell::const_new();
    rt.block_on(async {
        time::pause();
        // A direct `set` attempted while `get_or_init` is in flight must
        // lose the race and report an "initializing" error.
        let handle1 = rt.spawn(async { ONCE.get_or_init(sleep_and_set).await });
        let handle2 = rt.spawn(async { advance_time_and_set(&ONCE, 10).await });
        time::advance(Duration::from_millis(2)).await;
        let result1 = handle1.await.unwrap();
        let result2 = handle2.await.unwrap();
        assert_eq!(*result1, 5);
        assert!(result2.err().unwrap().is_initializing_err());
    });
}
#[test]
fn get_or_try_init() {
    let rt = runtime::Builder::new_current_thread()
        .enable_time()
        .start_paused(true)
        .build()
        .unwrap();
    static ONCE: OnceCell<u32> = OnceCell::const_new();
    rt.block_on(async {
        // A failing initializer leaves the cell empty, so a subsequent
        // `get_or_try_init` with a succeeding initializer can still fill it.
        let handle1 = rt.spawn(async { ONCE.get_or_try_init(func_err).await });
        let handle2 = rt.spawn(async { ONCE.get_or_try_init(func_ok).await });
        time::advance(Duration::from_millis(1)).await;
        time::resume();
        let result1 = handle1.await.unwrap();
        assert!(result1.is_err());
        let result2 = handle2.await.unwrap();
        assert_eq!(*result2.unwrap(), 10);
    });
}
// A value stored via `set` must be dropped exactly once, together with the
// cell itself.
#[test]
fn drop_cell() {
    static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
    struct Foo {}
    let fooer = Foo {};
    impl Drop for Foo {
        fn drop(&mut self) {
            NUM_DROPS.fetch_add(1, Ordering::Release);
        }
    }
    {
        let once_cell = OnceCell::new();
        let prev = once_cell.set(fooer);
        assert!(prev.is_ok())
    }
    assert!(NUM_DROPS.load(Ordering::Acquire) == 1);
}
/// Dropping a cell created pre-initialized via `new_with` drops its value once.
#[test]
fn drop_cell_new_with() {
    static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
    struct Foo {}
    impl Drop for Foo {
        fn drop(&mut self) {
            NUM_DROPS.fetch_add(1, Ordering::Release);
        }
    }
    let fooer = Foo {};
    {
        let cell = OnceCell::new_with(Some(fooer));
        assert!(cell.initialized());
    }
    assert_eq!(NUM_DROPS.load(Ordering::Acquire), 1);
}
/// `into_inner` transfers ownership out of the cell: the value is dropped only
/// when the caller drops it, not when the cell itself is consumed.
#[test]
fn drop_into_inner() {
    static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
    struct Foo {}
    impl Drop for Foo {
        fn drop(&mut self) {
            NUM_DROPS.fetch_add(1, Ordering::Release);
        }
    }
    let cell = OnceCell::new();
    assert!(cell.set(Foo {}).is_ok());
    let extracted = cell.into_inner();
    // Consuming the cell did not drop the value...
    assert_eq!(NUM_DROPS.load(Ordering::Acquire), 0);
    drop(extracted);
    // ...dropping the extracted value does.
    assert_eq!(NUM_DROPS.load(Ordering::Acquire), 1);
}
/// `into_inner` on a `new_with`-constructed cell behaves the same way: the
/// value is dropped only when the caller drops it.
#[test]
fn drop_into_inner_new_with() {
    static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
    struct Foo {}
    impl Drop for Foo {
        fn drop(&mut self) {
            NUM_DROPS.fetch_add(1, Ordering::Release);
        }
    }
    let cell = OnceCell::new_with(Some(Foo {}));
    let extracted = cell.into_inner();
    assert_eq!(NUM_DROPS.load(Ordering::Acquire), 0);
    mem::drop(extracted);
    assert_eq!(NUM_DROPS.load(Ordering::Acquire), 1);
}
/// `OnceCell::from` produces an already-initialized cell holding the value.
#[test]
fn from() {
    let cell = OnceCell::from(2);
    assert_eq!(cell.get().copied(), Some(2));
}

291
vendor/tokio/tests/sync_oneshot.rs vendored Normal file
View file

@ -0,0 +1,291 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
use tokio::sync::oneshot;
use tokio::sync::oneshot::error::TryRecvError;
use tokio_test::*;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
// Compile-time assertion that both halves of a oneshot channel are `Send`.
trait AssertSend: Send {}
impl AssertSend for oneshot::Sender<i32> {}
impl AssertSend for oneshot::Receiver<i32> {}
// Exposes `Sender::closed` as a plain poll function so the tests below can
// drive it manually with `tokio_test`'s mock task.
trait SenderExt {
    fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()>;
}
impl<T> SenderExt for oneshot::Sender<T> {
    fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        // Pin the `closed` future on the stack; it is polled once per call.
        tokio::pin! {
            let fut = self.closed();
        }
        fut.poll(cx)
    }
}
// Polling the receiver before a send is `Pending`; the send wakes the task
// and the value is then ready.
#[test]
fn send_recv() {
    let (tx, rx) = oneshot::channel();
    let mut rx = task::spawn(rx);
    assert_pending!(rx.poll());
    assert_ok!(tx.send(1));
    assert!(rx.is_woken());
    let val = assert_ready_ok!(rx.poll());
    assert_eq!(val, 1);
}
/// Sending and then awaiting the receiver yields the sent value.
#[maybe_tokio_test]
async fn async_send_recv() {
    let (sender, receiver) = oneshot::channel();
    assert_ok!(sender.send(1));
    let received = assert_ok!(receiver.await);
    assert_eq!(received, 1);
}
// Dropping the sender wakes a pending receiver, which then observes an error.
#[test]
fn close_tx() {
    let (tx, rx) = oneshot::channel::<i32>();
    let mut rx = task::spawn(rx);
    assert_pending!(rx.poll());
    drop(tx);
    assert!(rx.is_woken());
    assert_ready_err!(rx.poll());
}
// Sending on a channel whose receiver is gone fails; `poll_closed` resolves
// once the receiver is dropped.
#[test]
fn close_rx() {
    // First, without checking poll_closed()
    //
    let (tx, _) = oneshot::channel();
    assert_err!(tx.send(1));
    // Second, via poll_closed();
    let (tx, rx) = oneshot::channel();
    let mut tx = task::spawn(tx);
    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    drop(rx);
    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    assert_err!(tx.into_inner().send(1));
}
// `Sender::closed` completes when the receiver is dropped by another task.
#[tokio::test]
#[cfg(feature = "full")]
async fn async_rx_closed() {
    let (mut tx, rx) = oneshot::channel::<()>();
    tokio::spawn(async move {
        drop(rx);
    });
    tx.closed().await;
}
// `Receiver::close` still delivers an already-sent value, and otherwise
// notifies the sender that the channel is closed.
#[test]
fn explicit_close_poll() {
    // First, with message sent
    let (tx, rx) = oneshot::channel();
    let mut rx = task::spawn(rx);
    assert_ok!(tx.send(1));
    rx.close();
    let value = assert_ready_ok!(rx.poll());
    assert_eq!(value, 1);
    // Second, without the message sent
    let (tx, rx) = oneshot::channel::<i32>();
    let mut tx = task::spawn(tx);
    let mut rx = task::spawn(rx);
    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    rx.close();
    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    assert_err!(tx.into_inner().send(1));
    assert_ready_err!(rx.poll());
    // Again, but without sending the value this time
    let (tx, rx) = oneshot::channel::<i32>();
    let mut tx = task::spawn(tx);
    let mut rx = task::spawn(rx);
    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    rx.close();
    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    assert_ready_err!(rx.poll());
}
// Same scenarios as above, but reading via `try_recv` instead of polling.
#[test]
fn explicit_close_try_recv() {
    // First, with message sent
    let (tx, mut rx) = oneshot::channel();
    assert_ok!(tx.send(1));
    rx.close();
    let val = assert_ok!(rx.try_recv());
    assert_eq!(1, val);
    // Second, without the message sent
    let (tx, mut rx) = oneshot::channel::<i32>();
    let mut tx = task::spawn(tx);
    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    rx.close();
    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));
    assert_err!(rx.try_recv());
}
// Polling a receiver after `close` + a failed `try_recv` is a usage error and
// is expected to panic (checked via `should_panic`).
#[test]
#[should_panic]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
fn close_try_recv_poll() {
    let (_tx, rx) = oneshot::channel::<i32>();
    let mut rx = task::spawn(rx);
    rx.close();
    assert_err!(rx.try_recv());
    let _ = rx.poll();
}
// Closing after the value was successfully received must not panic.
#[test]
fn close_after_recv() {
    let (tx, mut rx) = oneshot::channel::<i32>();
    tx.send(17).unwrap();
    assert_eq!(17, rx.try_recv().unwrap());
    rx.close();
}
// After the value has been consumed, further `try_recv` calls report `Closed`.
#[test]
fn try_recv_after_completion() {
    let (tx, mut rx) = oneshot::channel::<i32>();
    tx.send(17).unwrap();
    assert_eq!(17, rx.try_recv().unwrap());
    assert_eq!(Err(TryRecvError::Closed), rx.try_recv());
    rx.close();
}
// As above, but the value is consumed by polling rather than `try_recv`.
#[test]
fn try_recv_after_completion_await() {
    let (tx, rx) = oneshot::channel::<i32>();
    let mut rx = task::spawn(rx);
    tx.send(17).unwrap();
    assert_eq!(Ok(17), assert_ready!(rx.poll()));
    assert_eq!(Err(TryRecvError::Closed), rx.try_recv());
    rx.close();
}
// Dropping both halves releases every waker clone they held.
#[test]
fn drops_tasks() {
    let (mut tx, mut rx) = oneshot::channel::<i32>();
    let mut tx_task = task::spawn(());
    let mut rx_task = task::spawn(());
    assert_pending!(tx_task.enter(|cx, _| tx.poll_closed(cx)));
    assert_pending!(rx_task.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
    drop(tx);
    drop(rx);
    // Only the mock task's own waker reference remains.
    assert_eq!(1, tx_task.waker_ref_count());
    assert_eq!(1, rx_task.waker_ref_count());
}
// Re-polling from a different task moves the stored waker: the old task's
// waker clone is released and only the most recent poller is woken.
#[test]
fn receiver_changes_task() {
    let (tx, mut rx) = oneshot::channel();
    let mut task1 = task::spawn(());
    let mut task2 = task::spawn(());
    assert_pending!(task1.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
    assert_eq!(2, task1.waker_ref_count());
    assert_eq!(1, task2.waker_ref_count());
    assert_pending!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
    assert_eq!(1, task1.waker_ref_count());
    assert_eq!(2, task2.waker_ref_count());
    assert_ok!(tx.send(1));
    assert!(!task1.is_woken());
    assert!(task2.is_woken());
    assert_ready_ok!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
}
// Mirror of `receiver_changes_task` for the sender's `poll_closed` waker.
#[test]
fn sender_changes_task() {
    let (mut tx, rx) = oneshot::channel::<i32>();
    let mut task1 = task::spawn(());
    let mut task2 = task::spawn(());
    assert_pending!(task1.enter(|cx, _| tx.poll_closed(cx)));
    assert_eq!(2, task1.waker_ref_count());
    assert_eq!(1, task2.waker_ref_count());
    assert_pending!(task2.enter(|cx, _| tx.poll_closed(cx)));
    assert_eq!(1, task1.waker_ref_count());
    assert_eq!(2, task2.waker_ref_count());
    drop(rx);
    assert!(!task1.is_woken());
    assert!(task2.is_woken());
    assert_ready!(task2.enter(|cx, _| tx.poll_closed(cx)));
}

197
vendor/tokio/tests/sync_panic.rs vendored Normal file
View file

@ -0,0 +1,197 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))]
use std::{error::Error, sync::Arc};
use tokio::{
runtime::{Builder, Runtime},
sync::{broadcast, mpsc, oneshot, Mutex, RwLock, Semaphore},
};
mod support {
pub mod panic;
}
use support::panic::test_panic;
// Each test below triggers a documented panic inside tokio and uses
// `test_panic` (from the local `support::panic` module) to check that
// `#[track_caller]` attributes the panic to this file, not tokio internals.
#[test]
fn broadcast_channel_panic_caller() -> Result<(), Box<dyn Error>> {
    // Capacity 0 is invalid for a broadcast channel.
    let panic_location_file = test_panic(|| {
        let (_, _) = broadcast::channel::<u32>(0);
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn mutex_blocking_lock_panic_caller() -> Result<(), Box<dyn Error>> {
    // `blocking_lock` panics here because it runs inside `block_on`.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        rt.block_on(async {
            let mutex = Mutex::new(5_u32);
            let _g = mutex.blocking_lock();
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn oneshot_blocking_recv_panic_caller() -> Result<(), Box<dyn Error>> {
    // `blocking_recv` panics here because it runs inside `block_on`.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        rt.block_on(async {
            let (_tx, rx) = oneshot::channel::<u8>();
            let _ = rx.blocking_recv();
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn rwlock_with_max_readers_panic_caller() -> Result<(), Box<dyn Error>> {
    // A reader count above the supported maximum panics in the constructor.
    let panic_location_file = test_panic(|| {
        let _ = RwLock::<u8>::with_max_readers(0, (u32::MAX >> 3) + 1);
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn rwlock_blocking_read_panic_caller() -> Result<(), Box<dyn Error>> {
    // `blocking_read` panics here because it runs inside `block_on`.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        rt.block_on(async {
            let lock = RwLock::<u8>::new(0);
            let _ = lock.blocking_read();
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn rwlock_blocking_write_panic_caller() -> Result<(), Box<dyn Error>> {
    // `blocking_write` panics here because it runs inside `block_on`.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        rt.block_on(async {
            let lock = RwLock::<u8>::new(0);
            let _ = lock.blocking_write();
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
// mpsc counterparts of the panic-location checks above.
#[test]
fn mpsc_bounded_channel_panic_caller() -> Result<(), Box<dyn Error>> {
    // Capacity 0 is invalid for a bounded mpsc channel.
    let panic_location_file = test_panic(|| {
        let (_, _) = mpsc::channel::<u8>(0);
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn mpsc_bounded_receiver_blocking_recv_panic_caller() -> Result<(), Box<dyn Error>> {
    // `blocking_recv` panics here because it runs inside `block_on`.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        let (_tx, mut rx) = mpsc::channel::<u8>(1);
        rt.block_on(async {
            let _ = rx.blocking_recv();
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn mpsc_bounded_sender_blocking_send_panic_caller() -> Result<(), Box<dyn Error>> {
    // `blocking_send` panics here because it runs inside `block_on`.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        let (tx, _rx) = mpsc::channel::<u8>(1);
        rt.block_on(async {
            let _ = tx.blocking_send(3);
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn mpsc_unbounded_receiver_blocking_recv_panic_caller() -> Result<(), Box<dyn Error>> {
    // Same check for the unbounded receiver.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        let (_tx, mut rx) = mpsc::unbounded_channel::<u8>();
        rt.block_on(async {
            let _ = rx.blocking_recv();
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
// Merging permits that originate from different semaphores panics; the panic
// must be attributed to this file.
#[test]
fn semaphore_merge_unrelated_owned_permits() -> Result<(), Box<dyn Error>> {
    let panic_location_file = test_panic(|| {
        let sem1 = Arc::new(Semaphore::new(42));
        let sem2 = Arc::new(Semaphore::new(42));
        let mut p1 = sem1.try_acquire_owned().unwrap();
        let p2 = sem2.try_acquire_owned().unwrap();
        p1.merge(p2);
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn semaphore_merge_unrelated_permits() -> Result<(), Box<dyn Error>> {
    // Same as above, but with borrowed (non-owned) permits.
    let panic_location_file = test_panic(|| {
        let sem1 = Semaphore::new(42);
        let sem2 = Semaphore::new(42);
        let mut p1 = sem1.try_acquire().unwrap();
        let p2 = sem2.try_acquire().unwrap();
        p1.merge(p2);
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
/// Builds the single-threaded runtime used by the panic-caller tests above.
fn current_thread() -> Runtime {
    let mut builder = Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}

281
vendor/tokio/tests/sync_rwlock.rs vendored Normal file
View file

@ -0,0 +1,281 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(tokio_wasm_not_wasi))]
use tokio::test as maybe_tokio_test;
use std::task::Poll;
use futures::future::FutureExt;
use tokio::sync::RwLock;
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready};
/// Consuming the lock with `into_inner` returns the wrapped value.
#[test]
fn into_inner() {
    let lock = RwLock::new(42);
    let inner = lock.into_inner();
    assert_eq!(inner, 42);
}
// multiple reads should be Ready
#[test]
fn read_shared() {
    let rwlock = RwLock::new(100);
    let mut t1 = spawn(rwlock.read());
    let _g1 = assert_ready!(t1.poll());
    let mut t2 = spawn(rwlock.read());
    let _g2 = assert_ready!(t2.poll());
}
// When there is an active shared owner, exclusive access should not be possible
#[test]
fn write_shared_pending() {
    let rwlock = RwLock::new(100);
    let mut t1 = spawn(rwlock.read());
    let _g1 = assert_ready!(t1.poll());
    let mut t2 = spawn(rwlock.write());
    assert_pending!(t2.poll());
}
// When there is an active exclusive owner, shared access should not be possible
#[test]
fn read_exclusive_pending() {
    let rwlock = RwLock::new(100);
    let mut t1 = spawn(rwlock.write());
    let _g1 = assert_ready!(t1.poll());
    let mut t2 = spawn(rwlock.read());
    assert_pending!(t2.poll());
}
// If the max shared access is reached and subsequent shared access is pending
// should be made available when one of the shared accesses is dropped
#[test]
fn exhaust_reading() {
    let rwlock = RwLock::with_max_readers(100, 1024);
    let mut reads = Vec::new();
    // Take read guards until the reader budget (1024) is exhausted.
    loop {
        let mut t = spawn(rwlock.read());
        match t.poll() {
            Poll::Ready(guard) => reads.push(guard),
            Poll::Pending => break,
        }
    }
    let mut t1 = spawn(rwlock.read());
    assert_pending!(t1.poll());
    // Releasing one guard frees a slot and wakes the waiting reader.
    let g2 = reads.pop().unwrap();
    drop(g2);
    assert!(t1.is_woken());
    let _g1 = assert_ready!(t1.poll());
}
// When there is an active exclusive owner, subsequent exclusive access should not be possible
#[test]
fn write_exclusive_pending() {
    let rwlock = RwLock::new(100);
    let mut t1 = spawn(rwlock.write());
    let _g1 = assert_ready!(t1.poll());
    let mut t2 = spawn(rwlock.write());
    assert_pending!(t2.poll());
}
// When there is an active shared owner, exclusive access should be possible after shared is dropped
#[test]
fn write_shared_drop() {
    let rwlock = RwLock::new(100);
    let mut t1 = spawn(rwlock.read());
    let g1 = assert_ready!(t1.poll());
    let mut t2 = spawn(rwlock.write());
    assert_pending!(t2.poll());
    drop(g1);
    // Dropping the read guard wakes the queued writer.
    assert!(t2.is_woken());
    let _g2 = assert_ready!(t2.poll());
}
// when there is an active shared owner, and exclusive access is triggered,
// subsequent shared access should not be possible as write gathers all the available semaphore permits
#[test]
fn write_read_shared_pending() {
    let rwlock = RwLock::new(100);
    let mut t1 = spawn(rwlock.read());
    let _g1 = assert_ready!(t1.poll());
    let mut t2 = spawn(rwlock.read());
    let _g2 = assert_ready!(t2.poll());
    let mut t3 = spawn(rwlock.write());
    assert_pending!(t3.poll());
    // The queued writer blocks new readers even while reads are still active.
    let mut t4 = spawn(rwlock.read());
    assert_pending!(t4.poll());
}
// when there is an active shared owner, and exclusive access is triggered,
// reading should be possible after pending exclusive access is dropped
#[test]
fn write_read_shared_drop_pending() {
    let rwlock = RwLock::new(100);
    let mut t1 = spawn(rwlock.read());
    let _g1 = assert_ready!(t1.poll());
    let mut t2 = spawn(rwlock.write());
    assert_pending!(t2.poll());
    let mut t3 = spawn(rwlock.read());
    assert_pending!(t3.poll());
    // Cancelling the writer (dropping its future) lets the reader proceed.
    drop(t2);
    assert!(t3.is_woken());
    let _t3 = assert_ready!(t3.poll());
}
// Acquire an RwLock nonexclusively by a single task
#[maybe_tokio_test]
async fn read_uncontested() {
    let rwlock = RwLock::new(100);
    let result = *rwlock.read().await;
    assert_eq!(result, 100);
}
// Acquire an uncontested RwLock in exclusive mode
#[maybe_tokio_test]
async fn write_uncontested() {
    let rwlock = RwLock::new(100);
    let mut result = rwlock.write().await;
    *result += 50;
    assert_eq!(*result, 150);
}
// RwLocks should be acquired in the order that their Futures are waited upon.
#[maybe_tokio_test]
async fn write_order() {
    let rwlock = RwLock::<Vec<u32>>::new(vec![]);
    // `fut2` is created first but awaited second; acquisition follows the
    // await order, so 1 is pushed before 2.
    let fut2 = rwlock.write().map(|mut guard| guard.push(2));
    let fut1 = rwlock.write().map(|mut guard| guard.push(1));
    fut1.await;
    fut2.await;
    let g = rwlock.read().await;
    assert_eq!(*g, vec![1, 2]);
}
// A single RwLock is contested by tasks in multiple threads
#[cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread", worker_threads = 8)]
async fn multithreaded() {
    use futures::stream::{self, StreamExt};
    use std::sync::Arc;
    use tokio::sync::Barrier;
    // Four writer tasks plus this test task rendezvous on the barrier.
    let barrier = Arc::new(Barrier::new(5));
    let rwlock = Arc::new(RwLock::<u32>::new(0));
    // The original spelled out four byte-identical spawn blocks; they are
    // deduplicated into a loop. Each writer adds its increment 1000 times,
    // so the expected total is 1000 * (2 + 3 + 5 + 7) = 17_000.
    for increment in [2u32, 3, 5, 7] {
        let rwlock = rwlock.clone();
        let barrier = barrier.clone();
        tokio::spawn(async move {
            stream::iter(0..1000)
                .for_each(move |_| {
                    let rwlock = rwlock.clone();
                    async move {
                        let mut guard = rwlock.write().await;
                        *guard += increment;
                    }
                })
                .await;
            barrier.wait().await;
        });
    }
    // Wait until every writer has finished all 1000 increments.
    barrier.wait().await;
    let g = rwlock.read().await;
    assert_eq!(*g, 17_000);
}
// `try_write` fails while a read guard is live and succeeds once it is dropped.
#[maybe_tokio_test]
async fn try_write() {
    let lock = RwLock::new(0);
    let read_guard = lock.read().await;
    assert!(lock.try_write().is_err());
    drop(read_guard);
    assert!(lock.try_write().is_ok());
}
// Interleaves `try_read`/`try_write` to check mutual exclusion without awaiting.
#[test]
fn try_read_try_write() {
    let lock: RwLock<usize> = RwLock::new(15);
    {
        let rg1 = lock.try_read().unwrap();
        assert_eq!(*rg1, 15);
        // A writer cannot enter while readers hold the lock...
        assert!(lock.try_write().is_err());
        // ...but additional readers can.
        let rg2 = lock.try_read().unwrap();
        assert_eq!(*rg2, 15)
    }
    {
        let mut wg = lock.try_write().unwrap();
        *wg = 1515;
        // No reader can enter while the writer holds the lock.
        assert!(lock.try_read().is_err())
    }
    assert_eq!(*lock.try_read().unwrap(), 1515);
}

150
vendor/tokio/tests/sync_semaphore.rs vendored Normal file
View file

@ -0,0 +1,150 @@
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
use std::sync::Arc;
use tokio::sync::Semaphore;
// A zero-permit semaphore is a valid construction.
#[test]
fn no_permits() {
    // this should not panic
    Semaphore::new(0);
}
// A held permit blocks further acquisition; dropping it frees the permit.
#[test]
fn try_acquire() {
    let sem = Semaphore::new(1);
    {
        let p1 = sem.try_acquire();
        assert!(p1.is_ok());
        let p2 = sem.try_acquire();
        assert!(p2.is_err());
    }
    // `p1` was dropped at the end of the scope, releasing its permit.
    let p3 = sem.try_acquire();
    assert!(p3.is_ok());
}
// An async `acquire` completes once the held permit is dropped.
#[tokio::test]
#[cfg(feature = "full")]
async fn acquire() {
    let sem = Arc::new(Semaphore::new(1));
    let p1 = sem.try_acquire().unwrap();
    let sem_clone = sem.clone();
    let j = tokio::spawn(async move {
        let _p2 = sem_clone.acquire().await;
    });
    drop(p1);
    j.await.unwrap();
}
// `add_permits` unblocks a task waiting on an empty semaphore.
#[tokio::test]
#[cfg(feature = "full")]
async fn add_permits() {
    let sem = Arc::new(Semaphore::new(0));
    let sem_clone = sem.clone();
    let j = tokio::spawn(async move {
        let _p2 = sem_clone.acquire().await;
    });
    sem.add_permits(1);
    j.await.unwrap();
}
// `forget` permanently removes the permit: it is not returned on drop.
#[test]
fn forget() {
    let sem = Arc::new(Semaphore::new(1));
    {
        let p = sem.try_acquire().unwrap();
        assert_eq!(sem.available_permits(), 0);
        p.forget();
        assert_eq!(sem.available_permits(), 0);
    }
    // Even after the scope ends, the forgotten permit is gone.
    assert_eq!(sem.available_permits(), 0);
    assert!(sem.try_acquire().is_err());
}
// Merged permits are released together when the combined permit drops.
#[test]
fn merge() {
    let sem = Arc::new(Semaphore::new(3));
    {
        let mut p1 = sem.try_acquire().unwrap();
        assert_eq!(sem.available_permits(), 2);
        let p2 = sem.try_acquire_many(2).unwrap();
        assert_eq!(sem.available_permits(), 0);
        p1.merge(p2);
        assert_eq!(sem.available_permits(), 0);
    }
    // All three merged permits come back at once.
    assert_eq!(sem.available_permits(), 3);
}
// Merging permits from different semaphores is a usage error and must panic.
#[test]
#[cfg(not(tokio_wasm))] // No stack unwinding on wasm targets
#[should_panic]
fn merge_unrelated_permits() {
    let sem1 = Arc::new(Semaphore::new(3));
    let sem2 = Arc::new(Semaphore::new(3));
    let mut p1 = sem1.try_acquire().unwrap();
    let p2 = sem2.try_acquire().unwrap();
    p1.merge(p2);
}
// 1000 tasks contend over 5 permits; all permits must be returned at the end.
#[tokio::test]
#[cfg(feature = "full")]
async fn stress_test() {
    let sem = Arc::new(Semaphore::new(5));
    let mut join_handles = Vec::new();
    for _ in 0..1000 {
        let sem_clone = sem.clone();
        join_handles.push(tokio::spawn(async move {
            let _p = sem_clone.acquire().await;
        }));
    }
    for j in join_handles {
        j.await.unwrap();
    }
    // there should be exactly 5 semaphores available now
    let _p1 = sem.try_acquire().unwrap();
    let _p2 = sem.try_acquire().unwrap();
    let _p3 = sem.try_acquire().unwrap();
    let _p4 = sem.try_acquire().unwrap();
    let _p5 = sem.try_acquire().unwrap();
    assert!(sem.try_acquire().is_err());
}
// The permit count may be raised exactly to MAX_PERMITS...
#[test]
fn add_max_amount_permits() {
    let s = tokio::sync::Semaphore::new(0);
    s.add_permits(tokio::sync::Semaphore::MAX_PERMITS);
    assert_eq!(s.available_permits(), tokio::sync::Semaphore::MAX_PERMITS);
}
// ...but exceeding MAX_PERMITS in a single `add_permits` call panics...
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
#[test]
#[should_panic]
fn add_more_than_max_amount_permits1() {
    let s = tokio::sync::Semaphore::new(1);
    s.add_permits(tokio::sync::Semaphore::MAX_PERMITS);
}
// ...as does crossing the limit incrementally.
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
#[test]
#[should_panic]
fn add_more_than_max_amount_permits2() {
    let s = Semaphore::new(Semaphore::MAX_PERMITS - 1);
    s.add_permits(1);
    // This second call pushes the count past MAX_PERMITS.
    s.add_permits(1);
}
// Constructing above the limit also panics.
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
#[test]
#[should_panic]
fn panic_when_exceeds_maxpermits() {
    let _ = Semaphore::new(Semaphore::MAX_PERMITS + 1);
}
// Sitting exactly at the limit — by construction or via `add_permits` — is fine.
#[test]
fn no_panic_at_maxpermits() {
    let _ = Semaphore::new(Semaphore::MAX_PERMITS);
    let s = Semaphore::new(Semaphore::MAX_PERMITS - 1);
    s.add_permits(1);
}

View file

@ -0,0 +1,138 @@
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
use std::sync::Arc;
use tokio::sync::Semaphore;
// Owned-permit variant: a held permit blocks further acquisition until dropped.
#[test]
fn try_acquire() {
    let sem = Arc::new(Semaphore::new(1));
    {
        let p1 = sem.clone().try_acquire_owned();
        assert!(p1.is_ok());
        let p2 = sem.clone().try_acquire_owned();
        assert!(p2.is_err());
    }
    let p3 = sem.try_acquire_owned();
    assert!(p3.is_ok());
}
// Acquiring all 42 permits at once exhausts the semaphore; after release the
// permits can be re-acquired in smaller batches (32 + 10 = 42).
#[test]
fn try_acquire_many() {
    let sem = Arc::new(Semaphore::new(42));
    {
        let p1 = sem.clone().try_acquire_many_owned(42);
        assert!(p1.is_ok());
        let p2 = sem.clone().try_acquire_owned();
        assert!(p2.is_err());
    }
    let p3 = sem.clone().try_acquire_many_owned(32);
    assert!(p3.is_ok());
    let p4 = sem.clone().try_acquire_many_owned(10);
    assert!(p4.is_ok());
    assert!(sem.try_acquire_owned().is_err());
}
// An owned async `acquire` completes once the held permit is dropped.
#[tokio::test]
#[cfg(feature = "full")]
async fn acquire() {
    let sem = Arc::new(Semaphore::new(1));
    let p1 = sem.clone().try_acquire_owned().unwrap();
    let sem_clone = sem.clone();
    let j = tokio::spawn(async move {
        let _p2 = sem_clone.acquire_owned().await;
    });
    drop(p1);
    j.await.unwrap();
}
// A 10-permit batch fits alongside the held 32; the subsequent 32-permit
// request only completes after the original 32 are released.
#[tokio::test]
#[cfg(feature = "full")]
async fn acquire_many() {
    let semaphore = Arc::new(Semaphore::new(42));
    let permit32 = semaphore.clone().try_acquire_many_owned(32).unwrap();
    // The oneshot signals that the 10-permit acquire has completed.
    let (sender, receiver) = tokio::sync::oneshot::channel();
    let join_handle = tokio::spawn(async move {
        let _permit10 = semaphore.clone().acquire_many_owned(10).await.unwrap();
        sender.send(()).unwrap();
        let _permit32 = semaphore.acquire_many_owned(32).await.unwrap();
    });
    receiver.await.unwrap();
    drop(permit32);
    join_handle.await.unwrap();
}
// `add_permits` unblocks a task waiting on an empty semaphore (owned variant).
#[tokio::test]
#[cfg(feature = "full")]
async fn add_permits() {
    let sem = Arc::new(Semaphore::new(0));
    let sem_clone = sem.clone();
    let j = tokio::spawn(async move {
        let _p2 = sem_clone.acquire_owned().await;
    });
    sem.add_permits(1);
    j.await.unwrap();
}
// A forgotten owned permit is never returned to the semaphore.
#[test]
fn forget() {
    let sem = Arc::new(Semaphore::new(1));
    {
        let p = sem.clone().try_acquire_owned().unwrap();
        assert_eq!(sem.available_permits(), 0);
        p.forget();
        assert_eq!(sem.available_permits(), 0);
    }
    // Even after the scope ends, the forgotten permit is gone.
    assert_eq!(sem.available_permits(), 0);
    assert!(sem.try_acquire_owned().is_err());
}
// Merged owned permits are released together when the combined permit drops.
#[test]
fn merge() {
    let sem = Arc::new(Semaphore::new(3));
    {
        let mut p1 = sem.clone().try_acquire_owned().unwrap();
        assert_eq!(sem.available_permits(), 2);
        let p2 = sem.clone().try_acquire_many_owned(2).unwrap();
        assert_eq!(sem.available_permits(), 0);
        p1.merge(p2);
        assert_eq!(sem.available_permits(), 0);
    }
    // All three merged permits come back at once.
    assert_eq!(sem.available_permits(), 3);
}
/// Merging owned permits that originate from two different semaphores is a
/// usage error and must panic.
#[test]
#[cfg(not(tokio_wasm))] // No stack unwinding on wasm targets
#[should_panic]
fn merge_unrelated_permits() {
    let sem1 = Arc::new(Semaphore::new(3));
    let sem2 = Arc::new(Semaphore::new(3));
    let mut p1 = sem1.try_acquire_owned().unwrap();
    let p2 = sem2.try_acquire_owned().unwrap();
    // Trailing semicolon added for consistency with the non-owned variant of
    // this test; `merge` returns `()`, so behavior is unchanged.
    p1.merge(p2);
}
// 1000 tasks contend over 5 owned permits; all must be returned at the end.
#[tokio::test]
#[cfg(feature = "full")]
async fn stress_test() {
    let sem = Arc::new(Semaphore::new(5));
    let mut join_handles = Vec::new();
    for _ in 0..1000 {
        let sem_clone = sem.clone();
        join_handles.push(tokio::spawn(async move {
            let _p = sem_clone.acquire_owned().await;
        }));
    }
    for j in join_handles {
        j.await.unwrap();
    }
    // there should be exactly 5 semaphores available now
    let _p1 = sem.clone().try_acquire_owned().unwrap();
    let _p2 = sem.clone().try_acquire_owned().unwrap();
    let _p3 = sem.clone().try_acquire_owned().unwrap();
    let _p4 = sem.clone().try_acquire_owned().unwrap();
    let _p5 = sem.clone().try_acquire_owned().unwrap();
    assert!(sem.try_acquire_owned().is_err());
}

242
vendor/tokio/tests/sync_watch.rs vendored Normal file
View file

@ -0,0 +1,242 @@
#![allow(clippy::cognitive_complexity)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(tokio_wasm_not_wasi)]
use wasm_bindgen_test::wasm_bindgen_test as test;
use tokio::sync::watch;
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok};
// A single receiver: `changed` is pending until a send, then resolves, and
// errors once the sender has been dropped.
#[test]
fn single_rx_recv() {
    let (tx, mut rx) = watch::channel("one");
    {
        // Not initially notified
        let mut t = spawn(rx.changed());
        assert_pending!(t.poll());
    }
    assert_eq!(*rx.borrow(), "one");
    {
        let mut t = spawn(rx.changed());
        assert_pending!(t.poll());
        tx.send("two").unwrap();
        assert!(t.is_woken());
        assert_ready_ok!(t.poll());
    }
    assert_eq!(*rx.borrow(), "two");
    {
        let mut t = spawn(rx.changed());
        assert_pending!(t.poll());
        drop(tx);
        assert!(t.is_woken());
        // Sender gone: `changed` now reports an error.
        assert_ready_err!(t.poll());
    }
    // The last observed value remains readable.
    assert_eq!(*rx.borrow(), "two");
}
// Two receivers observe the same sequence of updates, even when their
// `changed` futures are created and polled at different times.
#[test]
fn multi_rx() {
    let (tx, mut rx1) = watch::channel("one");
    let mut rx2 = rx1.clone();
    {
        let mut t1 = spawn(rx1.changed());
        let mut t2 = spawn(rx2.changed());
        assert_pending!(t1.poll());
        assert_pending!(t2.poll());
    }
    assert_eq!(*rx1.borrow(), "one");
    assert_eq!(*rx2.borrow(), "one");
    // `t2` is deliberately kept alive across the next two sends.
    let mut t2 = spawn(rx2.changed());
    {
        let mut t1 = spawn(rx1.changed());
        assert_pending!(t1.poll());
        assert_pending!(t2.poll());
        tx.send("two").unwrap();
        assert!(t1.is_woken());
        assert!(t2.is_woken());
        assert_ready_ok!(t1.poll());
    }
    assert_eq!(*rx1.borrow(), "two");
    {
        let mut t1 = spawn(rx1.changed());
        assert_pending!(t1.poll());
        tx.send("three").unwrap();
        assert!(t1.is_woken());
        assert!(t2.is_woken());
        assert_ready_ok!(t1.poll());
        // `t2` was created before "two" was sent but still resolves here.
        assert_ready_ok!(t2.poll());
    }
    assert_eq!(*rx1.borrow(), "three");
    drop(t2);
    assert_eq!(*rx2.borrow(), "three");
    {
        let mut t1 = spawn(rx1.changed());
        let mut t2 = spawn(rx2.changed());
        assert_pending!(t1.poll());
        assert_pending!(t2.poll());
        tx.send("four").unwrap();
        assert_ready_ok!(t1.poll());
        assert_ready_ok!(t2.poll());
    }
    assert_eq!(*rx1.borrow(), "four");
    assert_eq!(*rx2.borrow(), "four");
}
// A receiver can still observe the final value after the sender is dropped.
#[test]
fn rx_observes_final_value() {
    // Initial value
    let (tx, mut rx) = watch::channel("one");
    drop(tx);
    {
        let mut t1 = spawn(rx.changed());
        assert_ready_err!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "one");
    // Sending a value
    let (tx, mut rx) = watch::channel("one");
    tx.send("two").unwrap();
    {
        let mut t1 = spawn(rx.changed());
        assert_ready_ok!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "two");
    {
        let mut t1 = spawn(rx.changed());
        assert_pending!(t1.poll());
        // The value sent just before the sender drops is still delivered.
        tx.send("three").unwrap();
        drop(tx);
        assert!(t1.is_woken());
        assert_ready_ok!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "three");
    {
        let mut t1 = spawn(rx.changed());
        assert_ready_err!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "three");
}
// `Sender::closed` resolves when the last receiver is dropped, after which
// sends fail.
#[test]
fn poll_close() {
    let (tx, rx) = watch::channel("one");
    {
        let mut t = spawn(tx.closed());
        assert_pending!(t.poll());
        drop(rx);
        assert!(t.is_woken());
        assert_ready!(t.poll());
    }
    assert!(tx.send("two").is_err());
}
// `has_changed` and `borrow_and_update` keep the receiver's "seen" version in
// sync with explicit `changed` polls.
#[test]
fn borrow_and_update() {
    let (tx, mut rx) = watch::channel("one");
    assert!(!rx.has_changed().unwrap());
    tx.send("two").unwrap();
    assert!(rx.has_changed().unwrap());
    assert_ready!(spawn(rx.changed()).poll()).unwrap();
    assert_pending!(spawn(rx.changed()).poll());
    assert!(!rx.has_changed().unwrap());
    tx.send("three").unwrap();
    assert!(rx.has_changed().unwrap());
    // `borrow_and_update` marks the value as seen, unlike plain `borrow`.
    assert_eq!(*rx.borrow_and_update(), "three");
    assert_pending!(spawn(rx.changed()).poll());
    assert!(!rx.has_changed().unwrap());
    drop(tx);
    assert_eq!(*rx.borrow_and_update(), "three");
    // With the sender gone, `changed` and `has_changed` both report errors.
    assert_ready!(spawn(rx.changed()).poll()).unwrap_err();
    assert!(rx.has_changed().is_err());
}
// `subscribe` on a closed sender re-opens the channel.
#[test]
fn reopened_after_subscribe() {
    let (tx, rx) = watch::channel("one");
    assert!(!tx.is_closed());
    drop(rx);
    assert!(tx.is_closed());
    let rx = tx.subscribe();
    assert!(!tx.is_closed());
    drop(rx);
    assert!(tx.is_closed());
}
// A panic inside `send_modify`'s closure leaves the (partially) modified value
// visible but does not notify waiting receivers.
#[test]
#[cfg(not(tokio_wasm))] // wasm currently doesn't support unwinding
fn send_modify_panic() {
    let (tx, mut rx) = watch::channel("one");
    tx.send_modify(|old| *old = "two");
    assert_eq!(*rx.borrow_and_update(), "two");
    let mut rx2 = rx.clone();
    assert_eq!(*rx2.borrow_and_update(), "two");
    let mut task = spawn(rx2.changed());
    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        tx.send_modify(|old| {
            *old = "panicked";
            panic!();
        })
    }));
    assert!(result.is_err());
    // The waiting task was not notified by the panicking send...
    assert_pending!(task.poll());
    // ...but the modification is visible through `borrow`.
    assert_eq!(*rx.borrow(), "panicked");
    // A subsequent successful send notifies as usual.
    tx.send_modify(|old| *old = "three");
    assert_ready_ok!(task.poll());
    assert_eq!(*rx.borrow_and_update(), "three");
}

218
vendor/tokio/tests/task_abort.rs vendored Normal file
View file

@ -0,0 +1,218 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support panic recovery
use std::sync::Arc;
use std::thread::sleep;
use tokio::time::Duration;
use tokio::runtime::Builder;
/// Helper that panics when dropped, letting a test assert a value is *never*
/// dropped. (Not referenced in the tests visible in this chunk — presumably
/// used later in this file.)
struct PanicOnDrop;
impl Drop for PanicOnDrop {
    fn drop(&mut self) {
        panic!("Well what did you expect would happen...");
    }
}
/// Checks that a suspended task can be aborted without panicking as reported in
/// issue #3157: <https://github.com/tokio-rs/tokio/issues/3157>.
#[test]
fn test_abort_without_panic_3157() {
    let rt = Builder::new_multi_thread()
        .enable_time()
        .worker_threads(1)
        .build()
        .unwrap();
    rt.block_on(async move {
        let handle = tokio::spawn(async move { tokio::time::sleep(Duration::new(100, 0)).await });
        // wait for task to sleep.
        tokio::time::sleep(Duration::from_millis(10)).await;
        handle.abort();
        // The join result is intentionally ignored; the test only verifies
        // that the abort itself does not panic.
        let _ = handle.await;
    });
}
/// Checks that a suspended task can be aborted inside of a current_thread
/// executor without panicking as reported in issue #3662:
/// <https://github.com/tokio-rs/tokio/issues/3662>.
#[test]
fn test_abort_without_panic_3662() {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    // Sets its flag when dropped, so the test can observe task teardown.
    struct DropCheck(Arc<AtomicBool>);
    impl Drop for DropCheck {
        fn drop(&mut self) {
            self.0.store(true, Ordering::SeqCst);
        }
    }
    let rt = Builder::new_current_thread().build().unwrap();
    rt.block_on(async move {
        let drop_flag = Arc::new(AtomicBool::new(false));
        let drop_check = DropCheck(drop_flag.clone());
        let j = tokio::spawn(async move {
            // NB: just grab the drop check here so that it becomes part of the
            // task.
            let _drop_check = drop_check;
            futures::future::pending::<()>().await;
        });
        let drop_flag2 = drop_flag.clone();
        let task = std::thread::spawn(move || {
            // This runs in a separate thread so it doesn't have immediate
            // thread-local access to the executor. It does however transition
            // the underlying task to be completed, which will cause it to be
            // dropped (but not in this thread).
            assert!(!drop_flag2.load(Ordering::SeqCst));
            j.abort();
            j
        })
        .join()
        .unwrap();
        // Awaiting the aborted task must observe both the drop and the
        // cancellation error.
        let result = task.await;
        assert!(drop_flag.load(Ordering::SeqCst));
        assert!(result.unwrap_err().is_cancelled());
        // Note: We do the following to trigger a deferred task cleanup.
        //
        // The relevant piece of code you want to look at is in:
        // `Inner::block_on` of `scheduler/current_thread.rs`.
        //
        // We cause the cleanup to happen by having a poll return Pending once
        // so that the scheduler can go into the "auxiliary tasks" mode, at
        // which point the task is removed from the scheduler.
        let i = tokio::spawn(async move {
            tokio::task::yield_now().await;
        });
        i.await.unwrap();
    });
}
/// Checks that a suspended LocalSet task can be aborted from a remote thread
/// without panicking and without running the tasks destructor on the wrong thread.
/// <https://github.com/tokio-rs/tokio/issues/3929>
#[test]
fn remote_abort_local_set_3929() {
struct DropCheck {
created_on: std::thread::ThreadId,
not_send: std::marker::PhantomData<*const ()>,
}
impl DropCheck {
fn new() -> Self {
Self {
created_on: std::thread::current().id(),
not_send: std::marker::PhantomData,
}
}
}
impl Drop for DropCheck {
fn drop(&mut self) {
if std::thread::current().id() != self.created_on {
panic!("non-Send value dropped in another thread!");
}
}
}
let rt = Builder::new_current_thread().build().unwrap();
let local = tokio::task::LocalSet::new();
let check = DropCheck::new();
let jh = local.spawn_local(async move {
futures::future::pending::<()>().await;
drop(check);
});
let jh2 = std::thread::spawn(move || {
sleep(Duration::from_millis(10));
jh.abort();
});
rt.block_on(local);
jh2.join().unwrap();
}
/// Checks that a suspended task can be aborted even if the `JoinHandle` is immediately dropped.
/// issue #3964: <https://github.com/tokio-rs/tokio/issues/3964>.
#[test]
fn test_abort_wakes_task_3964() {
let rt = Builder::new_current_thread().enable_time().build().unwrap();
rt.block_on(async move {
let notify_dropped = Arc::new(());
let weak_notify_dropped = Arc::downgrade(&notify_dropped);
let handle = tokio::spawn(async move {
// Make sure the Arc is moved into the task
let _notify_dropped = notify_dropped;
tokio::time::sleep(Duration::new(100, 0)).await
});
// wait for task to sleep.
tokio::time::sleep(Duration::from_millis(10)).await;
handle.abort();
drop(handle);
// wait for task to abort.
tokio::time::sleep(Duration::from_millis(10)).await;
// Check that the Arc has been dropped.
assert!(weak_notify_dropped.upgrade().is_none());
});
}
/// Checks that aborting a task whose destructor panics does not allow the
/// panic to escape the task.
#[test]
fn test_abort_task_that_panics_on_drop_contained() {
let rt = Builder::new_current_thread().enable_time().build().unwrap();
rt.block_on(async move {
let handle = tokio::spawn(async move {
// Make sure the Arc is moved into the task
let _panic_dropped = PanicOnDrop;
tokio::time::sleep(Duration::new(100, 0)).await
});
// wait for task to sleep.
tokio::time::sleep(Duration::from_millis(10)).await;
handle.abort();
drop(handle);
// wait for task to abort.
tokio::time::sleep(Duration::from_millis(10)).await;
});
}
/// Checks that aborting a task whose destructor panics has the expected result.
#[test]
fn test_abort_task_that_panics_on_drop_returned() {
let rt = Builder::new_current_thread().enable_time().build().unwrap();
rt.block_on(async move {
let handle = tokio::spawn(async move {
// Make sure the Arc is moved into the task
let _panic_dropped = PanicOnDrop;
tokio::time::sleep(Duration::new(100, 0)).await
});
// wait for task to sleep.
tokio::time::sleep(Duration::from_millis(10)).await;
handle.abort();
assert!(handle.await.unwrap_err().is_panic());
});
}

228
vendor/tokio/tests/task_blocking.rs vendored Normal file
View file

@ -0,0 +1,228 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads
use tokio::{runtime, task};
use tokio_test::assert_ok;
use std::thread;
use std::time::Duration;
mod support {
pub(crate) mod mpsc_stream;
}
#[tokio::test]
async fn basic_blocking() {
// Run a few times
for _ in 0..100 {
let out = assert_ok!(
tokio::spawn(async {
assert_ok!(
task::spawn_blocking(|| {
thread::sleep(Duration::from_millis(5));
"hello"
})
.await
)
})
.await
);
assert_eq!(out, "hello");
}
}
#[tokio::test(flavor = "multi_thread")]
async fn block_in_blocking() {
// Run a few times
for _ in 0..100 {
let out = assert_ok!(
tokio::spawn(async {
assert_ok!(
task::spawn_blocking(|| {
task::block_in_place(|| {
thread::sleep(Duration::from_millis(5));
});
"hello"
})
.await
)
})
.await
);
assert_eq!(out, "hello");
}
}
#[tokio::test(flavor = "multi_thread")]
async fn block_in_block() {
// Run a few times
for _ in 0..100 {
let out = assert_ok!(
tokio::spawn(async {
task::block_in_place(|| {
task::block_in_place(|| {
thread::sleep(Duration::from_millis(5));
});
"hello"
})
})
.await
);
assert_eq!(out, "hello");
}
}
#[tokio::test(flavor = "current_thread")]
#[should_panic]
async fn no_block_in_current_thread_scheduler() {
task::block_in_place(|| {});
}
#[test]
fn yes_block_in_threaded_block_on() {
let rt = runtime::Runtime::new().unwrap();
rt.block_on(async {
task::block_in_place(|| {});
});
}
#[test]
#[should_panic]
fn no_block_in_current_thread_block_on() {
let rt = runtime::Builder::new_current_thread().build().unwrap();
rt.block_on(async {
task::block_in_place(|| {});
});
}
#[test]
fn can_enter_current_thread_rt_from_within_block_in_place() {
let outer = tokio::runtime::Runtime::new().unwrap();
outer.block_on(async {
tokio::task::block_in_place(|| {
let inner = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
inner.block_on(async {})
})
});
}
#[test]
fn useful_panic_message_when_dropping_rt_in_rt() {
use std::panic::{catch_unwind, AssertUnwindSafe};
let outer = tokio::runtime::Runtime::new().unwrap();
let result = catch_unwind(AssertUnwindSafe(|| {
outer.block_on(async {
let _ = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
});
}));
assert!(result.is_err());
let err = result.unwrap_err();
let err: &'static str = err.downcast_ref::<&'static str>().unwrap();
assert!(
err.contains("Cannot drop a runtime"),
"Wrong panic message: {:?}",
err
);
}
#[test]
fn can_shutdown_with_zero_timeout_in_runtime() {
let outer = tokio::runtime::Runtime::new().unwrap();
outer.block_on(async {
let rt = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
rt.shutdown_timeout(Duration::from_nanos(0));
});
}
#[test]
fn can_shutdown_now_in_runtime() {
let outer = tokio::runtime::Runtime::new().unwrap();
outer.block_on(async {
let rt = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
rt.shutdown_background();
});
}
#[test]
fn coop_disabled_in_block_in_place() {
let outer = tokio::runtime::Builder::new_multi_thread()
.enable_time()
.build()
.unwrap();
let (tx, rx) = support::mpsc_stream::unbounded_channel_stream();
for i in 0..200 {
tx.send(i).unwrap();
}
drop(tx);
outer.block_on(async move {
let jh = tokio::spawn(async move {
tokio::task::block_in_place(move || {
futures::executor::block_on(async move {
use tokio_stream::StreamExt;
assert_eq!(rx.fold(0, |n, _| n + 1).await, 200);
})
})
});
tokio::time::timeout(Duration::from_secs(1), jh)
.await
.expect("timed out (probably hanging)")
.unwrap()
});
}
#[test]
fn coop_disabled_in_block_in_place_in_block_on() {
let (done_tx, done_rx) = std::sync::mpsc::channel();
let done = done_tx.clone();
thread::spawn(move || {
let outer = tokio::runtime::Runtime::new().unwrap();
let (tx, rx) = support::mpsc_stream::unbounded_channel_stream();
for i in 0..200 {
tx.send(i).unwrap();
}
drop(tx);
outer.block_on(async move {
tokio::task::block_in_place(move || {
futures::executor::block_on(async move {
use tokio_stream::StreamExt;
assert_eq!(rx.fold(0, |n, _| n + 1).await, 200);
})
})
});
let _ = done.send(Ok(()));
});
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
let _ = done_tx.send(Err("timed out (probably hanging)"));
});
done_rx.recv().unwrap().unwrap();
}

81
vendor/tokio/tests/task_builder.rs vendored Normal file
View file

@ -0,0 +1,81 @@
#[cfg(all(tokio_unstable, feature = "tracing"))]
mod tests {
use std::rc::Rc;
use tokio::{
task::{Builder, LocalSet},
test,
};
#[test]
async fn spawn_with_name() {
let result = Builder::new()
.name("name")
.spawn(async { "task executed" })
.unwrap()
.await;
assert_eq!(result.unwrap(), "task executed");
}
#[test]
async fn spawn_blocking_with_name() {
let result = Builder::new()
.name("name")
.spawn_blocking(|| "task executed")
.unwrap()
.await;
assert_eq!(result.unwrap(), "task executed");
}
#[test]
async fn spawn_local_with_name() {
let unsend_data = Rc::new("task executed");
let result = LocalSet::new()
.run_until(async move {
Builder::new()
.name("name")
.spawn_local(async move { unsend_data })
.unwrap()
.await
})
.await;
assert_eq!(*result.unwrap(), "task executed");
}
#[test]
async fn spawn_without_name() {
let result = Builder::new()
.spawn(async { "task executed" })
.unwrap()
.await;
assert_eq!(result.unwrap(), "task executed");
}
#[test]
async fn spawn_blocking_without_name() {
let result = Builder::new()
.spawn_blocking(|| "task executed")
.unwrap()
.await;
assert_eq!(result.unwrap(), "task executed");
}
#[test]
async fn spawn_local_without_name() {
let unsend_data = Rc::new("task executed");
let result = LocalSet::new()
.run_until(async move {
Builder::new()
.spawn_local(async move { unsend_data })
.unwrap()
.await
})
.await;
assert_eq!(*result.unwrap(), "task executed");
}
}

303
vendor/tokio/tests/task_id.rs vendored Normal file
View file

@ -0,0 +1,303 @@
#![warn(rust_2018_idioms)]
#![allow(clippy::declare_interior_mutable_const)]
#![cfg(all(feature = "full", tokio_unstable))]
#[cfg(not(tokio_wasi))]
use std::error::Error;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
#[cfg(not(tokio_wasi))]
use tokio::runtime::{Builder, Runtime};
use tokio::sync::oneshot;
use tokio::task::{self, Id, LocalSet};
#[cfg(not(tokio_wasi))]
mod support {
pub mod panic;
}
#[cfg(not(tokio_wasi))]
use support::panic::test_panic;
#[tokio::test(flavor = "current_thread")]
async fn task_id_spawn() {
tokio::spawn(async { println!("task id: {}", task::id()) })
.await
.unwrap();
}
#[cfg(not(tokio_wasi))]
#[tokio::test(flavor = "current_thread")]
async fn task_id_spawn_blocking() {
task::spawn_blocking(|| println!("task id: {}", task::id()))
.await
.unwrap();
}
#[tokio::test(flavor = "current_thread")]
async fn task_id_collision_current_thread() {
let handle1 = tokio::spawn(async { task::id() });
let handle2 = tokio::spawn(async { task::id() });
let (id1, id2) = tokio::join!(handle1, handle2);
assert_ne!(id1.unwrap(), id2.unwrap());
}
#[cfg(not(tokio_wasi))]
#[tokio::test(flavor = "multi_thread")]
async fn task_id_collision_multi_thread() {
let handle1 = tokio::spawn(async { task::id() });
let handle2 = tokio::spawn(async { task::id() });
let (id1, id2) = tokio::join!(handle1, handle2);
assert_ne!(id1.unwrap(), id2.unwrap());
}
#[tokio::test(flavor = "current_thread")]
async fn task_ids_match_current_thread() {
let (tx, rx) = oneshot::channel();
let handle = tokio::spawn(async {
let id = rx.await.unwrap();
assert_eq!(id, task::id());
});
tx.send(handle.id()).unwrap();
handle.await.unwrap();
}
#[cfg(not(tokio_wasi))]
#[tokio::test(flavor = "multi_thread")]
async fn task_ids_match_multi_thread() {
let (tx, rx) = oneshot::channel();
let handle = tokio::spawn(async {
let id = rx.await.unwrap();
assert_eq!(id, task::id());
});
tx.send(handle.id()).unwrap();
handle.await.unwrap();
}
#[cfg(not(tokio_wasi))]
#[tokio::test(flavor = "multi_thread")]
async fn task_id_future_destructor_completion() {
struct MyFuture {
tx: Option<oneshot::Sender<Id>>,
}
impl Future for MyFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Ready(())
}
}
impl Drop for MyFuture {
fn drop(&mut self) {
let _ = self.tx.take().unwrap().send(task::id());
}
}
let (tx, rx) = oneshot::channel();
let handle = tokio::spawn(MyFuture { tx: Some(tx) });
let id = handle.id();
handle.await.unwrap();
assert_eq!(rx.await.unwrap(), id);
}
#[cfg(not(tokio_wasi))]
#[tokio::test(flavor = "multi_thread")]
async fn task_id_future_destructor_abort() {
struct MyFuture {
tx: Option<oneshot::Sender<Id>>,
}
impl Future for MyFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Pending
}
}
impl Drop for MyFuture {
fn drop(&mut self) {
let _ = self.tx.take().unwrap().send(task::id());
}
}
let (tx, rx) = oneshot::channel();
let handle = tokio::spawn(MyFuture { tx: Some(tx) });
let id = handle.id();
handle.abort();
assert!(handle.await.unwrap_err().is_cancelled());
assert_eq!(rx.await.unwrap(), id);
}
#[tokio::test(flavor = "current_thread")]
async fn task_id_output_destructor_handle_dropped_before_completion() {
struct MyOutput {
tx: Option<oneshot::Sender<Id>>,
}
impl Drop for MyOutput {
fn drop(&mut self) {
let _ = self.tx.take().unwrap().send(task::id());
}
}
struct MyFuture {
tx: Option<oneshot::Sender<Id>>,
}
impl Future for MyFuture {
type Output = MyOutput;
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(MyOutput { tx: self.tx.take() })
}
}
let (tx, mut rx) = oneshot::channel();
let handle = tokio::spawn(MyFuture { tx: Some(tx) });
let id = handle.id();
drop(handle);
assert!(rx.try_recv().is_err());
assert_eq!(rx.await.unwrap(), id);
}
#[tokio::test(flavor = "current_thread")]
async fn task_id_output_destructor_handle_dropped_after_completion() {
struct MyOutput {
tx: Option<oneshot::Sender<Id>>,
}
impl Drop for MyOutput {
fn drop(&mut self) {
let _ = self.tx.take().unwrap().send(task::id());
}
}
struct MyFuture {
tx_output: Option<oneshot::Sender<Id>>,
tx_future: Option<oneshot::Sender<()>>,
}
impl Future for MyFuture {
type Output = MyOutput;
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
let _ = self.tx_future.take().unwrap().send(());
Poll::Ready(MyOutput {
tx: self.tx_output.take(),
})
}
}
let (tx_output, mut rx_output) = oneshot::channel();
let (tx_future, rx_future) = oneshot::channel();
let handle = tokio::spawn(MyFuture {
tx_output: Some(tx_output),
tx_future: Some(tx_future),
});
let id = handle.id();
rx_future.await.unwrap();
assert!(rx_output.try_recv().is_err());
drop(handle);
assert_eq!(rx_output.await.unwrap(), id);
}
#[test]
fn task_try_id_outside_task() {
assert_eq!(None, task::try_id());
}
#[cfg(not(tokio_wasi))]
#[test]
fn task_try_id_inside_block_on() {
let rt = Runtime::new().unwrap();
rt.block_on(async {
assert_eq!(None, task::try_id());
});
}
#[tokio::test(flavor = "current_thread")]
async fn task_id_spawn_local() {
LocalSet::new()
.run_until(async {
task::spawn_local(async { println!("task id: {}", task::id()) })
.await
.unwrap();
})
.await
}
#[tokio::test(flavor = "current_thread")]
async fn task_id_nested_spawn_local() {
LocalSet::new()
.run_until(async {
task::spawn_local(async {
let parent_id = task::id();
LocalSet::new()
.run_until(async {
task::spawn_local(async move {
assert_ne!(parent_id, task::id());
})
.await
.unwrap();
})
.await;
assert_eq!(parent_id, task::id());
})
.await
.unwrap();
})
.await;
}
#[cfg(not(tokio_wasi))]
#[tokio::test(flavor = "multi_thread")]
async fn task_id_block_in_place_block_on_spawn() {
task::spawn(async {
let parent_id = task::id();
task::block_in_place(move || {
let rt = Builder::new_current_thread().build().unwrap();
rt.block_on(rt.spawn(async move {
assert_ne!(parent_id, task::id());
}))
.unwrap();
});
assert_eq!(parent_id, task::id());
})
.await
.unwrap();
}
#[cfg(not(tokio_wasi))]
#[test]
fn task_id_outside_task_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let _ = task::id();
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[cfg(not(tokio_wasi))]
#[test]
fn task_id_inside_block_on_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = Runtime::new().unwrap();
rt.block_on(async {
task::id();
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}

230
vendor/tokio/tests/task_join_set.rs vendored Normal file
View file

@ -0,0 +1,230 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full"))]
use tokio::sync::oneshot;
use tokio::task::JoinSet;
use tokio::time::Duration;
use futures::future::FutureExt;
fn rt() -> tokio::runtime::Runtime {
tokio::runtime::Builder::new_current_thread()
.build()
.unwrap()
}
#[tokio::test(start_paused = true)]
async fn test_with_sleep() {
let mut set = JoinSet::new();
for i in 0..10 {
set.spawn(async move { i });
assert_eq!(set.len(), 1 + i);
}
set.detach_all();
assert_eq!(set.len(), 0);
assert!(matches!(set.join_next().await, None));
for i in 0..10 {
set.spawn(async move {
tokio::time::sleep(Duration::from_secs(i as u64)).await;
i
});
assert_eq!(set.len(), 1 + i);
}
let mut seen = [false; 10];
while let Some(res) = set.join_next().await.transpose().unwrap() {
seen[res] = true;
}
for was_seen in &seen {
assert!(was_seen);
}
assert!(matches!(set.join_next().await, None));
// Do it again.
for i in 0..10 {
set.spawn(async move {
tokio::time::sleep(Duration::from_secs(i as u64)).await;
i
});
}
let mut seen = [false; 10];
while let Some(res) = set.join_next().await.transpose().unwrap() {
seen[res] = true;
}
for was_seen in &seen {
assert!(was_seen);
}
assert!(matches!(set.join_next().await, None));
}
#[tokio::test]
async fn test_abort_on_drop() {
let mut set = JoinSet::new();
let mut recvs = Vec::new();
for _ in 0..16 {
let (send, recv) = oneshot::channel::<()>();
recvs.push(recv);
set.spawn(async {
// This task will never complete on its own.
futures::future::pending::<()>().await;
drop(send);
});
}
drop(set);
for recv in recvs {
// The task is aborted soon and we will receive an error.
assert!(recv.await.is_err());
}
}
#[tokio::test]
async fn alternating() {
let mut set = JoinSet::new();
assert_eq!(set.len(), 0);
set.spawn(async {});
assert_eq!(set.len(), 1);
set.spawn(async {});
assert_eq!(set.len(), 2);
for _ in 0..16 {
let () = set.join_next().await.unwrap().unwrap();
assert_eq!(set.len(), 1);
set.spawn(async {});
assert_eq!(set.len(), 2);
}
}
#[tokio::test(start_paused = true)]
async fn abort_tasks() {
let mut set = JoinSet::new();
let mut num_canceled = 0;
let mut num_completed = 0;
for i in 0..16 {
let abort = set.spawn(async move {
tokio::time::sleep(Duration::from_secs(i as u64)).await;
i
});
if i % 2 != 0 {
// abort odd-numbered tasks.
abort.abort();
}
}
loop {
match set.join_next().await {
Some(Ok(res)) => {
num_completed += 1;
assert_eq!(res % 2, 0);
}
Some(Err(e)) => {
assert!(e.is_cancelled());
num_canceled += 1;
}
None => break,
}
}
assert_eq!(num_canceled, 8);
assert_eq!(num_completed, 8);
}
#[test]
fn runtime_gone() {
let mut set = JoinSet::new();
{
let rt = rt();
set.spawn_on(async { 1 }, rt.handle());
drop(rt);
}
assert!(rt()
.block_on(set.join_next())
.unwrap()
.unwrap_err()
.is_cancelled());
}
// This ensures that `join_next` works correctly when the coop budget is
// exhausted.
#[tokio::test(flavor = "current_thread")]
async fn join_set_coop() {
// Large enough to trigger coop.
const TASK_NUM: u32 = 1000;
static SEM: tokio::sync::Semaphore = tokio::sync::Semaphore::const_new(0);
let mut set = JoinSet::new();
for _ in 0..TASK_NUM {
set.spawn(async {
SEM.add_permits(1);
});
}
// Wait for all tasks to complete.
//
// Since this is a `current_thread` runtime, there's no race condition
// between the last permit being added and the task completing.
let _ = SEM.acquire_many(TASK_NUM).await.unwrap();
let mut count = 0;
let mut coop_count = 0;
loop {
match set.join_next().now_or_never() {
Some(Some(Ok(()))) => {}
Some(Some(Err(err))) => panic!("failed: {}", err),
None => {
coop_count += 1;
tokio::task::yield_now().await;
continue;
}
Some(None) => break,
}
count += 1;
}
assert!(coop_count >= 1);
assert_eq!(count, TASK_NUM);
}
#[tokio::test(start_paused = true)]
async fn abort_all() {
let mut set: JoinSet<()> = JoinSet::new();
for _ in 0..5 {
set.spawn(futures::future::pending());
}
for _ in 0..5 {
set.spawn(async {
tokio::time::sleep(Duration::from_secs(1)).await;
});
}
// The join set will now have 5 pending tasks and 5 ready tasks.
tokio::time::sleep(Duration::from_secs(2)).await;
set.abort_all();
assert_eq!(set.len(), 10);
let mut count = 0;
while let Some(res) = set.join_next().await {
if let Err(err) = res {
assert!(err.is_cancelled());
}
count += 1;
}
assert_eq!(count, 10);
assert_eq!(set.len(), 0);
}

119
vendor/tokio/tests/task_local.rs vendored Normal file
View file

@ -0,0 +1,119 @@
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support threads
#![allow(clippy::declare_interior_mutable_const)]
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::oneshot;
#[tokio::test(flavor = "multi_thread")]
async fn local() {
tokio::task_local! {
static REQ_ID: u32;
pub static FOO: bool;
}
let j1 = tokio::spawn(REQ_ID.scope(1, async move {
assert_eq!(REQ_ID.get(), 1);
assert_eq!(REQ_ID.get(), 1);
}));
let j2 = tokio::spawn(REQ_ID.scope(2, async move {
REQ_ID.with(|v| {
assert_eq!(REQ_ID.get(), 2);
assert_eq!(*v, 2);
});
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
assert_eq!(REQ_ID.get(), 2);
}));
let j3 = tokio::spawn(FOO.scope(true, async move {
assert!(FOO.get());
}));
j1.await.unwrap();
j2.await.unwrap();
j3.await.unwrap();
}
#[tokio::test]
async fn task_local_available_on_abort() {
tokio::task_local! {
static KEY: u32;
}
struct MyFuture {
tx_poll: Option<oneshot::Sender<()>>,
tx_drop: Option<oneshot::Sender<u32>>,
}
impl Future for MyFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
if let Some(tx_poll) = self.tx_poll.take() {
let _ = tx_poll.send(());
}
Poll::Pending
}
}
impl Drop for MyFuture {
fn drop(&mut self) {
let _ = self.tx_drop.take().unwrap().send(KEY.get());
}
}
let (tx_drop, rx_drop) = oneshot::channel();
let (tx_poll, rx_poll) = oneshot::channel();
let h = tokio::spawn(KEY.scope(
42,
MyFuture {
tx_poll: Some(tx_poll),
tx_drop: Some(tx_drop),
},
));
rx_poll.await.unwrap();
h.abort();
assert_eq!(rx_drop.await.unwrap(), 42);
let err = h.await.unwrap_err();
if !err.is_cancelled() {
if let Ok(panic) = err.try_into_panic() {
std::panic::resume_unwind(panic);
} else {
panic!();
}
}
}
#[tokio::test]
async fn task_local_available_on_completion_drop() {
tokio::task_local! {
static KEY: u32;
}
struct MyFuture {
tx: Option<oneshot::Sender<u32>>,
}
impl Future for MyFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Ready(())
}
}
impl Drop for MyFuture {
fn drop(&mut self) {
let _ = self.tx.take().unwrap().send(KEY.get());
}
}
let (tx, rx) = oneshot::channel();
let h = tokio::spawn(KEY.scope(42, MyFuture { tx: Some(tx) }));
assert_eq!(rx.await.unwrap(), 42);
h.await.unwrap();
}

693
vendor/tokio/tests/task_local_set.rs vendored Normal file
View file

@ -0,0 +1,693 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use futures::{
future::{pending, ready},
FutureExt,
};
use tokio::runtime;
use tokio::sync::{mpsc, oneshot};
use tokio::task::{self, LocalSet};
use tokio::time;
#[cfg(not(tokio_wasi))]
use std::cell::Cell;
use std::sync::atomic::AtomicBool;
#[cfg(not(tokio_wasi))]
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
#[cfg(not(tokio_wasi))]
use std::sync::atomic::Ordering::SeqCst;
use std::time::Duration;
#[tokio::test(flavor = "current_thread")]
async fn local_current_thread_scheduler() {
LocalSet::new()
.run_until(async {
task::spawn_local(async {}).await.unwrap();
})
.await;
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn local_threadpool() {
thread_local! {
static ON_RT_THREAD: Cell<bool> = Cell::new(false);
}
ON_RT_THREAD.with(|cell| cell.set(true));
LocalSet::new()
.run_until(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
task::spawn_local(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
})
.await
.unwrap();
})
.await;
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn localset_future_threadpool() {
thread_local! {
static ON_LOCAL_THREAD: Cell<bool> = Cell::new(false);
}
ON_LOCAL_THREAD.with(|cell| cell.set(true));
let local = LocalSet::new();
local.spawn_local(async move {
assert!(ON_LOCAL_THREAD.with(|cell| cell.get()));
});
local.await;
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn localset_future_timers() {
static RAN1: AtomicBool = AtomicBool::new(false);
static RAN2: AtomicBool = AtomicBool::new(false);
let local = LocalSet::new();
local.spawn_local(async move {
time::sleep(Duration::from_millis(5)).await;
RAN1.store(true, Ordering::SeqCst);
});
local.spawn_local(async move {
time::sleep(Duration::from_millis(10)).await;
RAN2.store(true, Ordering::SeqCst);
});
local.await;
assert!(RAN1.load(Ordering::SeqCst));
assert!(RAN2.load(Ordering::SeqCst));
}
#[tokio::test]
async fn localset_future_drives_all_local_futs() {
static RAN1: AtomicBool = AtomicBool::new(false);
static RAN2: AtomicBool = AtomicBool::new(false);
static RAN3: AtomicBool = AtomicBool::new(false);
let local = LocalSet::new();
local.spawn_local(async move {
task::spawn_local(async {
task::yield_now().await;
RAN3.store(true, Ordering::SeqCst);
});
task::yield_now().await;
RAN1.store(true, Ordering::SeqCst);
});
local.spawn_local(async move {
task::yield_now().await;
RAN2.store(true, Ordering::SeqCst);
});
local.await;
assert!(RAN1.load(Ordering::SeqCst));
assert!(RAN2.load(Ordering::SeqCst));
assert!(RAN3.load(Ordering::SeqCst));
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn local_threadpool_timer() {
// This test ensures that runtime services like the timer are properly
// set for the local task set.
thread_local! {
static ON_RT_THREAD: Cell<bool> = Cell::new(false);
}
ON_RT_THREAD.with(|cell| cell.set(true));
LocalSet::new()
.run_until(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
let join = task::spawn_local(async move {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
time::sleep(Duration::from_millis(10)).await;
assert!(ON_RT_THREAD.with(|cell| cell.get()));
});
join.await.unwrap();
})
.await;
}
#[test]
fn enter_guard_spawn() {
let local = LocalSet::new();
let _guard = local.enter();
// Run the local task set.
let join = task::spawn_local(async { true });
let rt = runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
local.block_on(&rt, async move {
assert!(join.await.unwrap());
});
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support panic recovery
#[test]
// This will panic, since the thread that calls `block_on` cannot use
// in-place blocking inside of `block_on`.
#[should_panic]
fn local_threadpool_blocking_in_place() {
thread_local! {
static ON_RT_THREAD: Cell<bool> = Cell::new(false);
}
ON_RT_THREAD.with(|cell| cell.set(true));
let rt = runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
LocalSet::new().block_on(&rt, async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
let join = task::spawn_local(async move {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
task::block_in_place(|| {});
assert!(ON_RT_THREAD.with(|cell| cell.get()));
});
join.await.unwrap();
});
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn local_threadpool_blocking_run() {
thread_local! {
static ON_RT_THREAD: Cell<bool> = Cell::new(false);
}
ON_RT_THREAD.with(|cell| cell.set(true));
LocalSet::new()
.run_until(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
let join = task::spawn_local(async move {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
task::spawn_blocking(|| {
assert!(
!ON_RT_THREAD.with(|cell| cell.get()),
"blocking must not run on the local task set's thread"
);
})
.await
.unwrap();
assert!(ON_RT_THREAD.with(|cell| cell.get()));
});
join.await.unwrap();
})
.await;
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn all_spawns_are_local() {
use futures::future;
thread_local! {
static ON_RT_THREAD: Cell<bool> = Cell::new(false);
}
ON_RT_THREAD.with(|cell| cell.set(true));
LocalSet::new()
.run_until(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
let handles = (0..128)
.map(|_| {
task::spawn_local(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
})
})
.collect::<Vec<_>>();
for joined in future::join_all(handles).await {
joined.unwrap();
}
})
.await;
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn nested_spawn_is_local() {
thread_local! {
static ON_RT_THREAD: Cell<bool> = Cell::new(false);
}
ON_RT_THREAD.with(|cell| cell.set(true));
LocalSet::new()
.run_until(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
task::spawn_local(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
task::spawn_local(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
task::spawn_local(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
task::spawn_local(async {
assert!(ON_RT_THREAD.with(|cell| cell.get()));
})
.await
.unwrap();
})
.await
.unwrap();
})
.await
.unwrap();
})
.await
.unwrap();
})
.await;
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[test]
fn join_local_future_elsewhere() {
thread_local! {
static ON_RT_THREAD: Cell<bool> = Cell::new(false);
}
ON_RT_THREAD.with(|cell| cell.set(true));
let rt = runtime::Runtime::new().unwrap();
let local = LocalSet::new();
local.block_on(&rt, async move {
let (tx, rx) = oneshot::channel();
let join = task::spawn_local(async move {
assert!(
ON_RT_THREAD.with(|cell| cell.get()),
"local task must run on local thread, no matter where it is awaited"
);
rx.await.unwrap();
"hello world"
});
let join2 = task::spawn(async move {
assert!(
!ON_RT_THREAD.with(|cell| cell.get()),
"spawned task should be on a worker"
);
tx.send(()).expect("task shouldn't have ended yet");
join.await.expect("task should complete successfully");
});
join2.await.unwrap()
});
}
// Tests for <https://github.com/tokio-rs/tokio/issues/4973>
#[cfg(not(tokio_wasi))] // Wasi doesn't support threads
#[tokio::test(flavor = "multi_thread")]
async fn localset_in_thread_local() {
thread_local! {
static LOCAL_SET: LocalSet = LocalSet::new();
}
// holds runtime thread until end of main fn.
let (_tx, rx) = oneshot::channel::<()>();
let handle = tokio::runtime::Handle::current();
std::thread::spawn(move || {
LOCAL_SET.with(|local_set| {
handle.block_on(local_set.run_until(async move {
let _ = rx.await;
}))
});
});
}
#[test]
fn drop_cancels_tasks() {
use std::rc::Rc;
// This test reproduces issue #1842
let rt = rt();
let rc1 = Rc::new(());
let rc2 = rc1.clone();
let (started_tx, started_rx) = oneshot::channel();
let local = LocalSet::new();
local.spawn_local(async move {
// Move this in
let _rc2 = rc2;
started_tx.send(()).unwrap();
futures::future::pending::<()>().await;
});
local.block_on(&rt, async {
started_rx.await.unwrap();
});
drop(local);
drop(rt);
assert_eq!(1, Rc::strong_count(&rc1));
}
/// Runs `f` on a dedicated thread and panics unless it completes within
/// `timeout`; a panic inside `f` is re-raised when the thread is joined.
///
/// Intended for tests whose failure mode is a hang or infinite loop that
/// cannot be detected any other way.
fn with_timeout(timeout: Duration, f: impl FnOnce() + Send + 'static) {
    use std::sync::mpsc::RecvTimeoutError;

    // Run the test body on its own thread; it reports completion over a
    // channel so this thread can distinguish "finished" from "hung".
    let (completed_tx, completed_rx) = std::sync::mpsc::channel();
    let worker = std::thread::spawn(move || {
        f();
        // Reaching this send proves the closure did not loop forever.
        completed_tx.send(()).unwrap();
    });

    // Wait — with a deliberately generous deadline for slow CI — for the
    // completion signal. `Disconnected` means the sender was dropped without
    // sending, i.e. `f` panicked; that panic surfaces via `join` below.
    match completed_rx.recv_timeout(timeout) {
        Err(RecvTimeoutError::Timeout) => panic!(
            "test did not complete within {:?} seconds, \
             we have (probably) entered an infinite loop!",
            timeout,
        ),
        Err(RecvTimeoutError::Disconnected) | Ok(()) => {}
    }

    worker.join().expect("test thread should not panic!")
}
/// Reproduces issue #1885: dropping a `LocalSet` while remotely-notified
/// tasks are pending must still cancel them. The failure mode is an infinite
/// loop, hence the `with_timeout` harness.
#[cfg_attr(tokio_wasi, ignore = "`unwrap()` in `with_timeout()` panics on Wasi")]
#[test]
fn drop_cancels_remote_tasks() {
    with_timeout(Duration::from_secs(60), || {
        let (sender, mut receiver) = mpsc::channel::<()>(1024);
        let runtime = rt();

        let local_set = LocalSet::new();
        local_set.spawn_local(async move { while receiver.recv().await.is_some() {} });

        // Give the spawned task a chance to start polling the channel.
        local_set.block_on(&runtime, async {
            time::sleep(Duration::from_millis(1)).await;
        });

        drop(sender);

        // This enters an infinite loop if the remote notified tasks are not
        // properly cancelled.
        drop(local_set);
    });
}
/// Reproduces issue #2460: `join_all` over locally-spawned tasks must be
/// woken as those tasks complete (a lost wakeup hangs the harness).
#[cfg_attr(
    tokio_wasi,
    ignore = "FIXME: `task::spawn_local().await.unwrap()` panics on Wasi"
)]
#[test]
fn local_tasks_wake_join_all() {
    with_timeout(Duration::from_secs(60), || {
        use futures::future::join_all;
        use tokio::task::LocalSet;

        let runtime = rt();
        let local_set = LocalSet::new();

        // Each handle completes only after a nested local task has run.
        let handles: Vec<_> = (0..128)
            .map(|_| {
                local_set.spawn_local(async move {
                    tokio::task::spawn_local(async move {}).await.unwrap();
                })
            })
            .collect();

        runtime.block_on(local_set.run_until(join_all(handles)));
    });
}
/// The inner test is timing-dependent, so retry it up to four times with
/// panics caught; if all retries fail, run once more uncaught so the failure
/// propagates to the test harness.
#[cfg(not(tokio_wasi))] // Wasi doesn't support panic recovery
#[test]
fn local_tasks_are_polled_after_tick() {
    let mut attempts = 0;
    while attempts < 4 {
        if std::panic::catch_unwind(local_tasks_are_polled_after_tick_inner).is_ok() {
            // success
            return;
        }
        attempts += 1;
    }

    // Final attempt without catching panics. If it fails again, the test
    // fails.
    local_tasks_are_polled_after_tick_inner();
}
#[cfg(not(tokio_wasi))] // Wasi doesn't support panic recovery
#[tokio::main(flavor = "current_thread")]
async fn local_tasks_are_polled_after_tick_inner() {
    // Reproduces issues #1899 and #1900
    //
    // RX1 counts oneshot receivers taken off the channel; RX2 counts local
    // tasks whose oneshot actually resolved. They are statics (and reset
    // below) because the caller may retry this function after a caught panic.
    static RX1: AtomicUsize = AtomicUsize::new(0);
    static RX2: AtomicUsize = AtomicUsize::new(0);
    const EXPECTED: usize = 500;

    RX1.store(0, SeqCst);
    RX2.store(0, SeqCst);

    let (tx, mut rx) = mpsc::unbounded_channel();

    let local = LocalSet::new();

    local
        .run_until(async {
            let task2 = task::spawn(async move {
                // Wait a bit
                time::sleep(Duration::from_millis(10)).await;

                let mut oneshots = Vec::with_capacity(EXPECTED);

                // Send values
                for _ in 0..EXPECTED {
                    let (oneshot_tx, oneshot_rx) = oneshot::channel();
                    oneshots.push(oneshot_tx);
                    tx.send(oneshot_rx).unwrap();
                }

                time::sleep(Duration::from_millis(10)).await;

                // Complete every oneshot; each completion should wake one of
                // the locally spawned tasks below.
                for tx in oneshots.drain(..) {
                    tx.send(()).unwrap();
                }

                // Spin until both counters reach EXPECTED. If local tasks
                // stop being polled after a tick (the bug), this loops
                // forever and the retry harness / test runner times out.
                loop {
                    time::sleep(Duration::from_millis(20)).await;
                    let rx1 = RX1.load(SeqCst);
                    let rx2 = RX2.load(SeqCst);

                    if rx1 == EXPECTED && rx2 == EXPECTED {
                        break;
                    }
                }
            });

            // For each received oneshot, spawn a local task that waits on it
            // and bumps RX2 once it resolves.
            while let Some(oneshot) = rx.recv().await {
                RX1.fetch_add(1, SeqCst);

                task::spawn_local(async move {
                    oneshot.await.unwrap();
                    RX2.fetch_add(1, SeqCst);
                });
            }

            task2.await.unwrap();
        })
        .await;
}
// Tasks woken during LocalSet teardown must be dropped, not polled to
// completion: the `unreachable!()` bodies assert the woken tasks never run
// past their awaits once `drop(local)` begins.
// NOTE(review): despite the name, no mutex appears in this body — presumably
// the scenario originally involved acquiring a lock in a Drop impl; confirm
// against tokio's history before renaming.
#[tokio::test]
async fn acquire_mutex_in_drop() {
    use futures::future::pending;

    let (tx1, rx1) = oneshot::channel();
    let (tx2, rx2) = oneshot::channel();
    let local = LocalSet::new();

    // Chain of tasks: each would hit `unreachable!()` if polled again after
    // its oneshot resolves during shutdown.
    local.spawn_local(async move {
        let _ = rx2.await;
        unreachable!();
    });

    local.spawn_local(async move {
        let _ = rx1.await;
        tx2.send(()).unwrap();
        unreachable!();
    });

    // Spawn a task that will never notify
    local.spawn_local(async move {
        pending::<()>().await;
        tx1.send(()).unwrap();
    });

    // Tick the loop
    local
        .run_until(async {
            task::yield_now().await;
        })
        .await;

    // Drop the LocalSet
    drop(local);
}
// `spawn_local` must wake the LocalSet's `run_until` future so the select!
// can observe the join handle completing; if the wakeup were lost, the
// never-ending `run_until(pending())` branch would hang the test.
#[tokio::test]
async fn spawn_wakes_localset() {
    let local = LocalSet::new();
    futures::select! {
        _ = local.run_until(pending::<()>()).fuse() => unreachable!(),
        ret = async { local.spawn_local(ready(())).await.unwrap()}.fuse() => ret
    }
}
// A Runtime and LocalSet stored together in a thread-local must be usable
// through `block_on` from thread-local access (and drop cleanly afterwards).
#[test]
fn store_local_set_in_thread_local_with_runtime() {
    use tokio::runtime::Runtime;

    thread_local! {
        static CURRENT: RtAndLocalSet = RtAndLocalSet::new();
    }

    // Bundles a current-thread runtime with a LocalSet driven on it.
    struct RtAndLocalSet {
        rt: Runtime,
        local: LocalSet,
    }

    impl RtAndLocalSet {
        fn new() -> RtAndLocalSet {
            RtAndLocalSet {
                rt: tokio::runtime::Builder::new_current_thread()
                    .enable_all()
                    .build()
                    .unwrap(),
                local: LocalSet::new(),
            }
        }

        // Spawns a trivial task inside the LocalSet context.
        async fn inner_method(&self) {
            self.local
                .run_until(async move {
                    tokio::task::spawn_local(async {});
                })
                .await
        }

        // Drives `inner_method` to completion on the owned runtime.
        fn method(&self) {
            self.rt.block_on(self.inner_method());
        }
    }

    CURRENT.with(|f| {
        f.method();
    });
}
// Tests gated on `--cfg tokio_unstable` APIs.
#[cfg(tokio_unstable)]
mod unstable {
    use tokio::runtime::UnhandledPanic;
    use tokio::task::LocalSet;

    // With `UnhandledPanic::ShutdownRuntime`, a panic in a spawned local task
    // must shut the LocalSet down and surface the configured panic message.
    #[tokio::test]
    #[should_panic(
        expected = "a spawned task panicked and the LocalSet is configured to shutdown on unhandled panic"
    )]
    async fn shutdown_on_panic() {
        LocalSet::new()
            .unhandled_panic(UnhandledPanic::ShutdownRuntime)
            .run_until(async {
                tokio::task::spawn_local(async {
                    panic!("boom");
                });
                // Keep `run_until` alive so the spawned task's panic is what
                // ends the LocalSet, not this future completing first.
                futures::future::pending::<()>().await;
            })
            .await;
    }

    // This test compares that, when the task driving `run_until` has already
    // consumed budget, the `run_until` future has less budget than a "spawned"
    // task.
    //
    // "Budget" is a fuzzy metric as the Tokio runtime is able to change values
    // internally. This is why the test uses indirection to test this.
    #[tokio::test]
    async fn run_until_does_not_get_own_budget() {
        // Consume some budget
        tokio::task::consume_budget().await;

        LocalSet::new()
            .run_until(async {
                // A freshly spawned task gets a budget of its own; count how
                // many `consume_budget` calls fit in one poll.
                let spawned = tokio::spawn(async {
                    let mut spawned_n = 0;
                    {
                        let mut spawned = tokio_test::task::spawn(async {
                            loop {
                                spawned_n += 1;
                                tokio::task::consume_budget().await;
                            }
                        });
                        // Poll once
                        assert!(!spawned.poll().is_ready());
                    }
                    spawned_n
                });

                // Same measurement inside `run_until`, which inherits the
                // partially-consumed budget of the driving task.
                let mut run_until_n = 0;
                {
                    let mut run_until = tokio_test::task::spawn(async {
                        loop {
                            run_until_n += 1;
                            tokio::task::consume_budget().await;
                        }
                    });
                    // Poll once
                    assert!(!run_until.poll().is_ready());
                }

                let spawned_n = spawned.await.unwrap();
                assert_ne!(spawned_n, 0);
                assert_ne!(run_until_n, 0);
                // The fresh task's budget must exceed run_until's leftover.
                assert!(spawned_n > run_until_n);
            })
            .await
    }
}
/// Builds the current-thread runtime used throughout these tests, with all
/// drivers (time, I/O) enabled.
fn rt() -> runtime::Runtime {
    let mut builder = tokio::runtime::Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}

123
vendor/tokio/tests/task_panic.rs vendored Normal file
View file

@ -0,0 +1,123 @@
#![warn(rust_2018_idioms)]
#![allow(clippy::declare_interior_mutable_const)]
#![cfg(all(feature = "full", not(tokio_wasi)))]
use futures::future;
use std::error::Error;
use tokio::runtime::Builder;
use tokio::task::{self, block_in_place};
mod support {
pub mod panic;
}
use support::panic::test_panic;
/// A panicking `block_in_place` call must report this file — the caller —
/// as the panic location, not a location inside Tokio.
#[test]
fn block_in_place_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_file = test_panic(|| {
        let rt = Builder::new_current_thread().enable_all().build().unwrap();
        rt.block_on(async {
            block_in_place(|| {});
        });
    });

    // The panic location should be in this file
    assert_eq!(panic_file.unwrap(), file!());
    Ok(())
}
/// A panicking `spawn_local` call must report this file as the panic
/// location.
#[test]
fn local_set_spawn_local_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_file = test_panic(|| {
        // The LocalSet exists but is never entered before spawning.
        let _local = task::LocalSet::new();

        let _ = task::spawn_local(async {});
    });

    // The panic location should be in this file
    assert_eq!(panic_file.unwrap(), file!());
    Ok(())
}
/// A panicking `LocalSet::block_on` nested inside `Runtime::block_on` must
/// report this file as the panic location.
#[test]
fn local_set_block_on_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_file = test_panic(|| {
        let rt = Builder::new_current_thread().enable_all().build().unwrap();
        let local = task::LocalSet::new();

        rt.block_on(async {
            local.block_on(&rt, future::pending::<()>());
        });
    });

    // The panic location should be in this file
    assert_eq!(panic_file.unwrap(), file!());
    Ok(())
}
/// A panicking `tokio::spawn` call must report this file as the panic
/// location.
#[test]
fn spawn_panic_caller() -> Result<(), Box<dyn Error>> {
    let panic_file = test_panic(|| {
        tokio::spawn(future::pending::<()>());
    });

    // The panic location should be in this file
    assert_eq!(panic_file.unwrap(), file!());
    Ok(())
}
/// A panicking nested `sync_scope` on a task-local key must report this file
/// as the panic location.
#[test]
fn local_key_sync_scope_panic_caller() -> Result<(), Box<dyn Error>> {
    tokio::task_local! {
        static NUMBER: u32;
    }

    let panic_file = test_panic(|| {
        // Re-enter sync_scope for a key that is currently borrowed via
        // `with` — the panicking combination under test.
        NUMBER.sync_scope(1, || {
            NUMBER.with(|_| {
                NUMBER.sync_scope(1, || {});
            });
        });
    });

    // The panic location should be in this file
    assert_eq!(panic_file.unwrap(), file!());
    Ok(())
}
/// A panicking `LocalKey::with` (key never set) must report this file as the
/// panic location.
#[test]
fn local_key_with_panic_caller() -> Result<(), Box<dyn Error>> {
    tokio::task_local! {
        static NUMBER: u32;
    }

    let panic_file = test_panic(|| {
        NUMBER.with(|_| {});
    });

    // The panic location should be in this file
    assert_eq!(panic_file.unwrap(), file!());
    Ok(())
}
/// A panicking `LocalKey::get` (key never set) must report this file as the
/// panic location.
#[test]
fn local_key_get_panic_caller() -> Result<(), Box<dyn Error>> {
    tokio::task_local! {
        static NUMBER: u32;
    }

    let panic_file = test_panic(|| {
        NUMBER.get();
    });

    // The panic location should be in this file
    assert_eq!(panic_file.unwrap(), file!());
    Ok(())
}

157
vendor/tokio/tests/tcp_accept.rs vendored Normal file
View file

@ -0,0 +1,157 @@
#![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))] // Wasi doesn't support bind
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{mpsc, oneshot};
use tokio_test::assert_ok;
use std::io;
use std::net::{IpAddr, SocketAddr};
// Generates one `#[tokio::test]` per (name, bind-target) pair: each test
// binds a listener to `$target`, accepts a single connection on a background
// task, and checks the client/server socket addresses line up.
macro_rules! test_accept {
    ($(($ident:ident, $target:expr),)*) => {
        $(
            #[tokio::test]
            async fn $ident() {
                let listener = assert_ok!(TcpListener::bind($target).await);
                // Ask the OS which port was actually assigned (targets use 0).
                let addr = listener.local_addr().unwrap();

                let (tx, rx) = oneshot::channel();

                tokio::spawn(async move {
                    let (socket, _) = assert_ok!(listener.accept().await);
                    assert_ok!(tx.send(socket));
                });

                let cli = assert_ok!(TcpStream::connect(&addr).await);
                let srv = assert_ok!(rx.await);
                // The accepted socket's peer must be our client's local addr.
                assert_eq!(cli.local_addr().unwrap(), srv.peer_addr().unwrap());
            }
        )*
    }
}
// Bind targets covering string, SocketAddr, and tuple argument forms
// accepted by `TcpListener::bind`.
test_accept! {
    (ip_str, "127.0.0.1:0"),
    (host_str, "localhost:0"),
    (socket_addr, "127.0.0.1:0".parse::<SocketAddr>().unwrap()),
    (str_port_tuple, ("127.0.0.1", 0)),
    (ip_port_tuple, ("127.0.0.1".parse::<IpAddr>().unwrap(), 0)),
}
use std::pin::Pin;
use std::sync::{
atomic::{AtomicUsize, Ordering::SeqCst},
Arc,
};
use std::task::{Context, Poll};
use tokio_stream::{Stream, StreamExt};
// Stream wrapper that counts every `poll_next` call (including ones that
// return Pending) while delegating accepts to the wrapped listener.
struct TrackPolls<'a> {
    // Shared so the test body can read the count from outside the task.
    npolls: Arc<AtomicUsize>,
    listener: &'a mut TcpListener,
}

impl<'a> Stream for TrackPolls<'a> {
    type Item = io::Result<(TcpStream, SocketAddr)>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Count before delegating so Pending polls are included too.
        self.npolls.fetch_add(1, SeqCst);
        self.listener.poll_accept(cx).map(Some)
    }
}
// The accept stream must be polled only when actually woken: once at start,
// then exactly twice per accepted connection (one Ready, one Pending) — no
// spurious wakeups in between.
#[tokio::test]
async fn no_extra_poll() {
    let mut listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let addr = listener.local_addr().unwrap();

    let (tx, rx) = oneshot::channel();
    let (accepted_tx, mut accepted_rx) = mpsc::unbounded_channel();

    tokio::spawn(async move {
        let mut incoming = TrackPolls {
            npolls: Arc::new(AtomicUsize::new(0)),
            listener: &mut listener,
        };
        // Hand the shared poll counter back to the test body.
        assert_ok!(tx.send(Arc::clone(&incoming.npolls)));
        while incoming.next().await.is_some() {
            accepted_tx.send(()).unwrap();
        }
    });

    let npolls = assert_ok!(rx.await);
    // Let the accept task run up to its first Pending.
    tokio::task::yield_now().await;

    // should have been polled exactly once: the initial poll
    assert_eq!(npolls.load(SeqCst), 1);

    let _ = assert_ok!(TcpStream::connect(&addr).await);
    accepted_rx.recv().await.unwrap();

    // should have been polled twice more: once to yield Some(), then once to yield Pending
    assert_eq!(npolls.load(SeqCst), 1 + 2);
}
// N tasks all wait in `accept()` on the same shared listener; when a single
// connection arrives, every waiting task must receive a wakeup (the test
// waits for all N to report being notified).
#[tokio::test]
async fn accept_many() {
    use futures::future::poll_fn;
    use std::future::Future;
    use std::sync::atomic::AtomicBool;

    const N: usize = 50;

    let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let listener = Arc::new(listener);
    let addr = listener.local_addr().unwrap();
    // Set once the TCP client has connected; lets tasks distinguish a
    // pre-connection poll from a post-connection wakeup.
    let connected = Arc::new(AtomicBool::new(false));
    let (pending_tx, mut pending_rx) = mpsc::unbounded_channel();
    let (notified_tx, mut notified_rx) = mpsc::unbounded_channel();

    for _ in 0..N {
        let listener = listener.clone();
        let connected = connected.clone();
        let pending_tx = pending_tx.clone();
        let notified_tx = notified_tx.clone();

        tokio::spawn(async move {
            let accept = listener.accept();
            tokio::pin!(accept);

            let mut polled = false;

            poll_fn(|cx| {
                if !polled {
                    // First poll: register interest in accept (must be
                    // Pending — nothing has connected yet), report that this
                    // task is parked, and stay Pending.
                    polled = true;
                    assert!(Pin::new(&mut accept).poll(cx).is_pending());
                    pending_tx.send(()).unwrap();
                    Poll::Pending
                } else if connected.load(SeqCst) {
                    // Woken after the connection arrived: success.
                    notified_tx.send(()).unwrap();
                    Poll::Ready(())
                } else {
                    Poll::Pending
                }
            })
            .await;

            pending_tx.send(()).unwrap();
        });
    }

    // Wait for all tasks to have polled at least once
    for _ in 0..N {
        pending_rx.recv().await.unwrap();
    }

    // Establish a TCP connection
    connected.store(true, SeqCst);
    let _sock = TcpStream::connect(addr).await.unwrap();

    // Wait for all notifications
    for _ in 0..N {
        notified_rx.recv().await.unwrap();
    }
}

Some files were not shown because too many files have changed in this diff Show more